From 83a51935460002cb8dd7a8e5a707c78290c5dc5e Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Sun, 29 Oct 2017 18:15:55 +0100 Subject: [PATCH 1/5] Add CSIConnection CSIConnection connects to CSI driver and handles all CSI calls. --- pkg/connection/connection.go | 150 +++++++++++++++ pkg/connection/connection_test.go | 300 ++++++++++++++++++++++++++++++ 2 files changed, 450 insertions(+) create mode 100644 pkg/connection/connection.go create mode 100644 pkg/connection/connection_test.go diff --git a/pkg/connection/connection.go b/pkg/connection/connection.go new file mode 100644 index 000000000..f30bb2fe9 --- /dev/null +++ b/pkg/connection/connection.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package connection + +import ( + "context" + "fmt" + "time" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/glog" + "google.golang.org/grpc" +) + +// CSIConnection is gRPC connection to a remote CSI driver and abstracts all +// CSI calls. +type CSIConnection interface { + // GetDriverName returns driver name as discovered by GetPluginInfo() + // gRPC call. + GetDriverName(ctx context.Context) (string, error) + + // SupportsControllerPublish returns true if the CSI driver reports + // PUBLISH_UNPUBLISH_VOLUME in ControllerGetCapabilities() gRPC call. 
+ SupportsControllerPublish(ctx context.Context) (bool, error) + + // Close the connection + Close() error +} + +type csiConnection struct { + conn *grpc.ClientConn +} + +var ( + _ CSIConnection = &csiConnection{} + + // Version of CSI this client implements + csiVersion = csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + } +) + +func New(address string, timeoutSeconds int) (CSIConnection, error) { + conn, err := connect(address, timeoutSeconds) + if err != nil { + return nil, err + } + return &csiConnection{ + conn: conn, + }, nil +} + +func connect(address string, timeoutSeconds int) (*grpc.ClientConn, error) { + var err error + for i := 0; i < timeoutSeconds; i++ { + var conn *grpc.ClientConn + conn, err = grpc.Dial(address, grpc.WithInsecure()) + if err == nil { + return conn, nil + } + glog.Warningf("Error connecting to %s: %s", address, err) + time.Sleep(time.Second) + } + return nil, err +} + +func (c *csiConnection) GetDriverName(ctx context.Context) (string, error) { + client := csi.NewIdentityClient(c.conn) + + req := csi.GetPluginInfoRequest{ + Version: &csiVersion, + } + + rsp, err := client.GetPluginInfo(ctx, &req) + if err != nil { + return "", err + } + e := rsp.GetError() + if e != nil { + // TODO: report the right error + return "", fmt.Errorf("Error calling GetPluginInfo: %+v", e) + } + + result := rsp.GetResult() + if result == nil { + return "", fmt.Errorf("result is empty") + } + name := result.GetName() + if name == "" { + return "", fmt.Errorf("name is empty") + } + return result.GetName(), nil +} + +func (c *csiConnection) SupportsControllerPublish(ctx context.Context) (bool, error) { + client := csi.NewControllerClient(c.conn) + req := csi.ControllerGetCapabilitiesRequest{ + Version: &csiVersion, + } + + rsp, err := client.ControllerGetCapabilities(ctx, &req) + if err != nil { + return false, err + } + e := rsp.GetError() + if e != nil { + // TODO: report the right error + return false, fmt.Errorf("error calling ControllerGetCapabilities: 
%+v", e) + } + + result := rsp.GetResult() + if result == nil { + return false, fmt.Errorf("result is empty") + } + + caps := result.GetCapabilities() + for _, cap := range caps { + if cap == nil { + continue + } + rpc := cap.GetRpc() + if rpc == nil { + continue + } + if rpc.GetType() == csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME { + return true, nil + } + } + return false, nil +} + +func (c *csiConnection) Close() error { + return c.conn.Close() +} diff --git a/pkg/connection/connection_test.go b/pkg/connection/connection_test.go new file mode 100644 index 000000000..95788f3e6 --- /dev/null +++ b/pkg/connection/connection_test.go @@ -0,0 +1,300 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package connection + +import ( + "context" + "fmt" + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + "github.com/kubernetes-csi/csi-test/driver" +) + +func createMockServer(t *testing.T) (*gomock.Controller, *driver.MockCSIDriver, *driver.MockIdentityServer, *driver.MockControllerServer, CSIConnection, error) { + // Start the mock server + mockController := gomock.NewController(t) + identityServer := driver.NewMockIdentityServer(mockController) + controllerServer := driver.NewMockControllerServer(mockController) + drv := driver.NewMockCSIDriver(&driver.MockCSIDriverServers{ + Identity: identityServer, + Controller: controllerServer, + }) + drv.Start() + + // Create a client connection to it + addr := drv.Address() + csiConn, err := New(addr, 1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + + return mockController, drv, identityServer, controllerServer, csiConn, nil +} + +func TestGetPluginInfo(t *testing.T) { + tests := []struct { + name string + output *csi.GetPluginInfoResponse + injectError bool + expectError bool + }{ + { + name: "success", + output: &csi.GetPluginInfoResponse{ + Reply: &csi.GetPluginInfoResponse_Result_{ + Result: &csi.GetPluginInfoResponse_Result{ + Name: "csi/example", + VendorVersion: "0.1.0", + Manifest: map[string]string{ + "hello": "world", + }, + }, + }, + }, + expectError: false, + }, + { + name: "gRPC error", + output: nil, + injectError: true, + expectError: true, + }, + { + name: "empty reply", + output: &csi.GetPluginInfoResponse{ + Reply: nil, + }, + expectError: true, + }, + { + name: "empty name", + output: &csi.GetPluginInfoResponse{ + Reply: &csi.GetPluginInfoResponse_Result_{ + Result: &csi.GetPluginInfoResponse_Result{ + Name: "", + }, + }, + }, + expectError: true, + }, + { + name: "general error", + output: &csi.GetPluginInfoResponse{ + Reply: &csi.GetPluginInfoResponse_Error{ + Error: &csi.Error{ + Value: &csi.Error_GeneralError_{ + 
GeneralError: &csi.Error_GeneralError{ + ErrorCode: csi.Error_GeneralError_UNSUPPORTED_REQUEST_VERSION, + CallerMustNotRetry: true, + ErrorDescription: "mock error 1", + }, + }, + }, + }, + }, + expectError: true, + }, + } + + mockController, driver, identityServer, _, csiConn, err := createMockServer(t) + if err != nil { + t.Fatal(err) + } + defer mockController.Finish() + defer driver.Stop() + defer csiConn.Close() + + for _, test := range tests { + + in := &csi.GetPluginInfoRequest{ + Version: &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + } + + out := test.output + var injectedErr error = nil + if test.injectError { + injectedErr = fmt.Errorf("mock error") + } + + // Setup expectation + identityServer.EXPECT().GetPluginInfo(gomock.Any(), in).Return(out, injectedErr).Times(1) + + name, err := csiConn.GetDriverName(context.Background()) + if test.expectError && err == nil { + t.Errorf("test %q: Expected error, got none", test.name) + } + if !test.expectError && err != nil { + t.Errorf("test %q: got error: %v", test.name, err) + } + if err == nil && name != "csi/example" { + t.Errorf("got unexpected name: %q", name) + } + } +} + +func TestSupportsControllerPublish(t *testing.T) { + tests := []struct { + name string + output *csi.ControllerGetCapabilitiesResponse + injectError bool + expectError bool + }{ + { + name: "success", + output: &csi.ControllerGetCapabilitiesResponse{ + Reply: &csi.ControllerGetCapabilitiesResponse_Result_{ + Result: &csi.ControllerGetCapabilitiesResponse_Result{ + Capabilities: []*csi.ControllerServiceCapability{ + &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + }, + }, + }, + &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + }, + 
}, + }, + }, + }, + expectError: false, + }, + { + name: "gRPC error", + output: nil, + injectError: true, + expectError: true, + }, + { + name: "empty reply", + output: &csi.ControllerGetCapabilitiesResponse{ + Reply: nil, + }, + expectError: true, + }, + { + name: "general error", + output: &csi.ControllerGetCapabilitiesResponse{ + Reply: &csi.ControllerGetCapabilitiesResponse_Error{ + Error: &csi.Error{ + Value: &csi.Error_GeneralError_{ + GeneralError: &csi.Error_GeneralError{ + ErrorCode: csi.Error_GeneralError_UNSUPPORTED_REQUEST_VERSION, + CallerMustNotRetry: true, + ErrorDescription: "mock error 1", + }, + }, + }, + }, + }, + expectError: true, + }, + { + name: "no publish", + output: &csi.ControllerGetCapabilitiesResponse{ + Reply: &csi.ControllerGetCapabilitiesResponse_Result_{ + Result: &csi.ControllerGetCapabilitiesResponse_Result{ + Capabilities: []*csi.ControllerServiceCapability{ + &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + }, + }, + }, + }, + }, + }, + }, + expectError: false, + }, + { + name: "empty capability", + output: &csi.ControllerGetCapabilitiesResponse{ + Reply: &csi.ControllerGetCapabilitiesResponse_Result_{ + Result: &csi.ControllerGetCapabilitiesResponse_Result{ + Capabilities: []*csi.ControllerServiceCapability{ + &csi.ControllerServiceCapability{ + Type: nil, + }, + }, + }, + }, + }, + expectError: false, + }, + { + name: "no capabilities", + output: &csi.ControllerGetCapabilitiesResponse{ + Reply: &csi.ControllerGetCapabilitiesResponse_Result_{ + Result: &csi.ControllerGetCapabilitiesResponse_Result{ + Capabilities: []*csi.ControllerServiceCapability{}, + }, + }, + }, + expectError: false, + }, + } + + mockController, driver, _, controllerServer, csiConn, err := createMockServer(t) + if err != nil { + t.Fatal(err) + } + defer mockController.Finish() + defer driver.Stop() + defer 
csiConn.Close() + + for _, test := range tests { + + in := &csi.ControllerGetCapabilitiesRequest{ + Version: &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + } + + out := test.output + var injectedErr error = nil + if test.injectError { + injectedErr = fmt.Errorf("mock error") + } + + // Setup expectation + controllerServer.EXPECT().ControllerGetCapabilities(gomock.Any(), in).Return(out, injectedErr).Times(1) + + _, err = csiConn.SupportsControllerPublish(context.Background()) + if test.expectError && err == nil { + t.Errorf("test %q: Expected error, got none", test.name) + } + if !test.expectError && err != nil { + t.Errorf("test %q: got error: %v", test.name, err) + } + } +} From 436a188d44a3a4130e15322e86d2e41ef58b2c63 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Sun, 29 Oct 2017 18:17:56 +0100 Subject: [PATCH 2/5] Connect to real CSI csi-attacher now connects to a real CSI and uses its driver name. For now, it expects that the CSI does not support ControllerPublish calls and marks all VolumeAttachment objects as attached. --- cmd/csi-attacher/main.go | 63 +++++++++++++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 10 deletions(-) diff --git a/cmd/csi-attacher/main.go b/cmd/csi-attacher/main.go index 0c390a886..dca0dad3a 100644 --- a/cmd/csi-attacher/main.go +++ b/cmd/csi-attacher/main.go @@ -17,28 +17,37 @@ limitations under the License. 
package main import ( + "context" "flag" "os" "os/signal" "time" - "github.com/kubernetes-csi/external-attacher-csi/pkg/controller" + "github.com/golang/glog" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + + "github.com/kubernetes-csi/external-attacher-csi/pkg/connection" + "github.com/kubernetes-csi/external-attacher-csi/pkg/controller" ) const ( // Number of worker threads threads = 10 + + // Default timeout of short CSI calls like GetPluginInfo + csiTimeout = time.Second ) +// Command line flags var ( - // TODO: add a better description of attacher name - is it CSI plugin name? Kubernetes plugin name? kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig file. Required only when running out of cluster.") resync = flag.Duration("resync", 10*time.Second, "Resync interval of the controller.") - connectionTimeout = flag.Duration("connection-timeout", 60, "Timeout for waiting for CSI driver socket (in seconds).") + connectionTimeout = flag.Int("connection-timeout", 60, "Timeout for waiting for CSI driver socket (in seconds).") + csiAddress = flag.String("csi-address", "/run/csi/socket", "Address of the CSI driver socket.") + dummy = flag.Bool("dummy", false, "Run in dummy mode, i.e. not connecting to CSI driver and marking everything as attached. Expected CSI driver name is \"csi/dummy\".") ) func main() { @@ -48,21 +57,55 @@ func main() { // Create the client config. Use kubeconfig if given, otherwise assume in-cluster. 
config, err := buildConfig(*kubeconfig) if err != nil { - panic(err) + glog.Error(err.Error()) + os.Exit(1) } clientset, err := kubernetes.NewForConfig(config) if err != nil { - panic(err) + glog.Error(err.Error()) + os.Exit(1) } factory := informers.NewSharedInformerFactory(clientset, *resync) - // TODO: wait for CSI's socket and discover 'attacher' and whether the - // driver supports ControllerPulishVolume using ControllerGetCapabilities - attacher := "csi/example" - handler := controller.NewTrivialHandler(clientset) + var handler controller.Handler + + var attacher string + if *dummy { + // Do not connect to any CSI, mark everything as attached. + handler = controller.NewTrivialHandler(clientset) + attacher = "csi/dummy" + } else { + // Connect to CSI. + csiConn, err := connection.New(*csiAddress, *connectionTimeout) + if err != nil { + glog.Error(err.Error()) + os.Exit(1) + } + + // Find driver name. + ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) + defer cancel() + attacher, err = csiConn.GetDriverName(ctx) + if err != nil { + glog.Error(err.Error()) + os.Exit(1) + } + + // Find out if the driver supports attach/detach. 
+ supportsAttach, err := csiConn.SupportsControllerPublish(ctx) + if err != nil { + glog.Error(err.Error()) + os.Exit(1) + } + if !supportsAttach { + handler = controller.NewTrivialHandler(clientset) + } else { + glog.Error("Real attach/detach handler is not implemented yet") + os.Exit(1) + } + } - // Start the provision controller which will dynamically provision NFS PVs ctrl := controller.NewCSIAttachController( clientset, attacher, From 6c707c3bea10d504c96003eb8c62de48009fc5bc Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Sun, 29 Oct 2017 18:18:57 +0100 Subject: [PATCH 3/5] Vendor --- Gopkg.lock | 261 + Gopkg.toml | 54 + .../spec/.gitignore | 2 + .../spec/.travis.yml | 38 + .../spec/CONTRIBUTING.md | 48 + .../container-storage-interface/spec/LICENSE | 201 + .../container-storage-interface/spec/Makefile | 50 + .../container-storage-interface/spec/OWNERS | 10 + .../spec/README.md | 5 + .../container-storage-interface/spec/VERSION | 1 + .../spec/csi.proto | 1301 ++++ .../spec/lib/README.md | 2 + .../spec/lib/cxx/Makefile | 9 + .../spec/lib/go/.gitignore | 4 + .../spec/lib/go/Makefile | 119 + .../spec/lib/go/README.md | 21 + .../spec/lib/go/csi.go | 5 + .../spec/lib/go/csi/csi.pb.go | 5819 +++++++++++++++++ .../container-storage-interface/spec/logo.png | Bin 0 -> 21136 bytes .../container-storage-interface/spec/spec.md | 1838 ++++++ vendor/github.com/golang/mock/.gitignore | 17 + vendor/github.com/golang/mock/.travis.yml | 13 + vendor/github.com/golang/mock/AUTHORS | 12 + vendor/github.com/golang/mock/CONTRIBUTORS | 37 + vendor/github.com/golang/mock/LICENSE | 202 + vendor/github.com/golang/mock/README.md | 86 + .../github.com/golang/mock/ci/check_go_fmt.sh | 12 + .../golang/mock/ci/check_go_generate.sh | 25 + vendor/github.com/golang/mock/gomock/call.go | 258 + .../golang/mock/gomock/call_test.go | 47 + .../github.com/golang/mock/gomock/callset.go | 76 + .../golang/mock/gomock/controller.go | 183 + .../golang/mock/gomock/controller_test.go | 475 ++ 
.../github.com/golang/mock/gomock/matchers.go | 99 + .../golang/mock/gomock/matchers_test.go | 70 + .../mock/gomock/mock_matcher/mock_matcher.go | 56 + .../github.com/golang/mock/mockgen/mockgen.go | 442 ++ .../golang/mock/mockgen/model/model.go | 431 ++ .../github.com/golang/mock/mockgen/parse.go | 447 ++ .../github.com/golang/mock/mockgen/reflect.go | 163 + .../mockgen/tests/unexported_method/README.md | 1 + .../tests/unexported_method/bugreport.go | 15 + .../tests/unexported_method/bugreport_mock.go | 44 + .../tests/unexported_method/bugreport_test.go | 17 + .../github.com/golang/mock/sample/README.md | 16 + .../golang/mock/sample/imp1/imp1.go | 17 + .../golang/mock/sample/imp2/imp2.go | 3 + .../golang/mock/sample/imp3/imp3.go | 3 + .../golang/mock/sample/imp4/imp4.go | 3 + .../golang/mock/sample/mock_user/mock_user.go | 383 ++ vendor/github.com/golang/mock/sample/user.go | 114 + .../golang/mock/sample/user_test.go | 122 + .../kubernetes-csi/csi-test/.gitignore | 14 + .../kubernetes-csi/csi-test/.travis.yml | 12 + .../kubernetes-csi/csi-test/LICENSE | 201 + .../kubernetes-csi/csi-test/README.md | 17 + .../kubernetes-csi/csi-test/driver/driver.go | 113 + .../csi-test/driver/driver.mock.go | 1243 ++++ .../kubernetes-csi/csi-test/glide.lock | 67 + .../kubernetes-csi/csi-test/glide.yaml | 16 + .../kubernetes-csi/csi-test/test/co_test.go | 125 + .../csi-test/test/driver_test.go | 138 + .../csi-test/utils/safegoroutinetester.go | 40 + .../x/net/dns/dnsmessage/message.go | 10 +- .../x/net/dns/dnsmessage/message_test.go | 25 + vendor/golang.org/x/sys/unix/linux/Dockerfile | 8 +- vendor/golang.org/x/sys/unix/linux/mkall.go | 3 +- vendor/golang.org/x/sys/unix/syscall.go | 16 - .../golang.org/x/sys/unix/syscall_openbsd.go | 10 +- .../golang.org/x/sys/unix/syscall_solaris.go | 9 + vendor/golang.org/x/sys/unix/timestruct.go | 22 + vendor/golang.org/x/sys/unix/types_openbsd.go | 18 + vendor/golang.org/x/sys/unix/types_solaris.go | 18 + .../x/sys/unix/zerrors_linux_386.go | 2 
+- .../x/sys/unix/zerrors_linux_amd64.go | 4 +- .../x/sys/unix/zerrors_linux_arm.go | 2 +- .../x/sys/unix/zerrors_linux_arm64.go | 4 +- .../x/sys/unix/zerrors_linux_mips.go | 2 +- .../x/sys/unix/zerrors_linux_mips64.go | 4 +- .../x/sys/unix/zerrors_linux_mips64le.go | 4 +- .../x/sys/unix/zerrors_linux_mipsle.go | 2 +- .../x/sys/unix/zerrors_linux_ppc64.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 4 +- .../x/sys/unix/zerrors_linux_s390x.go | 4 +- .../golang.org/x/sys/unix/zptrace386_linux.go | 2 +- .../golang.org/x/sys/unix/zptracearm_linux.go | 2 +- .../x/sys/unix/zptracemips_linux.go | 2 +- .../x/sys/unix/zptracemipsle_linux.go | 2 +- .../x/sys/unix/zsyscall_openbsd_386.go | 11 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 11 + .../x/sys/unix/zsyscall_openbsd_arm.go | 11 + .../x/sys/unix/zsyscall_solaris_amd64.go | 12 + .../x/sys/unix/ztypes_linux_sparc64.go | 2 - .../x/sys/unix/ztypes_openbsd_386.go | 23 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 23 +- .../x/sys/unix/ztypes_openbsd_arm.go | 23 +- .../x/sys/unix/ztypes_solaris_amd64.go | 19 + vendor/google.golang.org/genproto/.travis.yml | 12 + .../genproto/CONTRIBUTING.md | 27 + vendor/google.golang.org/genproto/LICENSE | 202 + vendor/google.golang.org/genproto/README.md | 28 + .../api/annotations/annotations.pb.go | 64 + .../googleapis/api/annotations/http.pb.go | 566 ++ .../googleapis/api/authorization_config.pb.go | 80 + .../api/configchange/config_change.pb.go | 189 + .../api/distribution/distribution.pb.go | 506 ++ .../googleapis/api/experimental.pb.go | 56 + .../googleapis/api/httpbody/httpbody.pb.go | 114 + .../genproto/googleapis/api/label/label.pb.go | 119 + .../googleapis/api/metric/metric.pb.go | 358 + .../api/monitoredres/monitored_resource.pb.go | 180 + .../googleapis/api/serviceconfig/auth.pb.go | 393 ++ .../api/serviceconfig/backend.pb.go | 98 + .../api/serviceconfig/billing.pb.go | 150 + .../api/serviceconfig/consumer.pb.go | 158 + .../api/serviceconfig/context.pb.go | 114 + 
.../api/serviceconfig/control.pb.go | 55 + .../api/serviceconfig/documentation.pb.go | 267 + .../api/serviceconfig/endpoint.pb.go | 130 + .../googleapis/api/serviceconfig/log.pb.go | 98 + .../api/serviceconfig/logging.pb.go | 135 + .../api/serviceconfig/monitoring.pb.go | 143 + .../googleapis/api/serviceconfig/quota.pb.go | 391 ++ .../api/serviceconfig/service.pb.go | 368 ++ .../api/serviceconfig/source_info.pb.go | 55 + .../api/serviceconfig/system_parameter.pb.go | 169 + .../googleapis/api/serviceconfig/usage.pb.go | 157 + .../api/servicecontrol/v1/check_error.pb.go | 202 + .../api/servicecontrol/v1/distribution.pb.go | 430 ++ .../api/servicecontrol/v1/log_entry.pb.go | 258 + .../api/servicecontrol/v1/metric_value.pb.go | 324 + .../api/servicecontrol/v1/operation.pb.go | 215 + .../v1/service_controller.pb.go | 484 ++ .../api/servicemanagement/v1/resources.pb.go | 802 +++ .../servicemanagement/v1/servicemanager.pb.go | 1514 +++++ .../appengine/legacy/audit_data.pb.go | 83 + .../appengine/logging/v1/request_log.pb.go | 538 ++ .../googleapis/appengine/v1/app_yaml.pb.go | 900 +++ .../googleapis/appengine/v1/appengine.pb.go | 1359 ++++ .../googleapis/appengine/v1/application.pb.go | 222 + .../googleapis/appengine/v1/audit_data.pb.go | 208 + .../googleapis/appengine/v1/deploy.pb.go | 183 + .../googleapis/appengine/v1/instance.pb.go | 266 + .../googleapis/appengine/v1/location.pb.go | 71 + .../googleapis/appengine/v1/operation.pb.go | 108 + .../googleapis/appengine/v1/service.pb.go | 165 + .../googleapis/appengine/v1/version.pb.go | 1072 +++ .../v1alpha1/embedded_assistant.pb.go | 969 +++ .../cluster/v1/bigtable_cluster_data.pb.go | 252 + .../cluster/v1/bigtable_cluster_service.pb.go | 472 ++ .../bigtable_cluster_service_messages.pb.go | 376 ++ .../admin/table/v1/bigtable_table_data.pb.go | 451 ++ .../table/v1/bigtable_table_service.pb.go | 423 ++ .../v1/bigtable_table_service_messages.pb.go | 400 ++ .../admin/v2/bigtable_instance_admin.pb.go | 962 +++ 
.../admin/v2/bigtable_table_admin.pb.go | 900 +++ .../googleapis/bigtable/admin/v2/common.pb.go | 69 + .../bigtable/admin/v2/instance.pb.go | 289 + .../googleapis/bigtable/admin/v2/table.pb.go | 417 ++ .../bigtable/v1/bigtable_data.pb.go | 1980 ++++++ .../bigtable/v1/bigtable_service.pb.go | 387 ++ .../v1/bigtable_service_messages.pb.go | 759 +++ .../googleapis/bigtable/v2/bigtable.pb.go | 1168 ++++ .../googleapis/bigtable/v2/data.pb.go | 2118 ++++++ .../googleapis/bytestream/bytestream.pb.go | 555 ++ .../googleapis/cloud/audit/audit_log.pb.go | 310 + .../bigquery/logging/v1/audit_data.pb.go | 2225 +++++++ .../cloud/billing/v1/cloud_billing.pb.go | 688 ++ .../cloud/dataproc/v1/clusters.pb.go | 1332 ++++ .../googleapis/cloud/dataproc/v1/jobs.pb.go | 2196 +++++++ .../cloud/dataproc/v1/operations.pb.go | 201 + .../cloud/functions/v1beta2/functions.pb.go | 1203 ++++ .../cloud/functions/v1beta2/operations.pb.go | 116 + .../cloud/language/v1/language_service.pb.go | 2460 +++++++ .../language/v1beta1/language_service.pb.go | 2311 +++++++ .../language/v1beta2/language_service.pb.go | 2596 ++++++++ .../googleapis/cloud/ml/v1/job_service.pb.go | 1822 ++++++ .../cloud/ml/v1/model_service.pb.go | 1049 +++ .../cloud/ml/v1/operation_metadata.pb.go | 161 + .../cloud/ml/v1/prediction_service.pb.go | 343 + .../cloud/ml/v1/project_service.pb.go | 177 + .../cloud/ml/v1beta1/job_service.pb.go | 1823 ++++++ .../cloud/ml/v1beta1/model_service.pb.go | 1050 +++ .../cloud/ml/v1beta1/operation_metadata.pb.go | 161 + .../cloud/ml/v1beta1/prediction_service.pb.go | 343 + .../cloud/ml/v1beta1/project_service.pb.go | 178 + .../runtimeconfig/v1beta1/resources.pb.go | 590 ++ .../runtimeconfig/v1beta1/runtimeconfig.pb.go | 1354 ++++ .../cloud/speech/v1/cloud_speech.pb.go | 1254 ++++ .../cloud/speech/v1beta1/cloud_speech.pb.go | 1195 ++++ .../cloud/speech/v1p1beta1/cloud_speech.pb.go | 1646 +++++ .../cloud/support/common/common.pb.go | 864 +++ .../support/v1alpha1/cloud_support.pb.go | 806 +++ 
.../v1beta1/video_intelligence.pb.go | 989 +++ .../v1beta2/video_intelligence.pb.go | 1157 ++++ .../googleapis/cloud/vision/v1/geometry.pb.go | 171 + .../cloud/vision/v1/image_annotator.pb.go | 1438 ++++ .../cloud/vision/v1/text_annotation.pb.go | 537 ++ .../cloud/vision/v1/web_detection.pb.go | 193 + .../container/v1/cluster_service.pb.go | 4704 +++++++++++++ .../admin/v1beta1/datastore_admin.pb.go | 732 +++ .../googleapis/datastore/v1/datastore.pb.go | 1596 +++++ .../googleapis/datastore/v1/entity.pb.go | 762 +++ .../googleapis/datastore/v1/query.pb.go | 969 +++ .../datastore/v1beta3/datastore.pb.go | 1597 +++++ .../googleapis/datastore/v1beta3/entity.pb.go | 763 +++ .../googleapis/datastore/v1beta3/query.pb.go | 969 +++ .../devtools/build/v1/build_events.pb.go | 834 +++ .../devtools/build/v1/build_status.pb.go | 130 + .../build/v1/publish_build_event.pb.go | 465 ++ .../devtools/cloudbuild/v1/cloudbuild.pb.go | 2213 +++++++ .../clouddebugger/v2/controller.pb.go | 475 ++ .../devtools/clouddebugger/v2/data.pb.go | 889 +++ .../devtools/clouddebugger/v2/debugger.pb.go | 647 ++ .../clouderrorreporting/v1beta1/common.pb.go | 433 ++ .../v1beta1/error_group_service.pb.go | 210 + .../v1beta1/error_stats_service.pb.go | 909 +++ .../v1beta1/report_errors_service.pb.go | 242 + .../devtools/cloudprofiler/v2/profiler.pb.go | 464 ++ .../devtools/cloudtrace/v1/trace.pb.go | 661 ++ .../v1test/remote_execution.pb.go | 2013 ++++++ .../devtools/source/v1/source_context.pb.go | 945 +++ .../devtools/sourcerepo/v1/sourcerepo.pb.go | 634 ++ .../example/library/v1/library.pb.go | 981 +++ .../googleapis/firestore/v1beta1/common.pb.go | 497 ++ .../firestore/v1beta1/document.pb.go | 566 ++ .../firestore/v1beta1/firestore.pb.go | 3124 +++++++++ .../googleapis/firestore/v1beta1/query.pb.go | 782 +++ .../googleapis/firestore/v1beta1/write.pb.go | 609 ++ .../googleapis/genomics/v1/annotations.pb.go | 2227 +++++++ .../googleapis/genomics/v1/cigar.pb.go | 168 + 
.../googleapis/genomics/v1/datasets.pb.go | 777 +++ .../googleapis/genomics/v1/operations.pb.go | 189 + .../googleapis/genomics/v1/position.pb.go | 78 + .../googleapis/genomics/v1/range.pb.go | 75 + .../genomics/v1/readalignment.pb.go | 393 ++ .../googleapis/genomics/v1/readgroup.pb.go | 275 + .../googleapis/genomics/v1/readgroupset.pb.go | 132 + .../googleapis/genomics/v1/reads.pb.go | 1414 ++++ .../googleapis/genomics/v1/references.pb.go | 864 +++ .../googleapis/genomics/v1/variants.pb.go | 2808 ++++++++ .../genomics/v1alpha2/pipelines.pb.go | 1838 ++++++ .../googleapis/iam/admin/v1/iam.pb.go | 2523 +++++++ .../googleapis/iam/v1/iam_policy.pb.go | 337 + .../iam/v1/logging/audit_data.pb.go | 75 + .../genproto/googleapis/iam/v1/policy.pb.go | 269 + .../logging/type/http_request.pb.go | 233 + .../logging/type/log_severity.pb.go | 110 + .../googleapis/logging/v2/log_entry.pb.go | 511 ++ .../googleapis/logging/v2/logging.pb.go | 781 +++ .../logging/v2/logging_config.pb.go | 1203 ++++ .../logging/v2/logging_metrics.pb.go | 669 ++ .../googleapis/longrunning/operations.pb.go | 596 ++ .../googleapis/monitoring/v3/common.pb.go | 701 ++ .../googleapis/monitoring/v3/group.pb.go | 125 + .../monitoring/v3/group_service.pb.go | 753 +++ .../googleapis/monitoring/v3/metric.pb.go | 163 + .../monitoring/v3/metric_service.pb.go | 928 +++ .../googleapis/privacy/dlp/v2beta1/dlp.pb.go | 2238 +++++++ .../privacy/dlp/v2beta1/storage.pb.go | 874 +++ .../googleapis/pubsub/v1/pubsub.pb.go | 2523 +++++++ .../googleapis/pubsub/v1beta2/pubsub.pb.go | 1459 +++++ .../genproto/googleapis/rpc/code/code.pb.go | 252 + .../rpc/errdetails/error_details.pb.go | 495 ++ .../googleapis/rpc/status/status.pb.go | 143 + .../database/v1/spanner_database_admin.pb.go | 920 +++ .../instance/v1/spanner_instance_admin.pb.go | 1281 ++++ .../genproto/googleapis/spanner/v1/keys.pb.go | 438 ++ .../googleapis/spanner/v1/mutation.pb.go | 347 + .../googleapis/spanner/v1/query_plan.pb.go | 285 + 
.../googleapis/spanner/v1/result_set.pb.go | 311 + .../googleapis/spanner/v1/spanner.pb.go | 1215 ++++ .../googleapis/spanner/v1/transaction.pb.go | 830 +++ .../genproto/googleapis/spanner/v1/type.pb.go | 216 + .../storagetransfer/v1/transfer.pb.go | 655 ++ .../storagetransfer/v1/transfer_types.pb.go | 1218 ++++ .../streetview/publish/v1/resources.pb.go | 398 ++ .../streetview/publish/v1/rpcmessages.pb.go | 470 ++ .../publish/v1/streetview_publish.pb.go | 524 ++ .../googleapis/tracing/v1/trace.pb.go | 888 +++ .../googleapis/type/color/color.pb.go | 221 + .../genproto/googleapis/type/date/date.pb.go | 93 + .../googleapis/type/dayofweek/dayofweek.pb.go | 100 + .../googleapis/type/latlng/latlng.pb.go | 114 + .../googleapis/type/money/money.pb.go | 92 + .../type/postaladdress/postal_address.pb.go | 240 + .../googleapis/type/timeofday/timeofday.pb.go | 100 + .../googleapis/watcher/v1/watch.pb.go | 376 ++ .../genproto/protobuf/api/api.pb.go | 350 + .../protobuf/field_mask/field_mask.pb.go | 267 + .../genproto/protobuf/ptype/type.pb.go | 538 ++ .../source_context/source_context.pb.go | 70 + vendor/google.golang.org/genproto/regen.go | 123 + vendor/google.golang.org/genproto/regen.sh | 77 + .../grpc/.github/ISSUE_TEMPLATE | 14 + vendor/google.golang.org/grpc/.please-update | 0 vendor/google.golang.org/grpc/.travis.yml | 20 + vendor/google.golang.org/grpc/AUTHORS | 1 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 32 + .../grpc/Documentation/gomock-example.md | 182 + .../grpc/Documentation/grpc-auth-support.md | 41 + .../grpc/Documentation/grpc-metadata.md | 204 + .../server-reflection-tutorial.md | 152 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/Makefile | 45 + vendor/google.golang.org/grpc/README.md | 45 + vendor/google.golang.org/grpc/backoff.go | 98 + vendor/google.golang.org/grpc/backoff_test.go | 29 + vendor/google.golang.org/grpc/balancer.go | 408 ++ .../grpc/balancer/balancer.go | 206 + .../grpc/balancer/roundrobin/roundrobin.go | 
241 + .../balancer/roundrobin/roundrobin_test.go | 470 ++ .../grpc/balancer_conn_wrappers.go | 248 + .../google.golang.org/grpc/balancer_test.go | 798 +++ .../grpc/balancer_v1_wrapper.go | 367 ++ .../grpc/benchmark/benchmain/main.go | 499 ++ .../grpc/benchmark/benchmark.go | 364 ++ .../grpc/benchmark/benchmark_test.go | 85 + .../grpc/benchmark/benchresult/main.go | 133 + .../grpc/benchmark/client/main.go | 180 + .../grpc/benchmark/grpc_testing/control.pb.go | 1194 ++++ .../grpc/benchmark/grpc_testing/control.proto | 186 + .../benchmark/grpc_testing/messages.pb.go | 479 ++ .../benchmark/grpc_testing/messages.proto | 157 + .../benchmark/grpc_testing/payloads.pb.go | 250 + .../benchmark/grpc_testing/payloads.proto | 40 + .../benchmark/grpc_testing/services.pb.go | 442 ++ .../benchmark/grpc_testing/services.proto | 56 + .../grpc/benchmark/grpc_testing/stats.pb.go | 208 + .../grpc/benchmark/grpc_testing/stats.proto | 55 + .../grpc/benchmark/latency/latency.go | 316 + .../grpc/benchmark/latency/latency_test.go | 353 + .../benchmark/primitives/primitives_test.go | 235 + .../grpc/benchmark/server/main.go | 53 + .../grpc/benchmark/stats/histogram.go | 222 + .../grpc/benchmark/stats/stats.go | 291 + .../grpc/benchmark/stats/util.go | 208 + .../grpc/benchmark/worker/benchmark_client.go | 392 ++ .../grpc/benchmark/worker/benchmark_server.go | 184 + .../grpc/benchmark/worker/main.go | 229 + .../grpc/benchmark/worker/util.go | 35 + vendor/google.golang.org/grpc/call.go | 307 + vendor/google.golang.org/grpc/call_test.go | 292 + vendor/google.golang.org/grpc/clientconn.go | 1142 ++++ .../google.golang.org/grpc/clientconn_test.go | 390 ++ vendor/google.golang.org/grpc/codec.go | 104 + .../grpc/codec_benchmark_test.go | 100 + vendor/google.golang.org/grpc/codec_test.go | 128 + vendor/google.golang.org/grpc/codegen.sh | 17 + .../grpc/codes/code_string.go | 16 + vendor/google.golang.org/grpc/codes/codes.go | 144 + .../grpc/connectivity/connectivity.go | 72 + 
.../grpc/credentials/credentials.go | 219 + .../grpc/credentials/credentials_test.go | 206 + .../grpc/credentials/credentials_util_go17.go | 60 + .../grpc/credentials/credentials_util_go18.go | 38 + .../credentials/credentials_util_pre_go17.go | 57 + .../grpc/credentials/oauth/oauth.go | 173 + vendor/google.golang.org/grpc/doc.go | 24 + .../google.golang.org/grpc/examples/README.md | 60 + .../grpc/examples/gotutorial.md | 431 ++ .../helloworld/greeter_client/main.go | 54 + .../helloworld/greeter_server/main.go | 57 + .../helloworld/helloworld/helloworld.pb.go | 164 + .../helloworld/helloworld/helloworld.proto | 37 + .../helloworld/mock_helloworld/hw_mock.go | 48 + .../mock_helloworld/hw_mock_test.go | 67 + .../grpc/examples/route_guide/README.md | 35 + .../examples/route_guide/client/client.go | 184 + .../route_guide/mock_routeguide/rg_mock.go | 200 + .../mock_routeguide/rg_mock_test.go | 82 + .../route_guide/routeguide/route_guide.pb.go | 543 ++ .../route_guide/routeguide/route_guide.proto | 110 + .../examples/route_guide/server/server.go | 233 + .../route_guide/testdata/route_guide_db.json | 601 ++ vendor/google.golang.org/grpc/grpclb.go | 704 ++ .../grpclb/grpc_lb_v1/messages/messages.pb.go | 615 ++ .../grpclb/grpc_lb_v1/messages/messages.proto | 155 + .../grpclb/grpc_lb_v1/service/service.pb.go | 154 + .../grpclb/grpc_lb_v1/service/service.proto | 26 + .../grpc/grpclb/grpclb_test.go | 891 +++ .../grpc/grpclog/glogger/glogger.go | 86 + .../google.golang.org/grpc/grpclog/grpclog.go | 123 + .../google.golang.org/grpc/grpclog/logger.go | 83 + .../grpc/grpclog/loggerv2.go | 195 + .../grpc/grpclog/loggerv2_test.go | 62 + .../grpc/health/grpc_health_v1/health.pb.go | 190 + .../grpc/health/grpc_health_v1/health.proto | 34 + .../google.golang.org/grpc/health/health.go | 72 + vendor/google.golang.org/grpc/interceptor.go | 75 + .../grpc/internal/internal.go | 34 + .../grpc/interop/client/client.go | 190 + .../grpc/interop/grpc_testing/test.pb.go | 905 +++ 
.../grpc/interop/grpc_testing/test.proto | 174 + .../interop/http2/negative_http2_client.go | 159 + .../grpc/interop/server/server.go | 65 + .../grpc/interop/test_utils.go | 748 +++ .../grpc/keepalive/keepalive.go | 65 + .../grpc/metadata/metadata.go | 137 + .../grpc/metadata/metadata_test.go | 71 + .../grpc/naming/dns_resolver.go | 290 + .../grpc/naming/dns_resolver_test.go | 315 + vendor/google.golang.org/grpc/naming/go17.go | 34 + .../grpc/naming/go17_test.go | 42 + vendor/google.golang.org/grpc/naming/go18.go | 28 + .../grpc/naming/go18_test.go | 41 + .../google.golang.org/grpc/naming/naming.go | 59 + vendor/google.golang.org/grpc/peer/peer.go | 51 + .../google.golang.org/grpc/picker_wrapper.go | 141 + .../grpc/picker_wrapper_test.go | 160 + vendor/google.golang.org/grpc/pickfirst.go | 95 + .../google.golang.org/grpc/pickfirst_test.go | 352 + vendor/google.golang.org/grpc/proxy.go | 131 + vendor/google.golang.org/grpc/proxy_test.go | 182 + .../grpc/reflection/README.md | 18 + .../grpc_reflection_v1alpha/reflection.pb.go | 763 +++ .../grpc_reflection_v1alpha/reflection.proto | 136 + .../grpc/reflection/grpc_testing/proto2.pb.go | 77 + .../grpc/reflection/grpc_testing/proto2.proto | 22 + .../reflection/grpc_testing/proto2_ext.pb.go | 82 + .../reflection/grpc_testing/proto2_ext.proto | 30 + .../reflection/grpc_testing/proto2_ext2.pb.go | 71 + .../reflection/grpc_testing/proto2_ext2.proto | 28 + .../grpc/reflection/grpc_testing/test.pb.go | 247 + .../grpc/reflection/grpc_testing/test.proto | 35 + .../reflection/grpc_testingv3/testv3.pb.go | 236 + .../reflection/grpc_testingv3/testv3.proto | 21 + .../grpc/reflection/serverreflection.go | 398 ++ .../grpc/reflection/serverreflection_test.go | 520 ++ .../grpc/resolver/dns/dns_resolver.go | 379 ++ .../grpc/resolver/dns/dns_resolver_test.go | 898 +++ .../grpc/resolver/dns/go17.go | 35 + .../grpc/resolver/dns/go17_test.go | 52 + .../grpc/resolver/dns/go18.go | 29 + .../grpc/resolver/dns/go18_test.go | 51 + 
.../grpc/resolver/manual/manual.go | 80 + .../grpc/resolver/passthrough/passthrough.go | 58 + .../grpc/resolver/resolver.go | 143 + .../grpc/resolver_conn_wrapper.go | 139 + .../grpc/resolver_conn_wrapper_test.go | 47 + vendor/google.golang.org/grpc/rpc_util.go | 572 ++ .../google.golang.org/grpc/rpc_util_test.go | 237 + vendor/google.golang.org/grpc/server.go | 1177 ++++ vendor/google.golang.org/grpc/server_test.go | 102 + .../grpc/stats/grpc_testing/test.pb.go | 369 ++ .../grpc/stats/grpc_testing/test.proto | 43 + .../google.golang.org/grpc/stats/handlers.go | 64 + vendor/google.golang.org/grpc/stats/stats.go | 294 + .../grpc/stats/stats_test.go | 1265 ++++ .../google.golang.org/grpc/status/status.go | 168 + .../grpc/status/status_test.go | 261 + vendor/google.golang.org/grpc/stream.go | 657 ++ .../grpc/stress/client/main.go | 336 + .../grpc/stress/grpc_testing/metrics.pb.go | 374 ++ .../grpc/stress/grpc_testing/metrics.proto | 49 + .../grpc/stress/metrics_client/main.go | 82 + vendor/google.golang.org/grpc/tap/tap.go | 51 + .../grpc/test/bufconn/bufconn.go | 229 + .../grpc/test/bufconn/bufconn_test.go | 149 + .../grpc/test/codec_perf/perf.pb.go | 62 + .../grpc/test/codec_perf/perf.proto | 25 + .../grpc/test/end2end_test.go | 5030 ++++++++++++++ .../grpc/test/grpc_testing/test.pb.go | 788 +++ .../grpc/test/grpc_testing/test.proto | 154 + .../grpc/test/leakcheck/leakcheck.go | 118 + .../grpc/test/leakcheck/leakcheck_test.go | 76 + vendor/google.golang.org/grpc/test/race.go | 24 + .../grpc/test/servertester.go | 280 + vendor/google.golang.org/grpc/testdata/ca.pem | 15 + .../grpc/testdata/server1.key | 16 + .../grpc/testdata/server1.pem | 16 + .../grpc/testdata/testdata.go | 63 + vendor/google.golang.org/grpc/trace.go | 113 + .../grpc/transport/bdp_estimator.go | 143 + .../grpc/transport/control.go | 311 + .../grpc/transport/handler_server.go | 414 ++ .../grpc/transport/handler_server_test.go | 462 ++ .../grpc/transport/http2_client.go | 1322 ++++ 
.../grpc/transport/http2_server.go | 1188 ++++ .../grpc/transport/http_util.go | 489 ++ .../grpc/transport/http_util_test.go | 175 + .../google.golang.org/grpc/transport/log.go | 50 + .../grpc/transport/transport.go | 777 +++ .../grpc/transport/transport_test.go | 2158 ++++++ vendor/google.golang.org/grpc/vet.sh | 78 + 485 files changed, 201768 insertions(+), 55 deletions(-) create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml create mode 100644 vendor/github.com/container-storage-interface/spec/.gitignore create mode 100644 vendor/github.com/container-storage-interface/spec/.travis.yml create mode 100644 vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md create mode 100644 vendor/github.com/container-storage-interface/spec/LICENSE create mode 100644 vendor/github.com/container-storage-interface/spec/Makefile create mode 100644 vendor/github.com/container-storage-interface/spec/OWNERS create mode 100644 vendor/github.com/container-storage-interface/spec/README.md create mode 100644 vendor/github.com/container-storage-interface/spec/VERSION create mode 100644 vendor/github.com/container-storage-interface/spec/csi.proto create mode 100644 vendor/github.com/container-storage-interface/spec/lib/README.md create mode 100644 vendor/github.com/container-storage-interface/spec/lib/cxx/Makefile create mode 100644 vendor/github.com/container-storage-interface/spec/lib/go/.gitignore create mode 100644 vendor/github.com/container-storage-interface/spec/lib/go/Makefile create mode 100644 vendor/github.com/container-storage-interface/spec/lib/go/README.md create mode 100644 vendor/github.com/container-storage-interface/spec/lib/go/csi.go create mode 100644 vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go create mode 100755 vendor/github.com/container-storage-interface/spec/logo.png create mode 100644 vendor/github.com/container-storage-interface/spec/spec.md create mode 100644 vendor/github.com/golang/mock/.gitignore create mode 
100644 vendor/github.com/golang/mock/.travis.yml create mode 100644 vendor/github.com/golang/mock/AUTHORS create mode 100644 vendor/github.com/golang/mock/CONTRIBUTORS create mode 100644 vendor/github.com/golang/mock/LICENSE create mode 100644 vendor/github.com/golang/mock/README.md create mode 100755 vendor/github.com/golang/mock/ci/check_go_fmt.sh create mode 100755 vendor/github.com/golang/mock/ci/check_go_generate.sh create mode 100644 vendor/github.com/golang/mock/gomock/call.go create mode 100644 vendor/github.com/golang/mock/gomock/call_test.go create mode 100644 vendor/github.com/golang/mock/gomock/callset.go create mode 100644 vendor/github.com/golang/mock/gomock/controller.go create mode 100644 vendor/github.com/golang/mock/gomock/controller_test.go create mode 100644 vendor/github.com/golang/mock/gomock/matchers.go create mode 100644 vendor/github.com/golang/mock/gomock/matchers_test.go create mode 100644 vendor/github.com/golang/mock/gomock/mock_matcher/mock_matcher.go create mode 100644 vendor/github.com/golang/mock/mockgen/mockgen.go create mode 100644 vendor/github.com/golang/mock/mockgen/model/model.go create mode 100644 vendor/github.com/golang/mock/mockgen/parse.go create mode 100644 vendor/github.com/golang/mock/mockgen/reflect.go create mode 100644 vendor/github.com/golang/mock/mockgen/tests/unexported_method/README.md create mode 100644 vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport.go create mode 100644 vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport_mock.go create mode 100644 vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport_test.go create mode 100644 vendor/github.com/golang/mock/sample/README.md create mode 100644 vendor/github.com/golang/mock/sample/imp1/imp1.go create mode 100644 vendor/github.com/golang/mock/sample/imp2/imp2.go create mode 100644 vendor/github.com/golang/mock/sample/imp3/imp3.go create mode 100644 vendor/github.com/golang/mock/sample/imp4/imp4.go 
create mode 100644 vendor/github.com/golang/mock/sample/mock_user/mock_user.go create mode 100644 vendor/github.com/golang/mock/sample/user.go create mode 100644 vendor/github.com/golang/mock/sample/user_test.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/.gitignore create mode 100644 vendor/github.com/kubernetes-csi/csi-test/.travis.yml create mode 100644 vendor/github.com/kubernetes-csi/csi-test/LICENSE create mode 100644 vendor/github.com/kubernetes-csi/csi-test/README.md create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/driver.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/glide.lock create mode 100644 vendor/github.com/kubernetes-csi/csi-test/glide.yaml create mode 100644 vendor/github.com/kubernetes-csi/csi-test/test/co_test.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go create mode 100644 vendor/google.golang.org/genproto/.travis.yml create mode 100644 vendor/google.golang.org/genproto/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 vendor/google.golang.org/genproto/README.md create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/authorization_config.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/configchange/config_change.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/experimental.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/quota.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/source_info.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/check_error.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/distribution.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/log_entry.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/metric_value.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/operation.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/service_controller.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/resources.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/servicemanager.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/legacy/audit_data.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/logging/v1/request_log.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/app_yaml.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/appengine.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/application.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/audit_data.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/deploy.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/instance.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/location.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/operation.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/appengine/v1/version.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1/embedded_assistant.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_data.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_data.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_service_messages.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_data.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service_messages.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/bytestream/bytestream.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/audit/audit_log.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/bigquery/logging/v1/audit_data.pb.go create mode 
100644 vendor/google.golang.org/genproto/googleapis/cloud/billing/v1/cloud_billing.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/jobs.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/operations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/functions/v1beta2/functions.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/functions/v1beta2/operations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/language/v1/language_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/language/v1beta1/language_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/language/v1beta2/language_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/job_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/model_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/operation_metadata.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/prediction_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/project_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/job_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/model_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/operation_metadata.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/prediction_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/project_service.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1/resources.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1/runtimeconfig.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/speech/v1/cloud_speech.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/speech/v1beta1/cloud_speech.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/support/common/common.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/support/v1alpha1/cloud_support.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1/video_intelligence.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2/video_intelligence.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/geometry.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/image_annotator.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/text_annotation.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/web_detection.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/datastore/admin/v1beta1/datastore_admin.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/datastore/v1/datastore.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/datastore/v1/entity.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/datastore/v1/query.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/datastore.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/entity.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/query.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/build/v1/build_events.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/build/v1/build_status.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/build/v1/publish_build_event.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudbuild/v1/cloudbuild.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/controller.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/data.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/debugger.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/common.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/error_group_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/error_stats_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/report_errors_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2/profiler.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v1/trace.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/remoteexecution/v1test/remote_execution.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/source/v1/source_context.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/sourcerepo/v1/sourcerepo.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/example/library/v1/library.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/common.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/document.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/firestore.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/query.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/write.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/annotations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/cigar.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/datasets.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/operations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/position.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/range.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/readalignment.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/readgroup.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/readgroupset.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/reads.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/references.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1/variants.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/genomics/v1alpha2/pipelines.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/iam/admin/v1/iam.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/iam/v1/logging/audit_data.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/type/http_request.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/logging/v2/logging_metrics.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/dlp.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/storage.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/pubsub/v1beta2/pubsub.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/spanner/admin/database/v1/spanner_database_admin.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/spanner/admin/instance/v1/spanner_instance_admin.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/storagetransfer/v1/transfer.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/storagetransfer/v1/transfer_types.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/resources.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/rpcmessages.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/streetview_publish.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/tracing/v1/trace.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/color/color.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/dayofweek/dayofweek.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/latlng/latlng.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/money/money.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/postaladdress/postal_address.pb.go 
create mode 100644 vendor/google.golang.org/genproto/googleapis/type/timeofday/timeofday.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/watcher/v1/watch.pb.go create mode 100644 vendor/google.golang.org/genproto/protobuf/api/api.pb.go create mode 100644 vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go create mode 100644 vendor/google.golang.org/genproto/protobuf/ptype/type.pb.go create mode 100644 vendor/google.golang.org/genproto/protobuf/source_context/source_context.pb.go create mode 100644 vendor/google.golang.org/genproto/regen.go create mode 100755 vendor/google.golang.org/genproto/regen.sh create mode 100644 vendor/google.golang.org/grpc/.github/ISSUE_TEMPLATE create mode 100644 vendor/google.golang.org/grpc/.please-update create mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/grpc/Documentation/gomock-example.md create mode 100644 vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md create mode 100644 vendor/google.golang.org/grpc/Documentation/grpc-metadata.md create mode 100644 vendor/google.golang.org/grpc/Documentation/server-reflection-tutorial.md create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/backoff_test.go create mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 
100644 vendor/google.golang.org/grpc/balancer_test.go create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/benchmark/benchmain/main.go create mode 100644 vendor/google.golang.org/grpc/benchmark/benchmark.go create mode 100644 vendor/google.golang.org/grpc/benchmark/benchmark_test.go create mode 100644 vendor/google.golang.org/grpc/benchmark/benchresult/main.go create mode 100644 vendor/google.golang.org/grpc/benchmark/client/main.go create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go create mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto create mode 100644 vendor/google.golang.org/grpc/benchmark/latency/latency.go create mode 100644 vendor/google.golang.org/grpc/benchmark/latency/latency_test.go create mode 100644 vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go create mode 100644 vendor/google.golang.org/grpc/benchmark/server/main.go create mode 100644 vendor/google.golang.org/grpc/benchmark/stats/histogram.go create mode 100644 vendor/google.golang.org/grpc/benchmark/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/benchmark/stats/util.go create mode 100644 
vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go create mode 100644 vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go create mode 100644 vendor/google.golang.org/grpc/benchmark/worker/main.go create mode 100644 vendor/google.golang.org/grpc/benchmark/worker/util.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/call_test.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/clientconn_test.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codec_benchmark_test.go create mode 100644 vendor/google.golang.org/grpc/codec_test.go create mode 100755 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_test.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go17.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go18.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go create mode 100644 vendor/google.golang.org/grpc/credentials/oauth/oauth.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/examples/README.md create mode 100644 vendor/google.golang.org/grpc/examples/gotutorial.md create mode 100644 vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go create mode 100644 vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go create mode 100644 vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go create 
mode 100644 vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto create mode 100644 vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go create mode 100644 vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock_test.go create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/README.md create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/client/client.go create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock_test.go create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/server/server.go create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json create mode 100644 vendor/google.golang.org/grpc/grpclb.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto create mode 100644 vendor/google.golang.org/grpc/grpclb/grpclb_test.go create mode 100644 vendor/google.golang.org/grpc/grpclog/glogger/glogger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2_test.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go create mode 100644 
vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto create mode 100644 vendor/google.golang.org/grpc/health/health.go create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/interop/client/client.go create mode 100644 vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go create mode 100644 vendor/google.golang.org/grpc/interop/grpc_testing/test.proto create mode 100644 vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go create mode 100644 vendor/google.golang.org/grpc/interop/server/server.go create mode 100644 vendor/google.golang.org/grpc/interop/test_utils.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata_test.go create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver_test.go create mode 100644 vendor/google.golang.org/grpc/naming/go17.go create mode 100644 vendor/google.golang.org/grpc/naming/go17_test.go create mode 100644 vendor/google.golang.org/grpc/naming/go18.go create mode 100644 vendor/google.golang.org/grpc/naming/go18_test.go create mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper_test.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/pickfirst_test.go create mode 100644 vendor/google.golang.org/grpc/proxy.go create mode 100644 vendor/google.golang.org/grpc/proxy_test.go create mode 100644 vendor/google.golang.org/grpc/reflection/README.md create mode 100644 
vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection_test.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver_test.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go17.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go17_test.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go18.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go18_test.go create mode 100644 vendor/google.golang.org/grpc/resolver/manual/manual.go create mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 
vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/rpc_util_test.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/server_test.go create mode 100644 vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go create mode 100644 vendor/google.golang.org/grpc/stats/grpc_testing/test.proto create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/stats/stats_test.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/status/status_test.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/stress/client/main.go create mode 100644 vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go create mode 100644 vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto create mode 100644 vendor/google.golang.org/grpc/stress/metrics_client/main.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn.go create mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn_test.go create mode 100644 vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go create mode 100644 vendor/google.golang.org/grpc/test/codec_perf/perf.proto create mode 100644 vendor/google.golang.org/grpc/test/end2end_test.go create mode 100644 vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go create mode 100644 vendor/google.golang.org/grpc/test/grpc_testing/test.proto create mode 100644 vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go create mode 100644 vendor/google.golang.org/grpc/test/leakcheck/leakcheck_test.go create mode 100644 vendor/google.golang.org/grpc/test/race.go create 
mode 100644 vendor/google.golang.org/grpc/test/servertester.go create mode 100644 vendor/google.golang.org/grpc/testdata/ca.pem create mode 100644 vendor/google.golang.org/grpc/testdata/server1.key create mode 100644 vendor/google.golang.org/grpc/testdata/server1.pem create mode 100644 vendor/google.golang.org/grpc/testdata/testdata.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/transport/control.go create mode 100644 vendor/google.golang.org/grpc/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/transport/handler_server_test.go create mode 100644 vendor/google.golang.org/grpc/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/transport/http_util_test.go create mode 100644 vendor/google.golang.org/grpc/transport/log.go create mode 100644 vendor/google.golang.org/grpc/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/transport/transport_test.go create mode 100755 vendor/google.golang.org/grpc/vet.sh diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000..ea5817bd5 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,261 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/PuerkitoBio/purell" + packages = ["."] + revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/PuerkitoBio/urlesc" + packages = ["."] + revision = "de5bf2ad457846296e2031421a34e2568e304e35" + +[[projects]] + branch = "master" + name = "github.com/container-storage-interface/spec" + packages = ["lib/go/csi"] + revision = "9d4ce05b42cbfc75a7e0c198ab46e6399e01fbb5" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/emicklei/go-restful" + packages = [".","log"] + revision = "5741799b275a3c4a5a9623a993576d7545cf7b5c" + version = "v2.4.0" + +[[projects]] + name = "github.com/emicklei/go-restful-swagger12" + packages = ["."] + revision = "dcef7f55730566d41eae5db10e7d6981829720f6" + version = "1.0.1" + +[[projects]] + name = "github.com/ghodss/yaml" + packages = ["."] + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/go-openapi/jsonpointer" + packages = ["."] + revision = "779f45308c19820f1a69e9a4cd965f496e0da10f" + +[[projects]] + branch = "master" + name = "github.com/go-openapi/jsonreference" + packages = ["."] + revision = "36d33bfe519efae5632669801b180bf1a245da3b" + +[[projects]] + branch = "master" + name = "github.com/go-openapi/spec" + packages = ["."] + revision = "84b5bee7bcb76f3d17bcbaf421bac44bd5709ca6" + +[[projects]] + branch = "master" + name = "github.com/go-openapi/swag" + packages = ["."] + revision = "f3f9494671f93fcff853e3c6e9e948b3eb71e590" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = ["proto","sortkeys"] + revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02" + version = "v0.5" + +[[projects]] + branch = "master" + name = "github.com/golang/glog" + packages = ["."] + revision = 
"23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + branch = "master" + name = "github.com/golang/groupcache" + packages = ["lru"] + revision = "b710c8433bd175204919eb38776e944233235d03" + +[[projects]] + name = "github.com/golang/mock" + packages = ["gomock"] + revision = "13f360950a79f5864a972c786a10a50e44b69541" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/golang/protobuf" + packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"] + revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + +[[projects]] + branch = "master" + name = "github.com/google/btree" + packages = ["."] + revision = "316fb6d3f031ae8f4d457c6c5186b9e3ded70435" + +[[projects]] + branch = "master" + name = "github.com/google/gofuzz" + packages = ["."] + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + name = "github.com/googleapis/gnostic" + packages = ["OpenAPIv2","compiler","extensions"] + revision = "ee43cbb60db7bd22502942cccbc39059117352ab" + version = "v0.1.0" + +[[projects]] + branch = "master" + name = "github.com/gregjones/httpcache" + packages = [".","diskcache"] + revision = "c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/golang-lru" + packages = [".","simplelru"] + revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6" + +[[projects]] + branch = "master" + name = "github.com/howeyc/gopass" + packages = ["."] + revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8" + +[[projects]] + name = "github.com/imdario/mergo" + packages = ["."] + revision = "7fe0c75c13abdee74b09fcacef5ea1c6bba6a874" + version = "0.2.4" + +[[projects]] + name = "github.com/json-iterator/go" + packages = ["."] + revision = "6240e1e7983a85228f7fd9c3e1b6932d46ec58e2" + version = "1.0.3" + +[[projects]] + branch = "master" + name = "github.com/juju/ratelimit" + packages = ["."] + revision = "59fac5042749a5afb9af70e813da1dd5474f0167" + 
+[[projects]] + branch = "master" + name = "github.com/kubernetes-csi/csi-test" + packages = ["driver"] + revision = "3b78567d8ab0777981385a1337760095d21ba07f" + +[[projects]] + branch = "master" + name = "github.com/mailru/easyjson" + packages = ["buffer","jlexer","jwriter"] + revision = "4d347d79dea0067c945f374f990601decb08abb5" + +[[projects]] + branch = "master" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + name = "github.com/peterbourgon/diskv" + packages = ["."] + revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" + version = "v2.0.1" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + revision = "2509b142fb2b797aa7587dad548f113b2c0f20ce" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = ["context","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"] + revision = "c73622c77280266305273cb545f54516ced95b93" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix","windows"] + revision = "661970f62f5897bc0cd5fdca7e087ba8a98a8fa1" + +[[projects]] + branch = "master" + name = "golang.org/x/text" + packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"] + revision = "6eab0e8f74e86c598ec3b6fad4888e0c11482d48" + +[[projects]] + branch = "master" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "f676e0f3ac6395ff1a529ae59a6670878a8371a6" + +[[projects]] + name = "google.golang.org/grpc" + packages = 
[".","balancer","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","reflection","reflection/grpc_reflection_v1alpha","resolver","stats","status","tap","transport"] + revision = "61d37c5d657a47e4404fd6823bd598341a2595de" + version = "v1.7.1" + +[[projects]] + name = "gopkg.in/inf.v0" + packages = ["."] + revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" + version = "v0.9.0" + +[[projects]] + branch = "v2" + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" + +[[projects]] + branch = "master" + name = "k8s.io/api" + packages = ["admissionregistration/v1alpha1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"] + revision = "218912509d74a117d05a718bb926d0948e531c20" + +[[projects]] + branch = "master" + name = "k8s.io/apimachinery" + packages = 
["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/reflect"] + revision = "18a564baac720819100827c16fdebcadb05b2d0d" + +[[projects]] + name = "k8s.io/client-go" + packages = 
["discovery","informers","informers/admissionregistration","informers/admissionregistration/v1alpha1","informers/apps","informers/apps/v1beta1","informers/apps/v1beta2","informers/autoscaling","informers/autoscaling/v1","informers/autoscaling/v2beta1","informers/batch","informers/batch/v1","informers/batch/v1beta1","informers/batch/v2alpha1","informers/certificates","informers/certificates/v1beta1","informers/core","informers/core/v1","informers/extensions","informers/extensions/v1beta1","informers/internalinterfaces","informers/networking","informers/networking/v1","informers/policy","informers/policy/v1beta1","informers/rbac","informers/rbac/v1","informers/rbac/v1alpha1","informers/rbac/v1beta1","informers/scheduling","informers/scheduling/v1alpha1","informers/settings","informers/settings/v1alpha1","informers/storage","informers/storage/v1","informers/storage/v1beta1","kubernetes","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta2","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1beta1","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v2alpha1","kubernetes/typed/certificates/v1beta1","kubernetes/typed/core/v1","kubernetes/typed/extensions/v1beta1","kubernetes/typed/networking/v1","kubernetes/typed/policy/v1beta1","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1beta1","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/settings/v1alpha1","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1beta1","listers/admissionregistration/v1alpha1","listers/apps/v1beta1","listers/apps/v1beta2","listers/autoscaling/v1","listers/autoscaling/v2beta1","listers/batch/v1","listers/batch/v1beta1","listers/batch/v2alpha1","listers/certificates/v1beta1"
,"listers/core/v1","listers/extensions/v1beta1","listers/networking/v1","listers/policy/v1beta1","listers/rbac/v1","listers/rbac/v1alpha1","listers/rbac/v1beta1","listers/scheduling/v1alpha1","listers/settings/v1alpha1","listers/storage/v1","listers/storage/v1beta1","pkg/version","rest","rest/watch","tools/auth","tools/cache","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/metrics","tools/pager","tools/record","tools/reference","transport","util/cert","util/flowcontrol","util/homedir","util/integer","util/workqueue"] + revision = "2ae454230481a7cb5544325e12ad7658ecccd19b" + version = "v5.0.1" + +[[projects]] + branch = "master" + name = "k8s.io/kube-openapi" + packages = ["pkg/common"] + revision = "61b46af70dfed79c6d24530cd23b41440a7f22a5" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "d8fd3a7810387e74dbc74005583ec7fe7815f55d6f18cd246af632d7f65d3252" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000..4c48fa2b1 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,54 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "github.com/container-storage-interface/spec" + +[[constraint]] + branch = "master" + name = "github.com/golang/glog" + +[[constraint]] + name = "github.com/golang/mock" + version = "1.0.0" + +[[constraint]] + branch = "master" + name = "github.com/kubernetes-csi/csi-test" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.7.1" + +[[constraint]] + branch = "master" + name = "k8s.io/api" + +[[constraint]] + branch = "master" + name = "k8s.io/apimachinery" + +[[constraint]] + name = "k8s.io/client-go" + version = "5.0.1" diff --git a/vendor/github.com/container-storage-interface/spec/.gitignore b/vendor/github.com/container-storage-interface/spec/.gitignore new file mode 100644 index 000000000..83250da77 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/.gitignore @@ -0,0 +1,2 @@ +/csi.proto.tmp +.DS_Store diff --git a/vendor/github.com/container-storage-interface/spec/.travis.yml b/vendor/github.com/container-storage-interface/spec/.travis.yml new file mode 100644 index 000000000..ec3dde8a2 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/.travis.yml @@ -0,0 +1,38 @@ +# Setting "sudo" to false forces Travis-CI to use its +# container-based build infrastructure, which has shorter +# queue times. +sudo: false + +# Use the newer Travis-CI build templates based on the +# Debian Linux distribution "Trusty" release. +dist: trusty + +# Selecting C as the language keeps the container to a +# minimum footprint. 
+language: c + +jobs: + include: + + # The test stage validates that the protobuf file was updated + # correctly prior to being committed. + - stage: test + script: make + + # The lang stages validate the specification using + # language-specific bindings. + + # Lang stage: Cxx + - stage: lang + script: make -C lib/cxx + + # Lang stage: Go + - stage: lang + language: go + go: 1.8.3 + go_import_path: github.com/container-storage-interface/spec + install: + - make -C lib/go protoc + - make -C lib/go protoc-gen-go + script: + - make -C lib/go diff --git a/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md b/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md new file mode 100644 index 000000000..cc5a61ce1 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/CONTRIBUTING.md @@ -0,0 +1,48 @@ +# How to Contribute + +CSI is under [Apache 2.0](LICENSE) and accepts contributions via GitHub pull requests. +This document outlines some of the conventions on development workflow, commit message formatting, contact points and other resources to make it easier to get your contribution accepted. + +## Markdown style + +To keep consistency throughout the Markdown files in the CSI spec, all files should be formatted one sentence per line. +This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length. +For example, this paragraph will span three lines in the Markdown source. + +## Code style + +This also applies to the code snippets in the markdown files. + +* Please wrap the code at 72 characters. + +## Comments + +This also applies to the code snippets in the markdown files. + +* End each sentence within a comment with a punctuation mark (please note that we generally prefer periods); this applies to incomplete sentences as well. +* For trailing comments, leave one space between the end of the code and the beginning of the comment. 
+ +## Git commit + +Prior to committing code please run `make` in order to update the protobuf file and any language bindings. + +### Commit Style + +Each commit should represent a single logical (atomic) change: this makes your changes easier to review. + +* Try to avoid unrelated cleanups (e.g., typo fixes or style nits) in the same commit that makes functional changes. + While typo fixes are great, including them in the same commit as functional changes makes the commit history harder to read. +* Developers often make incremental commits to save their progress when working on a change, and then “rewrite history” (e.g., using `git rebase -i`) to create a clean set of commits once the change is ready to be reviewed. + +Simple house-keeping for clean git history. +Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit). + +* Separate the subject from body with a blank line. +* Limit the subject line to 50 characters. +* Capitalize the subject line. +* Do not end the subject line with a period. +* Use the imperative mood in the subject line. +* Wrap the body at 72 characters. +* Use the body to explain what and why vs. how. + * If there was important/useful/essential conversation or information, copy or include a reference. +* When possible, one keyword to scope the change in the subject (i.e. "README: ...", "tool: ..."). diff --git a/vendor/github.com/container-storage-interface/spec/LICENSE b/vendor/github.com/container-storage-interface/spec/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/container-storage-interface/spec/Makefile b/vendor/github.com/container-storage-interface/spec/Makefile new file mode 100644 index 000000000..fa69d61de --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/Makefile @@ -0,0 +1,50 @@ +all: build + +CSI_SPEC := spec.md +CSI_PROTO := csi.proto + +# This is the target for building the temporary CSI protobuf file. +# +# The temporary file is not versioned, and thus will always be +# built on Travis-CI. +$(CSI_PROTO).tmp: $(CSI_SPEC) + cat $? | \ + sed -n -e '/```protobuf$$/,/```$$/ p' | \ + sed -e 's@^```.*$$@////////@g' > $@ + +# This is the target for building the CSI protobuf file. +# +# This target depends on its temp file, which is not versioned. 
+# Therefore when built on Travis-CI the temp file will always +# be built and trigger this target. On Travis-CI the temp file +# is compared with the real file, and if they differ the build +# will fail. +# +# Locally the temp file is simply copied over the real file. +$(CSI_PROTO): $(CSI_PROTO).tmp +ifeq (true,$(TRAVIS)) + diff "$@" "$?" +else + diff "$@" "$?" > /dev/null 2>&1 || cp -f "$?" "$@" +endif + +build: check + +# If this is not running on Travis-CI then for sake of convenience +# go ahead and update the language bindings as well. +ifneq (true,$(TRAVIS)) +build: + $(MAKE) -C lib/go + $(MAKE) -C lib/cxx +endif + +clean: + +clobber: clean + rm -f $(CSI_PROTO) $(CSI_PROTO).tmp + +# check generated files for violation of standards +check: $(CSI_PROTO) + awk '{ if (length > 72) print NR, $$0 }' $? | diff - /dev/null + +.PHONY: clean clobber check diff --git a/vendor/github.com/container-storage-interface/spec/OWNERS b/vendor/github.com/container-storage-interface/spec/OWNERS new file mode 100644 index 000000000..b11f91910 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/OWNERS @@ -0,0 +1,10 @@ +approvers: + - saad-ali # Representing Kubernetes + - thockin # Representing Kubernetes + - jieyu # Representing Mesos + - jdef # Representing Mesos + - cpuguy83 # Representing Docker + - mycure # Representing Docker + - julian-hj # Representing Cloud Foundry + - paulcwarren # Representing Cloud Foundry +reviewers: diff --git a/vendor/github.com/container-storage-interface/spec/README.md b/vendor/github.com/container-storage-interface/spec/README.md new file mode 100644 index 000000000..e16a3cc64 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/README.md @@ -0,0 +1,5 @@ +# Container Storage Interface (CSI) Specification [![build status](https://travis-ci.org/container-storage-interface/spec.svg?branch=master)](https://travis-ci.org/container-storage-interface/spec) + +![CSI Logo](logo.png) + +This project contains the CSI 
[specification](spec.md) and [protobuf](csi.proto) files. diff --git a/vendor/github.com/container-storage-interface/spec/VERSION b/vendor/github.com/container-storage-interface/spec/VERSION new file mode 100644 index 000000000..6e8bf73aa --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/vendor/github.com/container-storage-interface/spec/csi.proto b/vendor/github.com/container-storage-interface/spec/csi.proto new file mode 100644 index 000000000..6900868da --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/csi.proto @@ -0,0 +1,1301 @@ +//////// +syntax = "proto3"; +package csi; +//////// +//////// +service Identity { + rpc GetSupportedVersions (GetSupportedVersionsRequest) + returns (GetSupportedVersionsResponse) {} + + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} +} + +service Node { + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + rpc GetNodeID (GetNodeIDRequest) + returns (GetNodeIDResponse) {} + + rpc ProbeNode 
(ProbeNodeRequest) + returns (ProbeNodeResponse) {} + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} +} +//////// +//////// +message GetSupportedVersionsRequest { +} + +message GetSupportedVersionsResponse { + message Result { + // All the CSI versions that the Plugin supports. This field is + // REQUIRED. + repeated Version supported_versions = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +// Specifies a version in Semantic Version 2.0 format. +// (http://semver.org/spec/v2.0.0.html) +message Version { + uint32 major = 1; // This field is REQUIRED. + uint32 minor = 2; // This field is REQUIRED. + uint32 patch = 3; // This field is REQUIRED. +} +//////// +//////// +message GetPluginInfoRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message GetPluginInfoResponse { + message Result { + // This field is REQUIRED. + string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. + map manifest = 3; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message CreateVolumeRequest { + // The API version assumed by the CO. This field is REQUIRED. + Version version = 1; + + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. 
The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + string name = 2; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. + CapacityRange capacity_range = 3; + + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. + repeated VolumeCapability volume_capabilities = 4; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + map parameters = 5; + + // End user credentials used to authenticate/authorize volume creation + // request. + // This field is OPTIONAL. 
+ Credentials user_credentials = 6; +} + +message CreateVolumeResponse { + message Result { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identifying the volume. This field is REQUIRED. + VolumeInfo volume_info = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. + repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can be published as read/write at one node at a time. + SINGLE_NODE_WRITER = 1; + + // Can be published as readonly at one node at a time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. 
+ oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` can be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume must be at least this big. + uint64 required_bytes = 1; + + // Volume must not be bigger than this. + uint64 limit_bytes = 2; +} + +// The information about a provisioned volume. +message VolumeInfo { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set, it indicates that the capacity of the volume is unknown (e.g., + // NFS share). If set, it MUST be non-zero. + uint64 capacity_bytes = 1; + + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + VolumeHandle handle = 2; +} + +// VolumeHandle objects are generated by Plugin implementations to +// serve two purposes: first, to efficiently identify a volume known +// to the Plugin implementation; second, to capture additional, opaque, +// possibly sensitive information associated with a volume. It SHALL +// NOT change over the lifetime of the volume and MAY be safely cached +// by the CO. +// Since this object will be passed around by the CO, it is RECOMMENDED +// that each Plugin keeps the information herein as small as possible. +// The total bytes of a serialized VolumeHandle must be less than 1 MiB. +message VolumeHandle { + // ID is the identity of the provisioned volume specified by the + // Plugin. This field is REQUIRED. + // This information SHALL NOT be considered sensitive such that, for + // example, the CO MAY generate log messages that include this data. 
+ string id = 1; + + // Metadata captures additional, possibly sensitive, information about + // a volume in the form of key-value pairs. This field is OPTIONAL. + // Since this field MAY contain sensitive information, the CO MUST NOT + // leak this information to untrusted entities. + map metadata = 2; +} + +// A standard way to encode credential data. The total bytes of the +// values in the Data field must be less than 1 Mebibyte. +message Credentials { + // Data contains the credential data, for example username and + // password. Each key must consist of alphanumeric characters, '-', + // '_' or '.'. Each value MUST contain a valid string. An SP MAY + // choose to accept binary (non-string) data by using a binary-to-text + // encoding scheme, like base64. An SP SHALL advertise the + // requirements for credentials in documentation. COs SHALL permit + // users to pass through the required credentials. This information is + // sensitive and MUST be treated as such (not logged, etc.) by the CO. + // This field is REQUIRED. + map data = 1; +} +//////// +//////// +message DeleteVolumeRequest { + // The API version assumed by the CO. This field is REQUIRED. + Version version = 1; + + // The handle of the volume to be deprovisioned. + // This field is REQUIRED. + VolumeHandle volume_handle = 2; + + // End user credentials used to authenticate/authorize volume deletion + // request. + // This field is OPTIONAL. + Credentials user_credentials = 3; +} + +message DeleteVolumeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message ControllerPublishVolumeRequest { + // The API version assumed by the CO. This field is REQUIRED. + Version version = 1; + + // The handle of the volume to be used on a node. + // This field is REQUIRED. + VolumeHandle volume_handle = 2; + + // The ID of the node. This field is OPTIONAL. 
The CO SHALL set (or + // clear) this field to match the `NodeID` returned by `GetNodeID`. + // `GetNodeID` is allowed to omit `NodeID` from a successful `Result`; + // in such cases the CO SHALL NOT specify this field. + NodeID node_id = 3; + + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability volume_capability = 4; + + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + bool readonly = 5; + + // End user credentials used to authenticate/authorize controller + // publish request. + // This field is OPTIONAL. + Credentials user_credentials = 6; +} + +message ControllerPublishVolumeResponse { + message Result { + // The SP specific information that will be passed to the Plugin in + // the subsequent `NodePublishVolume` call for the given volume. + // This information is opaque to the CO. This field is OPTIONAL. + map publish_volume_info = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +message NodeID { + // Information about a node in the form of key-value pairs. This + // information is opaque to the CO. Given this information will be + // passed around by the CO, it is RECOMMENDED that each Plugin keeps + // this information as small as possible. This field is REQUIRED. + map values = 1; +} +//////// +//////// +message ControllerUnpublishVolumeRequest { + // The API version assumed by the CO. This field is REQUIRED. + Version version = 1; + + // The handle of the volume. This field is REQUIRED. + VolumeHandle volume_handle = 2; + + // The ID of the node. This field is OPTIONAL. The CO SHALL set (or + // clear) this field to match the `NodeID` returned by `GetNodeID`. + // `GetNodeID` is allowed to omit `NodeID` from a successful `Result`; + // in such cases the CO SHALL NOT specify this field. 
+ // + // If `GetNodeID` does not omit `NodeID` from a successful `Result`, + // the CO MAY omit this field as well, indicating that it does not + // know which node the volume was previously used. The Plugin SHOULD + // return an Error if this is not supported. + NodeID node_id = 3; + + // End user credentials used to authenticate/authorize controller + // unpublish request. + // This field is OPTIONAL. + Credentials user_credentials = 4; +} + +message ControllerUnpublishVolumeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message ValidateVolumeCapabilitiesRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; + + // The information about the volume to check. This is a REQUIRED + // field. + VolumeInfo volume_info = 2; + + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "supported" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; +} + +message ValidateVolumeCapabilitiesResponse { + message Result { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. + bool supported = 1; + + // Message to the CO if `supported` above is false. This field is + // OPTIONAL. + string message = 2; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message ListVolumesRequest { + // The API version assumed by the CO. This field is REQUIRED. + Version version = 1; + + // If specified, the Plugin MUST NOT return more entries than this + // number in the response. 
If the actual number of entries is more + // than this number, the Plugin MUST set `next_token` in the response + // which can be used to get the next page of entries in the subsequent + // `ListVolumes` call. This field is OPTIONAL. If not specified, it + // means there is no restriction on the number of entries that can be + // returned. + uint32 max_entries = 2; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + string starting_token = 3; +} + +message ListVolumesResponse { + message Result { + message Entry { + VolumeInfo volume_info = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + string next_token = 2; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message GetCapacityRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + repeated VolumeCapability volume_capabilities = 2; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. 
+ map parameters = 3; +} + +message GetCapacityResponse { + message Result { + // The available capacity of the storage that can be used to + // provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + uint64 available_capacity = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message ControllerGetCapabilitiesRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message ControllerGetCapabilitiesResponse { + message Result { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + repeated ControllerServiceCapability capabilities = 2; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +// Specifies a capability of the controller service. +message ControllerServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + CREATE_DELETE_VOLUME = 1; + PUBLISH_UNPUBLISH_VOLUME = 2; + LIST_VOLUMES = 3; + GET_CAPACITY = 4; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +//////// +//////// +message NodePublishVolumeRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; + + // The handle of the volume to publish. This field is REQUIRED. + VolumeHandle volume_handle = 2; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. 
+ map publish_volume_info = 3; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // This is a REQUIRED field. + string target_path = 4; + + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability volume_capability = 5; + + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + bool readonly = 6; + + // End user credentials used to authenticate/authorize node publish + // request. This field is OPTIONAL. + Credentials user_credentials = 7; +} + +message NodePublishVolumeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message NodeUnpublishVolumeRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; + + // The handle of the volume. This field is REQUIRED. + VolumeHandle volume_handle = 2; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + string target_path = 3; + + // End user credentials used to authenticate/authorize node unpublish + // request. This field is OPTIONAL. + Credentials user_credentials = 4; +} + +message NodeUnpublishVolumeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message GetNodeIDRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message GetNodeIDResponse { + message Result { + // The ID of the node which SHALL be used by CO in + // `ControllerPublishVolume`. This is an OPTIONAL field. 
If unset, + // the CO SHALL leave the `node_id` field unset in + // `ControllerPublishVolume`. + NodeID node_id = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message ProbeNodeRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message ProbeNodeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +//////// +//////// +message NodeGetCapabilitiesRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message NodeGetCapabilitiesResponse { + message Result { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 2; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +//////// +//////// +message Error { + // General Error that MAY be returned by any RPC. + message GeneralError { + enum GeneralErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `GeneralErrorCode` code that an older CSI client is not aware + // of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + // Indicates that an undefined error occurred. More human-readable + // information MAY be provided in the `error_description` field. 
+ // The `caller_must_not_retry` field MUST be set appropriately by
+ // the Plugin to provide callers expected recovery behavior.
+ //
+ // Recovery behavior: Caller MAY retry (with exponential backoff),
+ // if `caller_must_not_retry` is set to false. Otherwise, the
+ // caller MUST not reissue the same request.
+ UNDEFINED = 1;
+
+ // Indicates that the version specified in the request is not
+ // supported by the Plugin. The `caller_must_not_retry` field MUST
+ // be set to true.
+ //
+ // Recovery behavior: Caller MUST NOT retry; caller SHOULD call
+ // `GetSupportedVersions` to discover which CSI versions the
+ // Plugin supports.
+ UNSUPPORTED_REQUEST_VERSION = 2;
+
+ // Indicates that a required field is missing from the request.
+ // More human-readable information MAY be provided in the
+ // `error_description` field. The `caller_must_not_retry` field
+ // MUST be set to true.
+ //
+ // Recovery behavior: Caller MUST fix the request by adding the
+ // missing required field before retrying.
+ MISSING_REQUIRED_FIELD = 3;
+ }
+
+ // Machine parsable error code.
+ GeneralErrorCode error_code = 1;
+
+ // When set to true, `caller_must_not_retry` indicates that the
+ // caller MUST not retry the same call again. This MAY be because
+ // the call is deemed invalid by the Plugin and no amount of retries
+ // will cause it to succeed. If this value is false, the caller MAY
+ // reissue the same call, but SHOULD implement exponential backoff
+ // on retries.
+ bool caller_must_not_retry = 2;
+
+ // Human readable description of error, possibly with additional
+ // information. This string MAY be surfaced by CO to end users.
+ string error_description = 3;
+ }
+
+ // `CreateVolume` specific error.
+ message CreateVolumeError {
+ enum CreateVolumeErrorCode {
+ // Default value for backwards compatibility. SHOULD NOT be
+ // returned by Plugins. 
However, if a Plugin returns a
+ // `CreateVolumeErrorCode` code that an older CSI client is not
+ // aware of, the client will see this code (the default fallback).
+ //
+ // Recovery behavior: Caller SHOULD consider updating CSI client
+ // to match Plugin CSI version.
+ UNKNOWN = 0;
+
+ // Indicates that the call is either not implemented by the Plugin
+ // or disabled in the Plugin’s current mode of operation.
+ //
+ // Recovery behavior: Caller MUST not retry; caller MAY call
+ // `ControllerGetCapabilities` or `NodeGetCapabilities` to
+ // discover Plugin capabilities.
+ CALL_NOT_IMPLEMENTED = 1;
+
+ // Indicates that there is already an operation pending for the
+ // specified volume. In general the Cluster Orchestrator (CO) is
+ // responsible for ensuring that there is no more than one call
+ // “in-flight” per volume at a given time. However, in some
+ // circumstances, the CO MAY lose state (for example when the CO
+ // crashes and restarts), and MAY issue multiple calls
+ // simultaneously for the same volume. The Plugin, SHOULD handle
+ // this as gracefully as possible, and MAY return this error code
+ // to reject secondary calls.
+ //
+ // Recovery behavior: Caller SHOULD ensure that there are no other
+ // calls pending for the specified volume, and then retry with
+ // exponential back off.
+ OPERATION_PENDING_FOR_VOLUME = 2;
+
+ // Indicates that the specified volume name is not allowed by the
+ // Plugin. More human-readable information MAY be provided in the
+ // `error_description` field.
+ //
+ // Recovery behavior: Caller MUST fix the name before retrying.
+ INVALID_VOLUME_NAME = 3;
+
+ // Indicates that the capacity range is not allowed by the Plugin.
+ // More human-readable information MAY be provided in the
+ // `error_description` field.
+ //
+ // Recovery behavior: Caller MUST fix the capacity range before
+ // retrying. 
+ UNSUPPORTED_CAPACITY_RANGE = 4; + + // Indicates that a volume corresponding to the specified volume + // name already exists. + // + // Recovery behavior: Caller MAY assume the `CreateVolume` + // call succeeded. + VOLUME_ALREADY_EXISTS = 5; + + // Indicates that a key in the opaque key/value parameters field + // is not supported by the Plugin. More human-readable information + // MAY be provided in the `error_description` field. This MAY + // occur, for example, due to caller error, Plugin version skew, + // etc. + // + // Recovery behavior: Caller MUST remove the unsupported key/value + // pair from the list of parameters before retrying. + UNSUPPORTED_PARAMETER_KEY = 6; + + // Indicates that a value in one of the opaque key/value pairs + // parameter contains invalid data. More human-readable + // information (such as the corresponding key) MAY be provided in + // the `error_description` field. + // + // Recovery behavior: Caller MUST fix the invalid value before + // retrying. + INVALID_PARAMETER_VALUE = 7; + } + + // Machine parsable error code. + CreateVolumeErrorCode error_code = 1; + + // Human readable description of error, possibly with additional + // information. This string maybe surfaced by CO to end users. + string error_description = 2; + } + + // `DeleteVolume` specific error. + message DeleteVolumeError { + enum DeleteVolumeErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `DeleteVolumeErrorCode` code that an older CSI client is not + // aware of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. 
+ // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. + CALL_NOT_IMPLEMENTED = 1; + + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 2; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 3; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD assume the `DeleteVolume` call + // succeeded. + VOLUME_DOES_NOT_EXIST = 4; + } + + // Machine parsable error code. + DeleteVolumeErrorCode error_code = 1; + + // Human readable description of error, possibly with additional + // information. This string maybe surfaced by CO to end users. + string error_description = 2; + } + + // `ControllerPublishVolume` specific error. + message ControllerPublishVolumeError { + enum ControllerPublishVolumeErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. 
However, if a Plugin returns a + // `ControllerPublishVolumeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. + // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. + CALL_NOT_IMPLEMENTED = 1; + + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 2; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 3; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. 
+ VOLUME_DOES_NOT_EXIST = 4; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` is already attached to another node and does not + // support multi-node attach. If this error code is returned, the + // Plugin MUST also specify the `node_id` of the node the volume + // is already attached to. + // + // Recovery behavior: Caller MAY use the provided `node_ids` + // information to detach the volume from the other node. Caller + // SHOULD ensure the specified volume is not attached to any other + // node before retrying with exponential back off. + VOLUME_ALREADY_PUBLISHED = 5; + + // Indicates that a node corresponding to the specified `NodeID` + // does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `NodeID` is + // correct and that the node is available and has not been + // terminated or deleted before retrying with exponential backoff. + NODE_DOES_NOT_EXIST = 6; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` is already attached to the maximum supported + // number of nodes and therefore this operation can not be + // completed until the volume is detached from at least one of the + // existing nodes. When this error code is returned, the Plugin + // MUST also specify the `NodeId` of all the nodes the volume is + // attached to. + // + // Recovery behavior: Caller MAY use the provided `node_ids` + // information to detach the volume from one other node before + // retrying with exponential backoff. + MAX_ATTACHED_NODES = 7; + + UNSUPPORTED_MOUNT_FLAGS = 9; + UNSUPPORTED_VOLUME_TYPE = 10; + UNSUPPORTED_FS_TYPE = 11; + + // Indicates that the specified `NodeID` is not allowed or + // understood by the Plugin, or the Plugin does not support the + // operation without a `NodeID`. More human-readable information + // MAY be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `NodeID` before + // retrying. 
+ INVALID_NODE_ID = 8;
+ }
+
+ // Machine parsable error code.
+ ControllerPublishVolumeErrorCode error_code = 1;
+
+ // Human readable description of error, possibly with additional
+ // information. This string may be surfaced by CO to end users.
+ string error_description = 2;
+
+ // On `VOLUME_ALREADY_PUBLISHED` and `MAX_ATTACHED_NODES` errors,
+ // this field contains the node(s) that the specified volume is
+ // already attached to.
+ repeated NodeID node_ids = 3;
+ }
+
+ // `ControllerUnpublishVolume` specific error.
+ message ControllerUnpublishVolumeError {
+ enum ControllerUnpublishVolumeErrorCode {
+ // Default value for backwards compatibility. SHOULD NOT be
+ // returned by Plugins. However, if a Plugin returns a
+ // `ControllerUnpublishVolumeErrorCode` code that an older CSI
+ // client is not aware of, the client will see this code (the
+ // default fallback).
+ //
+ // Recovery behavior: Caller SHOULD consider updating CSI client
+ // to match Plugin CSI version.
+ UNKNOWN = 0;
+
+ // Indicates that the call is either not implemented by the Plugin
+ // or disabled in the Plugin’s current mode of operation.
+ //
+ // Recovery behavior: Caller MUST not retry; caller MAY call
+ // `ControllerGetCapabilities` or `NodeGetCapabilities` to
+ // discover Plugin capabilities.
+ CALL_NOT_IMPLEMENTED = 1;
+
+ // Indicates that there is already an operation pending for the
+ // specified volume. In general the Cluster Orchestrator (CO) is
+ // responsible for ensuring that there is no more than one call
+ // “in-flight” per volume at a given time. However, in some
+ // circumstances, the CO MAY lose state (for example when the CO
+ // crashes and restarts), and MAY issue multiple calls
+ // simultaneously for the same volume. The Plugin, SHOULD handle
+ // this as gracefully as possible, and MAY return this error code
+ // to reject secondary calls. 
+ // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 2; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 3; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. + VOLUME_DOES_NOT_EXIST = 4; + + // Indicates that a node corresponding to the specified `NodeID` + // does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `NodeID` is + // correct and that the node is available and has not been + // terminated or deleted before retrying. + NODE_DOES_NOT_EXIST = 5; + + // Indicates that the specified `NodeID` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `NodeID` before + // retrying. + INVALID_NODE_ID = 6; + + VOLUME_NOT_ATTACHED_TO_SPECIFIED_NODE = 7; + + // Indicates that the Plugin does not support the operation + // without a `NodeID`. + // + // Recovery behavior: Caller MUST specify the `NodeID` before + // retrying. + NODE_ID_REQUIRED = 8; + } + + ControllerUnpublishVolumeErrorCode error_code = 1; + string error_description = 2; + } + + // `ValidateVolumeCapabilities` specific error. + message ValidateVolumeCapabilitiesError { + enum ValidateVolumeCapabilitiesErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. 
However, if a Plugin returns a
+ // `ValidateVolumeCapabilitiesErrorCode` code that an older CSI
+ // client is not aware of, the client will see this code (the
+ // default fallback).
+ //
+ // Recovery behavior: Caller SHOULD consider updating CSI client
+ // to match Plugin CSI version.
+ UNKNOWN = 0;
+
+ // Indicates that a volume corresponding to the specified
+ // `VolumeInfo` does not exist.
+ //
+ // Recovery behavior: Caller SHOULD verify that the `VolumeInfo`
+ // is correct and that the volume is accessible and has not been
+ // deleted before retrying.
+ VOLUME_DOES_NOT_EXIST = 1;
+
+ UNSUPPORTED_MOUNT_FLAGS = 2;
+ UNSUPPORTED_VOLUME_TYPE = 3;
+ UNSUPPORTED_FS_TYPE = 4;
+
+ // Indicates that the specified `VolumeInfo` is not allowed or
+ // understood by the Plugin. More human-readable information MAY
+ // be provided in the `error_description` field.
+ //
+ // Recovery behavior: Caller MUST fix the `VolumeInfo` before
+ // retrying.
+ INVALID_VOLUME_INFO = 5;
+ }
+
+ ValidateVolumeCapabilitiesErrorCode error_code = 1;
+ string error_description = 2;
+ }
+
+ // `NodePublishVolume` specific error.
+ message NodePublishVolumeError {
+ enum NodePublishVolumeErrorCode {
+ // Default value for backwards compatibility. SHOULD NOT be
+ // returned by Plugins. However, if a Plugin returns a
+ // `NodePublishVolumeErrorCode` code that an older CSI
+ // client is not aware of, the client will see this code (the
+ // default fallback).
+ //
+ // Recovery behavior: Caller SHOULD consider updating CSI client
+ // to match Plugin CSI version.
+ UNKNOWN = 0;
+
+ // Indicates that there is already an operation pending for the
+ // specified volume. In general the Cluster Orchestrator (CO) is
+ // responsible for ensuring that there is no more than one call
+ // “in-flight” per volume at a given time. 
However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 1; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. + VOLUME_DOES_NOT_EXIST = 2; + + UNSUPPORTED_MOUNT_FLAGS = 3; + UNSUPPORTED_VOLUME_TYPE = 4; + UNSUPPORTED_FS_TYPE = 5; + MOUNT_ERROR = 6; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 7; + } + + NodePublishVolumeErrorCode error_code = 1; + string error_description = 2; + } + + // `NodeUnpublishVolume` specific error. + message NodeUnpublishVolumeError { + enum NodeUnpublishVolumeErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `NodeUnpublishVolumeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + // Indicates that there is a already an operation pending for the + // specified volume. 
In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 1; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. + VOLUME_DOES_NOT_EXIST = 2; + + UNMOUNT_ERROR = 3; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 4; + } + + NodeUnpublishVolumeErrorCode error_code = 1; + string error_description = 2; + } + + // `ProbeNode` specific error. + message ProbeNodeError { + enum ProbeNodeErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `ProbeNodeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. 
+ UNKNOWN = 0; + + BAD_PLUGIN_CONFIG = 1; + MISSING_REQUIRED_HOST_DEPENDENCY = 2; + } + + ProbeNodeErrorCode error_code = 1; + string error_description = 2; + } + + // `GetNodeID` specific error. + message GetNodeIDError { + enum GetNodeIDErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `GetNodeIDErrorCode` code that an older CSI client is not aware + // of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + BAD_PLUGIN_CONFIG = 1; + MISSING_REQUIRED_HOST_DEPENDENCY = 2; + } + + GetNodeIDErrorCode error_code = 1; + string error_description = 2; + } + + // One of the following fields MUST be specified. + oneof value { + GeneralError general_error = 1; + + CreateVolumeError create_volume_error = 2; + DeleteVolumeError delete_volume_error = 3; + ControllerPublishVolumeError controller_publish_volume_error = 4; + + ControllerUnpublishVolumeError + controller_unpublish_volume_error = 5; + + ValidateVolumeCapabilitiesError + validate_volume_capabilities_error = 6; + + NodePublishVolumeError node_publish_volume_error = 7; + NodeUnpublishVolumeError node_unpublish_volume_error = 8; + ProbeNodeError probe_node_error = 9; + GetNodeIDError get_node_id_error = 10; + } +} +//////// diff --git a/vendor/github.com/container-storage-interface/spec/lib/README.md b/vendor/github.com/container-storage-interface/spec/lib/README.md new file mode 100644 index 000000000..1cc2aade4 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/README.md @@ -0,0 +1,2 @@ +# CSI Validation Libraries +This directory contains language bindings generated from the CSI [protobuf file](../csi.proto) used to validate the model and workflows of the CSI specification. 
diff --git a/vendor/github.com/container-storage-interface/spec/lib/cxx/Makefile b/vendor/github.com/container-storage-interface/spec/lib/cxx/Makefile new file mode 100644 index 000000000..dcc07c7d9 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/cxx/Makefile @@ -0,0 +1,9 @@ +all: build + +build: + @echo "cxx bindings & validation" + +clean: + @echo "clean cxx" + +.PHONY: clean diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/.gitignore b/vendor/github.com/container-storage-interface/spec/lib/go/.gitignore new file mode 100644 index 000000000..13f0dc281 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/.gitignore @@ -0,0 +1,4 @@ +/protoc +/protoc-gen-go +/csi.a +/csi/.build/ diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/Makefile b/vendor/github.com/container-storage-interface/spec/lib/go/Makefile new file mode 100644 index 000000000..d7a70e6df --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/Makefile @@ -0,0 +1,119 @@ +all: build + +######################################################################## +## GOLANG ## +######################################################################## + +# If GOPATH isn't defined then set its default location. +ifeq (,$(strip $(GOPATH))) +GOPATH := $(HOME)/go +else +# If GOPATH is already set then update GOPATH to be its own +# first element. +GOPATH := $(word 1,$(subst :, ,$(GOPATH))) +endif +export GOPATH + + +######################################################################## +## PROTOC ## +######################################################################## + +# Only set PROTOC_VER if it has an empty value. 
+ifeq (,$(strip $(PROTOC_VER))) +PROTOC_VER := 3.3.0 +endif + +PROTOC_OS := $(shell uname -s) +ifeq (Darwin,$(PROTOC_OS)) +PROTOC_OS := osx +endif + +PROTOC_ARCH := $(shell uname -m) +ifeq (i386,$(PROTOC_ARCH)) +PROTOC_ARCH := x86_32 +endif + +PROTOC := ./protoc +PROTOC_ZIP := protoc-$(PROTOC_VER)-$(PROTOC_OS)-$(PROTOC_ARCH).zip +PROTOC_URL := https://github.com/google/protobuf/releases/download/v$(PROTOC_VER)/$(PROTOC_ZIP) +PROTOC_TMP_DIR := .protoc +PROTOC_TMP_BIN := $(PROTOC_TMP_DIR)/bin/protoc + +$(PROTOC): + -mkdir -p "$(PROTOC_TMP_DIR)" && \ + curl -L $(PROTOC_URL) -o "$(PROTOC_TMP_DIR)/$(PROTOC_ZIP)" && \ + unzip "$(PROTOC_TMP_DIR)/$(PROTOC_ZIP)" -d "$(PROTOC_TMP_DIR)" && \ + chmod 0755 "$(PROTOC_TMP_BIN)" && \ + cp -f "$(PROTOC_TMP_BIN)" "$@" + -rm -fr "$(PROTOC_TMP_DIR)" + stat "$@" > /dev/null 2>&1 + + +######################################################################## +## PROTOC-GEN-GO ## +######################################################################## + +# This is the recipe for getting and installing the go plug-in +# for protoc +PROTOC_GEN_GO_PKG := github.com/golang/protobuf/protoc-gen-go +PROTOC_GEN_GO := protoc-gen-go +$(PROTOC_GEN_GO): + go get -d $(PROTOC_GEN_GO_PKG) && \ + go build -o "$@" $(PROTOC_GEN_GO_PKG) + + +######################################################################## +## PATH ## +######################################################################## + +# Update PATH with the current directory. This enables the protoc +# binary to discover the protoc-gen-go binary, built inside this +# directory. +export PATH := $(shell pwd):$(PATH) + + +######################################################################## +## BUILD ## +######################################################################## +CSI_GO := csi/csi.pb.go +CSI_A := csi.a +CSI_PROTO := ../../csi.proto +CSI_GO_TMP := csi/.build/csi.pb.go + +# This recipe generates the go language bindings to a temp area. 
+$(CSI_GO_TMP): $(CSI_PROTO) | $(PROTOC) $(PROTOC_GEN_GO) + @mkdir -p "$(@D)" + $(PROTOC) -I "$( /dev/null 2>&1 || cp -f "$?" "$@" +endif + +# This recipe builds the Go archive from the sources in three steps: +# +# 1. Go get any missing dependencies. +# 2. Cache the packages. +# 3. Build the archive file. +$(CSI_A): $(CSI_GO) + go get -d ./... + go install ./csi + go build -o "$@" ./csi + +build: $(CSI_A) + +clean: + go clean -i ./... + rm -f "$(CSI_A)" + +clobber: clean + rm -fr "$(PROTOC)" "$(PROTOC_GEN_GO)" "$(dir $(CSI_GO))" + +.PHONY: clean clobber diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/README.md b/vendor/github.com/container-storage-interface/spec/lib/go/README.md new file mode 100644 index 000000000..79c48ade5 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/README.md @@ -0,0 +1,21 @@ +# CSI Go Validation + +This package is used to validate the CSI specification with Go language bindings. + +## Build Reference + +To validate the Go language bindings against the current specification use the following command: + +```bash +$ make +``` + +The above command will download the `protoc` and `protoc-gen-go` binaries if they are not present and then proceed to build the CSI Go language bindings. + +### Environment Variables + +The following table lists the environment variables that can be used to influence the behavior of the Makefile: + +| Name | Default Value | Description | +|------|---------------|-------------| +| `PROTOC_VER` | `3.3.0` | The version of the protoc binary. | diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi.go new file mode 100644 index 000000000..33826d5d5 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi.go @@ -0,0 +1,5 @@ +// Package csi is the Container Storage Interface (CSI) specification +// repository. 
This package contains no functional code, and this file +// exists only to make it possible to import this repository with a Go +// dependency manager such as Dep (https://github.com/golang/dep). +package csi diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go new file mode 100644 index 000000000..efc2abcc3 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -0,0 +1,5819 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: csi.proto + +/* +Package csi is a generated protocol buffer package. + +It is generated from these files: + csi.proto + +It has these top-level messages: + GetSupportedVersionsRequest + GetSupportedVersionsResponse + Version + GetPluginInfoRequest + GetPluginInfoResponse + CreateVolumeRequest + CreateVolumeResponse + VolumeCapability + CapacityRange + VolumeInfo + VolumeHandle + Credentials + DeleteVolumeRequest + DeleteVolumeResponse + ControllerPublishVolumeRequest + ControllerPublishVolumeResponse + NodeID + ControllerUnpublishVolumeRequest + ControllerUnpublishVolumeResponse + ValidateVolumeCapabilitiesRequest + ValidateVolumeCapabilitiesResponse + ListVolumesRequest + ListVolumesResponse + GetCapacityRequest + GetCapacityResponse + ControllerGetCapabilitiesRequest + ControllerGetCapabilitiesResponse + ControllerServiceCapability + NodePublishVolumeRequest + NodePublishVolumeResponse + NodeUnpublishVolumeRequest + NodeUnpublishVolumeResponse + GetNodeIDRequest + GetNodeIDResponse + ProbeNodeRequest + ProbeNodeResponse + NodeGetCapabilitiesRequest + NodeGetCapabilitiesResponse + NodeServiceCapability + Error +*/ +package csi + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type VolumeCapability_AccessMode_Mode int32 + +const ( + VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 + // Can be published as read/write at one node at a time. + VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 + // Can be published as readonly at one node at a time. + VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 + // Can be published as readonly at multiple nodes simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 + // Can be published as read/write at multiple nodes + // simultaneously. 
+ VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 +) + +var VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", +} +var VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, +} + +func (x VolumeCapability_AccessMode_Mode) String() string { + return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) +} +func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{7, 2, 0} +} + +type ControllerServiceCapability_RPC_Type int32 + +const ( + ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 + ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 + ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 + ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 + ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 +) + +var ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", +} +var ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, +} + +func (x ControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) +} +func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{27, 0, 
0} +} + +type NodeServiceCapability_RPC_Type int32 + +const ( + NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 +) + +var NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", +} +var NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, +} + +func (x NodeServiceCapability_RPC_Type) String() string { + return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) +} +func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{38, 0, 0} +} + +type Error_GeneralError_GeneralErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `GeneralErrorCode` code that an older CSI client is not aware + // of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + Error_GeneralError_UNKNOWN Error_GeneralError_GeneralErrorCode = 0 + // Indicates that an undefined error occurred. More human-readable + // information MAY be provided in the `error_description` field. + // The `caller_must_not_retry` field MUST be set appropriately by + // the Plugin to provide callers expected recovery behavior. + // + // Recovery behavior: Caller MAY retry (with exponential backoff), + // if `caller_must_not_retry` is set to false. Otherwise, the + // caller MUST not reissue the same request. + Error_GeneralError_UNDEFINED Error_GeneralError_GeneralErrorCode = 1 + // Indicates that the version specified in the request is not + // supported by the Plugin. The `caller_must_not_retry` field MUST + // be set to true. + // + // Recovery behavior: Caller MUST NOT retry; caller SHOULD call + // `GetSupportedVersions` to discover which CSI versions the + // Plugin supports. 
+ Error_GeneralError_UNSUPPORTED_REQUEST_VERSION Error_GeneralError_GeneralErrorCode = 2 + // Indicates that a required field is missing from the request. + // More human-readable information MAY be provided in the + // `error_description` field. The `caller_must_not_retry` field + // MUST be set to true. + // + // Recovery behavior: Caller MUST fix the request by adding the + // missing required field before retrying. + Error_GeneralError_MISSING_REQUIRED_FIELD Error_GeneralError_GeneralErrorCode = 3 +) + +var Error_GeneralError_GeneralErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UNDEFINED", + 2: "UNSUPPORTED_REQUEST_VERSION", + 3: "MISSING_REQUIRED_FIELD", +} +var Error_GeneralError_GeneralErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "UNDEFINED": 1, + "UNSUPPORTED_REQUEST_VERSION": 2, + "MISSING_REQUIRED_FIELD": 3, +} + +func (x Error_GeneralError_GeneralErrorCode) String() string { + return proto.EnumName(Error_GeneralError_GeneralErrorCode_name, int32(x)) +} +func (Error_GeneralError_GeneralErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 0, 0} +} + +type Error_CreateVolumeError_CreateVolumeErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `CreateVolumeErrorCode` code that an older CSI client is not + // aware of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + Error_CreateVolumeError_UNKNOWN Error_CreateVolumeError_CreateVolumeErrorCode = 0 + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. + // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. 
+ Error_CreateVolumeError_CALL_NOT_IMPLEMENTED Error_CreateVolumeError_CreateVolumeErrorCode = 1 + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + Error_CreateVolumeError_OPERATION_PENDING_FOR_VOLUME Error_CreateVolumeError_CreateVolumeErrorCode = 2 + // Indicates that the specified volume name is not allowed by the + // Plugin. More human-readable information MAY be provided in the + // `error_description` field. + // + // Recovery behavior: Caller MUST fix the name before retrying. + Error_CreateVolumeError_INVALID_VOLUME_NAME Error_CreateVolumeError_CreateVolumeErrorCode = 3 + // Indicates that the capacity range is not allowed by the Plugin. + // More human-readable information MAY be provided in the + // `error_description` field. + // + // Recovery behavior: Caller MUST fix the capacity range before // + // retrying. + Error_CreateVolumeError_UNSUPPORTED_CAPACITY_RANGE Error_CreateVolumeError_CreateVolumeErrorCode = 4 + // Indicates that a volume corresponding to the specified volume + // name already exists. + // + // Recovery behavior: Caller MAY assume the `CreateVolume` + // call succeeded. + Error_CreateVolumeError_VOLUME_ALREADY_EXISTS Error_CreateVolumeError_CreateVolumeErrorCode = 5 + // Indicates that a key in the opaque key/value parameters field + // is not supported by the Plugin. 
More human-readable information + // MAY be provided in the `error_description` field. This MAY + // occur, for example, due to caller error, Plugin version skew, + // etc. + // + // Recovery behavior: Caller MUST remove the unsupported key/value + // pair from the list of parameters before retrying. + Error_CreateVolumeError_UNSUPPORTED_PARAMETER_KEY Error_CreateVolumeError_CreateVolumeErrorCode = 6 + // Indicates that a value in one of the opaque key/value pairs + // parameter contains invalid data. More human-readable + // information (such as the corresponding key) MAY be provided in + // the `error_description` field. + // + // Recovery behavior: Caller MUST fix the invalid value before + // retrying. + Error_CreateVolumeError_INVALID_PARAMETER_VALUE Error_CreateVolumeError_CreateVolumeErrorCode = 7 +) + +var Error_CreateVolumeError_CreateVolumeErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_IMPLEMENTED", + 2: "OPERATION_PENDING_FOR_VOLUME", + 3: "INVALID_VOLUME_NAME", + 4: "UNSUPPORTED_CAPACITY_RANGE", + 5: "VOLUME_ALREADY_EXISTS", + 6: "UNSUPPORTED_PARAMETER_KEY", + 7: "INVALID_PARAMETER_VALUE", +} +var Error_CreateVolumeError_CreateVolumeErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_IMPLEMENTED": 1, + "OPERATION_PENDING_FOR_VOLUME": 2, + "INVALID_VOLUME_NAME": 3, + "UNSUPPORTED_CAPACITY_RANGE": 4, + "VOLUME_ALREADY_EXISTS": 5, + "UNSUPPORTED_PARAMETER_KEY": 6, + "INVALID_PARAMETER_VALUE": 7, +} + +func (x Error_CreateVolumeError_CreateVolumeErrorCode) String() string { + return proto.EnumName(Error_CreateVolumeError_CreateVolumeErrorCode_name, int32(x)) +} +func (Error_CreateVolumeError_CreateVolumeErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 1, 0} +} + +type Error_DeleteVolumeError_DeleteVolumeErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. 
However, if a Plugin returns a + // `DeleteVolumeErrorCode` code that an older CSI client is not + // aware of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + Error_DeleteVolumeError_UNKNOWN Error_DeleteVolumeError_DeleteVolumeErrorCode = 0 + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. + // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. + Error_DeleteVolumeError_CALL_NOT_IMPLEMENTED Error_DeleteVolumeError_DeleteVolumeErrorCode = 1 + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + Error_DeleteVolumeError_OPERATION_PENDING_FOR_VOLUME Error_DeleteVolumeError_DeleteVolumeErrorCode = 2 + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. 
+ Error_DeleteVolumeError_INVALID_VOLUME_HANDLE Error_DeleteVolumeError_DeleteVolumeErrorCode = 3 + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD assume the `DeleteVolume` call + // succeeded. + Error_DeleteVolumeError_VOLUME_DOES_NOT_EXIST Error_DeleteVolumeError_DeleteVolumeErrorCode = 4 +) + +var Error_DeleteVolumeError_DeleteVolumeErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_IMPLEMENTED", + 2: "OPERATION_PENDING_FOR_VOLUME", + 3: "INVALID_VOLUME_HANDLE", + 4: "VOLUME_DOES_NOT_EXIST", +} +var Error_DeleteVolumeError_DeleteVolumeErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_IMPLEMENTED": 1, + "OPERATION_PENDING_FOR_VOLUME": 2, + "INVALID_VOLUME_HANDLE": 3, + "VOLUME_DOES_NOT_EXIST": 4, +} + +func (x Error_DeleteVolumeError_DeleteVolumeErrorCode) String() string { + return proto.EnumName(Error_DeleteVolumeError_DeleteVolumeErrorCode_name, int32(x)) +} +func (Error_DeleteVolumeError_DeleteVolumeErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 2, 0} +} + +type Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `ControllerPublishVolumeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + Error_ControllerPublishVolumeError_UNKNOWN Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 0 + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. 
+ // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. + Error_ControllerPublishVolumeError_CALL_NOT_IMPLEMENTED Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 1 + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + Error_ControllerPublishVolumeError_OPERATION_PENDING_FOR_VOLUME Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 2 + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + Error_ControllerPublishVolumeError_INVALID_VOLUME_HANDLE Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 3 + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. 
+ Error_ControllerPublishVolumeError_VOLUME_DOES_NOT_EXIST Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 4 + // Indicates that a volume corresponding to the specified + // `VolumeHandle` is already attached to another node and does not + // support multi-node attach. If this error code is returned, the + // Plugin MUST also specify the `node_id` of the node the volume + // is already attached to. + // + // Recovery behavior: Caller MAY use the provided `node_ids` + // information to detach the volume from the other node. Caller + // SHOULD ensure the specified volume is not attached to any other + // node before retrying with exponential back off. + Error_ControllerPublishVolumeError_VOLUME_ALREADY_PUBLISHED Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 5 + // Indicates that a node corresponding to the specified `NodeID` + // does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `NodeID` is + // correct and that the node is available and has not been + // terminated or deleted before retrying with exponential backoff. + Error_ControllerPublishVolumeError_NODE_DOES_NOT_EXIST Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 6 + // Indicates that a volume corresponding to the specified + // `VolumeHandle` is already attached to the maximum supported + // number of nodes and therefore this operation can not be + // completed until the volume is detached from at least one of the + // existing nodes. When this error code is returned, the Plugin + // MUST also specify the `NodeId` of all the nodes the volume is + // attached to. + // + // Recovery behavior: Caller MAY use the provided `node_ids` + // information to detach the volume from one other node before + // retrying with exponential backoff. 
+ Error_ControllerPublishVolumeError_MAX_ATTACHED_NODES Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 7 + Error_ControllerPublishVolumeError_UNSUPPORTED_MOUNT_FLAGS Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 9 + Error_ControllerPublishVolumeError_UNSUPPORTED_VOLUME_TYPE Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 10 + Error_ControllerPublishVolumeError_UNSUPPORTED_FS_TYPE Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 11 + // Indicates that the specified `NodeID` is not allowed or + // understood by the Plugin, or the Plugin does not support the + // operation without a `NodeID`. More human-readable information + // MAY be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `NodeID` before + // retrying. + Error_ControllerPublishVolumeError_INVALID_NODE_ID Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode = 8 +) + +var Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_IMPLEMENTED", + 2: "OPERATION_PENDING_FOR_VOLUME", + 3: "INVALID_VOLUME_HANDLE", + 4: "VOLUME_DOES_NOT_EXIST", + 5: "VOLUME_ALREADY_PUBLISHED", + 6: "NODE_DOES_NOT_EXIST", + 7: "MAX_ATTACHED_NODES", + 9: "UNSUPPORTED_MOUNT_FLAGS", + 10: "UNSUPPORTED_VOLUME_TYPE", + 11: "UNSUPPORTED_FS_TYPE", + 8: "INVALID_NODE_ID", +} +var Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_IMPLEMENTED": 1, + "OPERATION_PENDING_FOR_VOLUME": 2, + "INVALID_VOLUME_HANDLE": 3, + "VOLUME_DOES_NOT_EXIST": 4, + "VOLUME_ALREADY_PUBLISHED": 5, + "NODE_DOES_NOT_EXIST": 6, + "MAX_ATTACHED_NODES": 7, + "UNSUPPORTED_MOUNT_FLAGS": 9, + "UNSUPPORTED_VOLUME_TYPE": 10, + "UNSUPPORTED_FS_TYPE": 11, + "INVALID_NODE_ID": 8, +} + +func (x Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode) String() string { + return 
proto.EnumName(Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode_name, int32(x)) +} +func (Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 3, 0} +} + +type Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `ControllerUnpublishVolumeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + Error_ControllerUnpublishVolumeError_UNKNOWN Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 0 + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. + // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. + Error_ControllerUnpublishVolumeError_CALL_NOT_IMPLEMENTED Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 1 + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. 
+ // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + Error_ControllerUnpublishVolumeError_OPERATION_PENDING_FOR_VOLUME Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 2 + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + Error_ControllerUnpublishVolumeError_INVALID_VOLUME_HANDLE Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 3 + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. + Error_ControllerUnpublishVolumeError_VOLUME_DOES_NOT_EXIST Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 4 + // Indicates that a node corresponding to the specified `NodeID` + // does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `NodeID` is + // correct and that the node is available and has not been + // terminated or deleted before retrying. + Error_ControllerUnpublishVolumeError_NODE_DOES_NOT_EXIST Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 5 + // Indicates that the specified `NodeID` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `NodeID` before + // retrying. 
+ Error_ControllerUnpublishVolumeError_INVALID_NODE_ID Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 6 + Error_ControllerUnpublishVolumeError_VOLUME_NOT_ATTACHED_TO_SPECIFIED_NODE Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 7 + // Indicates that the Plugin does not support the operation + // without a `NodeID`. + // + // Recovery behavior: Caller MUST specify the `NodeID` before + // retrying. + Error_ControllerUnpublishVolumeError_NODE_ID_REQUIRED Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode = 8 +) + +var Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_IMPLEMENTED", + 2: "OPERATION_PENDING_FOR_VOLUME", + 3: "INVALID_VOLUME_HANDLE", + 4: "VOLUME_DOES_NOT_EXIST", + 5: "NODE_DOES_NOT_EXIST", + 6: "INVALID_NODE_ID", + 7: "VOLUME_NOT_ATTACHED_TO_SPECIFIED_NODE", + 8: "NODE_ID_REQUIRED", +} +var Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_IMPLEMENTED": 1, + "OPERATION_PENDING_FOR_VOLUME": 2, + "INVALID_VOLUME_HANDLE": 3, + "VOLUME_DOES_NOT_EXIST": 4, + "NODE_DOES_NOT_EXIST": 5, + "INVALID_NODE_ID": 6, + "VOLUME_NOT_ATTACHED_TO_SPECIFIED_NODE": 7, + "NODE_ID_REQUIRED": 8, +} + +func (x Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode) String() string { + return proto.EnumName(Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode_name, int32(x)) +} +func (Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 4, 0} +} + +type Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. 
However, if a Plugin returns a + // `ValidateVolumeCapabilitiesErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + Error_ValidateVolumeCapabilitiesError_UNKNOWN Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode = 0 + // Indicates that a volume corresponding to the specified + // `VolumeInfo` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeInfo` + // is correct and that the volume is accessable and has not been + // deleted before retrying. + Error_ValidateVolumeCapabilitiesError_VOLUME_DOES_NOT_EXIST Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode = 1 + Error_ValidateVolumeCapabilitiesError_UNSUPPORTED_MOUNT_FLAGS Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode = 2 + Error_ValidateVolumeCapabilitiesError_UNSUPPORTED_VOLUME_TYPE Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode = 3 + Error_ValidateVolumeCapabilitiesError_UNSUPPORTED_FS_TYPE Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode = 4 + // Indicates that the specified `VolumeInfo` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeInfo` before + // retrying. 
+ Error_ValidateVolumeCapabilitiesError_INVALID_VOLUME_INFO Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode = 5 +) + +var Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "VOLUME_DOES_NOT_EXIST", + 2: "UNSUPPORTED_MOUNT_FLAGS", + 3: "UNSUPPORTED_VOLUME_TYPE", + 4: "UNSUPPORTED_FS_TYPE", + 5: "INVALID_VOLUME_INFO", +} +var Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "VOLUME_DOES_NOT_EXIST": 1, + "UNSUPPORTED_MOUNT_FLAGS": 2, + "UNSUPPORTED_VOLUME_TYPE": 3, + "UNSUPPORTED_FS_TYPE": 4, + "INVALID_VOLUME_INFO": 5, +} + +func (x Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode) String() string { + return proto.EnumName(Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode_name, int32(x)) +} +func (Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 5, 0} +} + +type Error_NodePublishVolumeError_NodePublishVolumeErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `NodePublishVolumeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + Error_NodePublishVolumeError_UNKNOWN Error_NodePublishVolumeError_NodePublishVolumeErrorCode = 0 + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. 
However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + Error_NodePublishVolumeError_OPERATION_PENDING_FOR_VOLUME Error_NodePublishVolumeError_NodePublishVolumeErrorCode = 1 + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. + Error_NodePublishVolumeError_VOLUME_DOES_NOT_EXIST Error_NodePublishVolumeError_NodePublishVolumeErrorCode = 2 + Error_NodePublishVolumeError_UNSUPPORTED_MOUNT_FLAGS Error_NodePublishVolumeError_NodePublishVolumeErrorCode = 3 + Error_NodePublishVolumeError_UNSUPPORTED_VOLUME_TYPE Error_NodePublishVolumeError_NodePublishVolumeErrorCode = 4 + Error_NodePublishVolumeError_UNSUPPORTED_FS_TYPE Error_NodePublishVolumeError_NodePublishVolumeErrorCode = 5 + Error_NodePublishVolumeError_MOUNT_ERROR Error_NodePublishVolumeError_NodePublishVolumeErrorCode = 6 + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. 
+ Error_NodePublishVolumeError_INVALID_VOLUME_HANDLE Error_NodePublishVolumeError_NodePublishVolumeErrorCode = 7 +) + +var Error_NodePublishVolumeError_NodePublishVolumeErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "OPERATION_PENDING_FOR_VOLUME", + 2: "VOLUME_DOES_NOT_EXIST", + 3: "UNSUPPORTED_MOUNT_FLAGS", + 4: "UNSUPPORTED_VOLUME_TYPE", + 5: "UNSUPPORTED_FS_TYPE", + 6: "MOUNT_ERROR", + 7: "INVALID_VOLUME_HANDLE", +} +var Error_NodePublishVolumeError_NodePublishVolumeErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "OPERATION_PENDING_FOR_VOLUME": 1, + "VOLUME_DOES_NOT_EXIST": 2, + "UNSUPPORTED_MOUNT_FLAGS": 3, + "UNSUPPORTED_VOLUME_TYPE": 4, + "UNSUPPORTED_FS_TYPE": 5, + "MOUNT_ERROR": 6, + "INVALID_VOLUME_HANDLE": 7, +} + +func (x Error_NodePublishVolumeError_NodePublishVolumeErrorCode) String() string { + return proto.EnumName(Error_NodePublishVolumeError_NodePublishVolumeErrorCode_name, int32(x)) +} +func (Error_NodePublishVolumeError_NodePublishVolumeErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 6, 0} +} + +type Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `NodeUnpublishVolumeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + Error_NodeUnpublishVolumeError_UNKNOWN Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode = 0 + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. 
However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + Error_NodeUnpublishVolumeError_OPERATION_PENDING_FOR_VOLUME Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode = 1 + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. + Error_NodeUnpublishVolumeError_VOLUME_DOES_NOT_EXIST Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode = 2 + Error_NodeUnpublishVolumeError_UNMOUNT_ERROR Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode = 3 + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. 
+ Error_NodeUnpublishVolumeError_INVALID_VOLUME_HANDLE Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode = 4 +) + +var Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "OPERATION_PENDING_FOR_VOLUME", + 2: "VOLUME_DOES_NOT_EXIST", + 3: "UNMOUNT_ERROR", + 4: "INVALID_VOLUME_HANDLE", +} +var Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "OPERATION_PENDING_FOR_VOLUME": 1, + "VOLUME_DOES_NOT_EXIST": 2, + "UNMOUNT_ERROR": 3, + "INVALID_VOLUME_HANDLE": 4, +} + +func (x Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode) String() string { + return proto.EnumName(Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode_name, int32(x)) +} +func (Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 7, 0} +} + +type Error_ProbeNodeError_ProbeNodeErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `ProbeNodeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. 
+ Error_ProbeNodeError_UNKNOWN Error_ProbeNodeError_ProbeNodeErrorCode = 0 + Error_ProbeNodeError_BAD_PLUGIN_CONFIG Error_ProbeNodeError_ProbeNodeErrorCode = 1 + Error_ProbeNodeError_MISSING_REQUIRED_HOST_DEPENDENCY Error_ProbeNodeError_ProbeNodeErrorCode = 2 +) + +var Error_ProbeNodeError_ProbeNodeErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "BAD_PLUGIN_CONFIG", + 2: "MISSING_REQUIRED_HOST_DEPENDENCY", +} +var Error_ProbeNodeError_ProbeNodeErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "BAD_PLUGIN_CONFIG": 1, + "MISSING_REQUIRED_HOST_DEPENDENCY": 2, +} + +func (x Error_ProbeNodeError_ProbeNodeErrorCode) String() string { + return proto.EnumName(Error_ProbeNodeError_ProbeNodeErrorCode_name, int32(x)) +} +func (Error_ProbeNodeError_ProbeNodeErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 8, 0} +} + +type Error_GetNodeIDError_GetNodeIDErrorCode int32 + +const ( + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `GetNodeIDErrorCode` code that an older CSI client is not aware + // of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. 
+ Error_GetNodeIDError_UNKNOWN Error_GetNodeIDError_GetNodeIDErrorCode = 0 + Error_GetNodeIDError_BAD_PLUGIN_CONFIG Error_GetNodeIDError_GetNodeIDErrorCode = 1 + Error_GetNodeIDError_MISSING_REQUIRED_HOST_DEPENDENCY Error_GetNodeIDError_GetNodeIDErrorCode = 2 +) + +var Error_GetNodeIDError_GetNodeIDErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "BAD_PLUGIN_CONFIG", + 2: "MISSING_REQUIRED_HOST_DEPENDENCY", +} +var Error_GetNodeIDError_GetNodeIDErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "BAD_PLUGIN_CONFIG": 1, + "MISSING_REQUIRED_HOST_DEPENDENCY": 2, +} + +func (x Error_GetNodeIDError_GetNodeIDErrorCode) String() string { + return proto.EnumName(Error_GetNodeIDError_GetNodeIDErrorCode_name, int32(x)) +} +func (Error_GetNodeIDError_GetNodeIDErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 9, 0} +} + +// ////// +// ////// +type GetSupportedVersionsRequest struct { +} + +func (m *GetSupportedVersionsRequest) Reset() { *m = GetSupportedVersionsRequest{} } +func (m *GetSupportedVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSupportedVersionsRequest) ProtoMessage() {} +func (*GetSupportedVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type GetSupportedVersionsResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *GetSupportedVersionsResponse_Result_ + // *GetSupportedVersionsResponse_Error + Reply isGetSupportedVersionsResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *GetSupportedVersionsResponse) Reset() { *m = GetSupportedVersionsResponse{} } +func (m *GetSupportedVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetSupportedVersionsResponse) ProtoMessage() {} +func (*GetSupportedVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type isGetSupportedVersionsResponse_Reply interface { + isGetSupportedVersionsResponse_Reply() +} + +type GetSupportedVersionsResponse_Result_ struct { + Result *GetSupportedVersionsResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type GetSupportedVersionsResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*GetSupportedVersionsResponse_Result_) isGetSupportedVersionsResponse_Reply() {} +func (*GetSupportedVersionsResponse_Error) isGetSupportedVersionsResponse_Reply() {} + +func (m *GetSupportedVersionsResponse) GetReply() isGetSupportedVersionsResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *GetSupportedVersionsResponse) GetResult() *GetSupportedVersionsResponse_Result { + if x, ok := m.GetReply().(*GetSupportedVersionsResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *GetSupportedVersionsResponse) GetError() *Error { + if x, ok := m.GetReply().(*GetSupportedVersionsResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GetSupportedVersionsResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GetSupportedVersionsResponse_OneofMarshaler, _GetSupportedVersionsResponse_OneofUnmarshaler, _GetSupportedVersionsResponse_OneofSizer, []interface{}{ + (*GetSupportedVersionsResponse_Result_)(nil), + (*GetSupportedVersionsResponse_Error)(nil), + } +} + +func _GetSupportedVersionsResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GetSupportedVersionsResponse) + // reply + switch x := m.Reply.(type) { + case *GetSupportedVersionsResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *GetSupportedVersionsResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GetSupportedVersionsResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _GetSupportedVersionsResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GetSupportedVersionsResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GetSupportedVersionsResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &GetSupportedVersionsResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &GetSupportedVersionsResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _GetSupportedVersionsResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GetSupportedVersionsResponse) + // reply + switch x := m.Reply.(type) { + case 
*GetSupportedVersionsResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GetSupportedVersionsResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type GetSupportedVersionsResponse_Result struct { + // All the CSI versions that the Plugin supports. This field is + // REQUIRED. + SupportedVersions []*Version `protobuf:"bytes,1,rep,name=supported_versions,json=supportedVersions" json:"supported_versions,omitempty"` +} + +func (m *GetSupportedVersionsResponse_Result) Reset() { *m = GetSupportedVersionsResponse_Result{} } +func (m *GetSupportedVersionsResponse_Result) String() string { return proto.CompactTextString(m) } +func (*GetSupportedVersionsResponse_Result) ProtoMessage() {} +func (*GetSupportedVersionsResponse_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +func (m *GetSupportedVersionsResponse_Result) GetSupportedVersions() []*Version { + if m != nil { + return m.SupportedVersions + } + return nil +} + +// Specifies a version in Semantic Version 2.0 format. 
+// (http://semver.org/spec/v2.0.0.html) +type Version struct { + Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Version) GetMajor() uint32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() uint32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() uint32 { + if m != nil { + return m.Patch + } + return 0 +} + +// ////// +// ////// +type GetPluginInfoRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } +func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoRequest) ProtoMessage() {} +func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GetPluginInfoRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type GetPluginInfoResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *GetPluginInfoResponse_Result_ + // *GetPluginInfoResponse_Error + Reply isGetPluginInfoResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } +func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse) ProtoMessage() {} +func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type isGetPluginInfoResponse_Reply interface { + isGetPluginInfoResponse_Reply() +} + +type GetPluginInfoResponse_Result_ struct { + Result *GetPluginInfoResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type GetPluginInfoResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*GetPluginInfoResponse_Result_) isGetPluginInfoResponse_Reply() {} +func (*GetPluginInfoResponse_Error) isGetPluginInfoResponse_Reply() {} + +func (m *GetPluginInfoResponse) GetReply() isGetPluginInfoResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *GetPluginInfoResponse) GetResult() *GetPluginInfoResponse_Result { + if x, ok := m.GetReply().(*GetPluginInfoResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *GetPluginInfoResponse) GetError() *Error { + if x, ok := m.GetReply().(*GetPluginInfoResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GetPluginInfoResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GetPluginInfoResponse_OneofMarshaler, _GetPluginInfoResponse_OneofUnmarshaler, _GetPluginInfoResponse_OneofSizer, []interface{}{ + (*GetPluginInfoResponse_Result_)(nil), + (*GetPluginInfoResponse_Error)(nil), + } +} + +func _GetPluginInfoResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GetPluginInfoResponse) + // reply + switch x := m.Reply.(type) { + case *GetPluginInfoResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *GetPluginInfoResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GetPluginInfoResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _GetPluginInfoResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GetPluginInfoResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GetPluginInfoResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &GetPluginInfoResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &GetPluginInfoResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _GetPluginInfoResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GetPluginInfoResponse) + // reply + switch x := m.Reply.(type) { + case *GetPluginInfoResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n 
+= s + case *GetPluginInfoResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type GetPluginInfoResponse_Result struct { + // This field is REQUIRED. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // This field is REQUIRED. Value of this field is opaque to the CO. + VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion" json:"vendor_version,omitempty"` + // This field is OPTIONAL. Values are opaque to the CO. + Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *GetPluginInfoResponse_Result) Reset() { *m = GetPluginInfoResponse_Result{} } +func (m *GetPluginInfoResponse_Result) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse_Result) ProtoMessage() {} +func (*GetPluginInfoResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +func (m *GetPluginInfoResponse_Result) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPluginInfoResponse_Result) GetVendorVersion() string { + if m != nil { + return m.VendorVersion + } + return "" +} + +func (m *GetPluginInfoResponse_Result) GetManifest() map[string]string { + if m != nil { + return m.Manifest + } + return nil +} + +// ////// +// ////// +type CreateVolumeRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. 
If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. + CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange" json:"capacity_range,omitempty"` + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,4,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. 
COs will treat these as opaque. + Parameters map[string]string `protobuf:"bytes,5,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // End user credentials used to authenticate/authorize volume creation + // request. + // This field is OPTIONAL. + UserCredentials *Credentials `protobuf:"bytes,6,opt,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty"` +} + +func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } +func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeRequest) ProtoMessage() {} +func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *CreateVolumeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *CreateVolumeRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *CreateVolumeRequest) GetUserCredentials() *Credentials { + if m != nil { + return m.UserCredentials + } + return nil +} + +type CreateVolumeResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *CreateVolumeResponse_Result_ + // *CreateVolumeResponse_Error + Reply isCreateVolumeResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } +func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse) ProtoMessage() {} +func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type isCreateVolumeResponse_Reply interface { + isCreateVolumeResponse_Reply() +} + +type CreateVolumeResponse_Result_ struct { + Result *CreateVolumeResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type CreateVolumeResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*CreateVolumeResponse_Result_) isCreateVolumeResponse_Reply() {} +func (*CreateVolumeResponse_Error) isCreateVolumeResponse_Reply() {} + +func (m *CreateVolumeResponse) GetReply() isCreateVolumeResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *CreateVolumeResponse) GetResult() *CreateVolumeResponse_Result { + if x, ok := m.GetReply().(*CreateVolumeResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *CreateVolumeResponse) GetError() *Error { + if x, ok := m.GetReply().(*CreateVolumeResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CreateVolumeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CreateVolumeResponse_OneofMarshaler, _CreateVolumeResponse_OneofUnmarshaler, _CreateVolumeResponse_OneofSizer, []interface{}{ + (*CreateVolumeResponse_Result_)(nil), + (*CreateVolumeResponse_Error)(nil), + } +} + +func _CreateVolumeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CreateVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *CreateVolumeResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *CreateVolumeResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CreateVolumeResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _CreateVolumeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CreateVolumeResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CreateVolumeResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &CreateVolumeResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &CreateVolumeResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _CreateVolumeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CreateVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *CreateVolumeResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*CreateVolumeResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type CreateVolumeResponse_Result struct { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identifying the volume. This field is REQUIRED. + VolumeInfo *VolumeInfo `protobuf:"bytes,1,opt,name=volume_info,json=volumeInfo" json:"volume_info,omitempty"` +} + +func (m *CreateVolumeResponse_Result) Reset() { *m = CreateVolumeResponse_Result{} } +func (m *CreateVolumeResponse_Result) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse_Result) ProtoMessage() {} +func (*CreateVolumeResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} } + +func (m *CreateVolumeResponse_Result) GetVolumeInfo() *VolumeInfo { + if m != nil { + return m.VolumeInfo + } + return nil +} + +// Specify a capability of a volume. +type VolumeCapability struct { + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + // + // Types that are valid to be assigned to AccessType: + // *VolumeCapability_Block + // *VolumeCapability_Mount + AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` + // This is a REQUIRED field. 
+ AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode" json:"access_mode,omitempty"` +} + +func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } +func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability) ProtoMessage() {} +func (*VolumeCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() +} + +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,oneof"` +} +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,oneof"` +} + +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} + +func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { + if m != nil { + return m.AccessType + } + return nil +} + +func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { + return x.Block + } + return nil +} + +func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { + return x.Mount + } + return nil +} + +func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if m != nil { + return m.AccessMode + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), + } +} + +func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Block); err != nil { + return err + } + case *VolumeCapability_Mount: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mount); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) + } + return nil +} + +func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeCapability) + switch tag { + case 1: // access_type.block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_BlockVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Block{msg} + return true, err + case 2: // access_type.mount + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_MountVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Mount{msg} + return true, err + default: + return false, nil + } +} + +func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + s := proto.Size(x.Block) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*VolumeCapability_Mount: + s := proto.Size(x.Mount) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Indicate that the volume will be accessed via the block device API. +type VolumeCapability_BlockVolume struct { +} + +func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } +func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_BlockVolume) ProtoMessage() {} +func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// Indicate that the volume will be accessed via the filesystem API. +type VolumeCapability_MountVolume struct { + // The filesystem type. This field is OPTIONAL. + FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType" json:"fs_type,omitempty"` + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. + MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags" json:"mount_flags,omitempty"` +} + +func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} } +func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_MountVolume) ProtoMessage() {} +func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 1} } + +func (m *VolumeCapability_MountVolume) GetFsType() string { + if m != nil { + return m.FsType + } + return "" +} + +func (m *VolumeCapability_MountVolume) GetMountFlags() []string { + if m != nil { + return m.MountFlags + } + return nil +} + +// Specify how a volume can be accessed. 
+type VolumeCapability_AccessMode struct { + // This field is REQUIRED. + Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,enum=csi.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"` +} + +func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} } +func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_AccessMode) ProtoMessage() {} +func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 2} } + +func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode { + if m != nil { + return m.Mode + } + return VolumeCapability_AccessMode_UNKNOWN +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` can be set to the same value. At +// least one of the these fields MUST be specified. +type CapacityRange struct { + // Volume must be at least this big. + RequiredBytes uint64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes" json:"required_bytes,omitempty"` + // Volume must not be bigger than this. + LimitBytes uint64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"` +} + +func (m *CapacityRange) Reset() { *m = CapacityRange{} } +func (m *CapacityRange) String() string { return proto.CompactTextString(m) } +func (*CapacityRange) ProtoMessage() {} +func (*CapacityRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CapacityRange) GetRequiredBytes() uint64 { + if m != nil { + return m.RequiredBytes + } + return 0 +} + +func (m *CapacityRange) GetLimitBytes() uint64 { + if m != nil { + return m.LimitBytes + } + return 0 +} + +// The information about a provisioned volume. +type VolumeInfo struct { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set, it indicates that the capacity of the volume is unknown (e.g., + // NFS share). 
If set, it MUST be non-zero. + CapacityBytes uint64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes" json:"capacity_bytes,omitempty"` + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + Handle *VolumeHandle `protobuf:"bytes,2,opt,name=handle" json:"handle,omitempty"` +} + +func (m *VolumeInfo) Reset() { *m = VolumeInfo{} } +func (m *VolumeInfo) String() string { return proto.CompactTextString(m) } +func (*VolumeInfo) ProtoMessage() {} +func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *VolumeInfo) GetCapacityBytes() uint64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *VolumeInfo) GetHandle() *VolumeHandle { + if m != nil { + return m.Handle + } + return nil +} + +// VolumeHandle objects are generated by Plugin implementations to +// serve two purposes: first, to efficiently identify a volume known +// to the Plugin implementation; second, to capture additional, opaque, +// possibly sensitive information associated with a volume. It SHALL +// NOT change over the lifetime of the volume and MAY be safely cached +// by the CO. +// Since this object will be passed around by the CO, it is RECOMMENDED +// that each Plugin keeps the information herein as small as possible. +// The total bytes of a serialized VolumeHandle must be less than 1 MiB. +type VolumeHandle struct { + // ID is the identity of the provisioned volume specified by the + // Plugin. This field is REQUIRED. + // This information SHALL NOT be considered sensitive such that, for + // example, the CO MAY generate log messages that include this data. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // Metadata captures additional, possibly sensitive, information about + // a volume in the form of key-value pairs. This field is OPTIONAL. 
+ // Since this field MAY contain sensitive information, the CO MUST NOT + // leak this information to untrusted entities. + Metadata map[string]string `protobuf:"bytes,2,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *VolumeHandle) Reset() { *m = VolumeHandle{} } +func (m *VolumeHandle) String() string { return proto.CompactTextString(m) } +func (*VolumeHandle) ProtoMessage() {} +func (*VolumeHandle) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *VolumeHandle) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *VolumeHandle) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +// A standard way to encode credential data. The total bytes of the +// values in the Data field must be less than 1 Mebibyte. +type Credentials struct { + // Data contains the credential data, for example username and + // password. Each key must consist of alphanumeric characters, '-', + // '_' or '.'. Each value MUST contain a valid string. An SP MAY + // choose to accept binary (non-string) data by using a binary-to-text + // encoding scheme, like base64. An SP SHALL advertise the + // requirements for credentials in documentation. COs SHALL permit + // users to pass through the required credentials. This information is + // sensitive and MUST be treated as such (not logged, etc.) by the CO. + // This field is REQUIRED. 
+ Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Credentials) Reset() { *m = Credentials{} } +func (m *Credentials) String() string { return proto.CompactTextString(m) } +func (*Credentials) ProtoMessage() {} +func (*Credentials) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Credentials) GetData() map[string]string { + if m != nil { + return m.Data + } + return nil +} + +// ////// +// ////// +type DeleteVolumeRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The handle of the volume to be deprovisioned. + // This field is REQUIRED. + VolumeHandle *VolumeHandle `protobuf:"bytes,2,opt,name=volume_handle,json=volumeHandle" json:"volume_handle,omitempty"` + // End user credentials used to authenticate/authorize volume deletion + // request. + // This field is OPTIONAL. + UserCredentials *Credentials `protobuf:"bytes,3,opt,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty"` +} + +func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } +func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeRequest) ProtoMessage() {} +func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *DeleteVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *DeleteVolumeRequest) GetVolumeHandle() *VolumeHandle { + if m != nil { + return m.VolumeHandle + } + return nil +} + +func (m *DeleteVolumeRequest) GetUserCredentials() *Credentials { + if m != nil { + return m.UserCredentials + } + return nil +} + +type DeleteVolumeResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *DeleteVolumeResponse_Result_ + // *DeleteVolumeResponse_Error + Reply isDeleteVolumeResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } +func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeResponse) ProtoMessage() {} +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +type isDeleteVolumeResponse_Reply interface { + isDeleteVolumeResponse_Reply() +} + +type DeleteVolumeResponse_Result_ struct { + Result *DeleteVolumeResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type DeleteVolumeResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*DeleteVolumeResponse_Result_) isDeleteVolumeResponse_Reply() {} +func (*DeleteVolumeResponse_Error) isDeleteVolumeResponse_Reply() {} + +func (m *DeleteVolumeResponse) GetReply() isDeleteVolumeResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *DeleteVolumeResponse) GetResult() *DeleteVolumeResponse_Result { + if x, ok := m.GetReply().(*DeleteVolumeResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *DeleteVolumeResponse) GetError() *Error { + if x, ok := m.GetReply().(*DeleteVolumeResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*DeleteVolumeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DeleteVolumeResponse_OneofMarshaler, _DeleteVolumeResponse_OneofUnmarshaler, _DeleteVolumeResponse_OneofSizer, []interface{}{ + (*DeleteVolumeResponse_Result_)(nil), + (*DeleteVolumeResponse_Error)(nil), + } +} + +func _DeleteVolumeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DeleteVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *DeleteVolumeResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *DeleteVolumeResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("DeleteVolumeResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _DeleteVolumeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DeleteVolumeResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteVolumeResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &DeleteVolumeResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &DeleteVolumeResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _DeleteVolumeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DeleteVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *DeleteVolumeResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*DeleteVolumeResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type DeleteVolumeResponse_Result struct { +} + +func (m *DeleteVolumeResponse_Result) Reset() { *m = DeleteVolumeResponse_Result{} } +func (m *DeleteVolumeResponse_Result) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeResponse_Result) ProtoMessage() {} +func (*DeleteVolumeResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } + +// ////// +// ////// +type ControllerPublishVolumeRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The handle of the volume to be used on a node. + // This field is REQUIRED. + VolumeHandle *VolumeHandle `protobuf:"bytes,2,opt,name=volume_handle,json=volumeHandle" json:"volume_handle,omitempty"` + // The ID of the node. This field is OPTIONAL. The CO SHALL set (or + // clear) this field to match the `NodeID` returned by `GetNodeID`. + // `GetNodeID` is allowed to omit `NodeID` from a successful `Result`; + // in such cases the CO SHALL NOT specify this field. + NodeId *NodeID `protobuf:"bytes,3,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,5,opt,name=readonly" json:"readonly,omitempty"` + // End user credentials used to authenticate/authorize controller + // publish request. + // This field is OPTIONAL. 
+ UserCredentials *Credentials `protobuf:"bytes,6,opt,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty"` +} + +func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } +func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeRequest) ProtoMessage() {} +func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *ControllerPublishVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetVolumeHandle() *VolumeHandle { + if m != nil { + return m.VolumeHandle + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetNodeId() *NodeID { + if m != nil { + return m.NodeId + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *ControllerPublishVolumeRequest) GetUserCredentials() *Credentials { + if m != nil { + return m.UserCredentials + } + return nil +} + +type ControllerPublishVolumeResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *ControllerPublishVolumeResponse_Result_ + // *ControllerPublishVolumeResponse_Error + Reply isControllerPublishVolumeResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } +func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeResponse) ProtoMessage() {} +func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{15} +} + +type isControllerPublishVolumeResponse_Reply interface { + isControllerPublishVolumeResponse_Reply() +} + +type ControllerPublishVolumeResponse_Result_ struct { + Result *ControllerPublishVolumeResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type ControllerPublishVolumeResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*ControllerPublishVolumeResponse_Result_) isControllerPublishVolumeResponse_Reply() {} +func (*ControllerPublishVolumeResponse_Error) isControllerPublishVolumeResponse_Reply() {} + +func (m *ControllerPublishVolumeResponse) GetReply() isControllerPublishVolumeResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *ControllerPublishVolumeResponse) GetResult() *ControllerPublishVolumeResponse_Result { + if x, ok := m.GetReply().(*ControllerPublishVolumeResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *ControllerPublishVolumeResponse) GetError() *Error { + if x, ok := m.GetReply().(*ControllerPublishVolumeResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ControllerPublishVolumeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerPublishVolumeResponse_OneofMarshaler, _ControllerPublishVolumeResponse_OneofUnmarshaler, _ControllerPublishVolumeResponse_OneofSizer, []interface{}{ + (*ControllerPublishVolumeResponse_Result_)(nil), + (*ControllerPublishVolumeResponse_Error)(nil), + } +} + +func _ControllerPublishVolumeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerPublishVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *ControllerPublishVolumeResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *ControllerPublishVolumeResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerPublishVolumeResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _ControllerPublishVolumeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerPublishVolumeResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerPublishVolumeResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &ControllerPublishVolumeResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &ControllerPublishVolumeResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerPublishVolumeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ControllerPublishVolumeResponse) + // reply + 
switch x := m.Reply.(type) { + case *ControllerPublishVolumeResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ControllerPublishVolumeResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerPublishVolumeResponse_Result struct { + // The SP specific information that will be passed to the Plugin in + // the subsequent `NodePublishVolume` call for the given volume. + // This information is opaque to the CO. This field is OPTIONAL. + PublishVolumeInfo map[string]string `protobuf:"bytes,1,rep,name=publish_volume_info,json=publishVolumeInfo" json:"publish_volume_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ControllerPublishVolumeResponse_Result) Reset() { + *m = ControllerPublishVolumeResponse_Result{} +} +func (m *ControllerPublishVolumeResponse_Result) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeResponse_Result) ProtoMessage() {} +func (*ControllerPublishVolumeResponse_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{15, 0} +} + +func (m *ControllerPublishVolumeResponse_Result) GetPublishVolumeInfo() map[string]string { + if m != nil { + return m.PublishVolumeInfo + } + return nil +} + +type NodeID struct { + // Information about a node in the form of key-value pairs. This + // information is opaque to the CO. Given this information will be + // passed around by the CO, it is RECOMMENDED that each Plugin keeps + // this information as small as possible. This field is REQUIRED. 
+ Values map[string]string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *NodeID) Reset() { *m = NodeID{} } +func (m *NodeID) String() string { return proto.CompactTextString(m) } +func (*NodeID) ProtoMessage() {} +func (*NodeID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *NodeID) GetValues() map[string]string { + if m != nil { + return m.Values + } + return nil +} + +// ////// +// ////// +type ControllerUnpublishVolumeRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The handle of the volume. This field is REQUIRED. + VolumeHandle *VolumeHandle `protobuf:"bytes,2,opt,name=volume_handle,json=volumeHandle" json:"volume_handle,omitempty"` + // The ID of the node. This field is OPTIONAL. The CO SHALL set (or + // clear) this field to match the `NodeID` returned by `GetNodeID`. + // `GetNodeID` is allowed to omit `NodeID` from a successful `Result`; + // in such cases the CO SHALL NOT specify this field. + // + // If `GetNodeID` does not omit `NodeID` from a successful `Result`, + // the CO MAY omit this field as well, indicating that it does not + // know which node the volume was previously used. The Plugin SHOULD + // return an Error if this is not supported. + NodeId *NodeID `protobuf:"bytes,3,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // End user credentials used to authenticate/authorize controller + // unpublish request. + // This field is OPTIONAL. 
+ UserCredentials *Credentials `protobuf:"bytes,4,opt,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty"` +} + +func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } +func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} +func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{17} +} + +func (m *ControllerUnpublishVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *ControllerUnpublishVolumeRequest) GetVolumeHandle() *VolumeHandle { + if m != nil { + return m.VolumeHandle + } + return nil +} + +func (m *ControllerUnpublishVolumeRequest) GetNodeId() *NodeID { + if m != nil { + return m.NodeId + } + return nil +} + +func (m *ControllerUnpublishVolumeRequest) GetUserCredentials() *Credentials { + if m != nil { + return m.UserCredentials + } + return nil +} + +type ControllerUnpublishVolumeResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *ControllerUnpublishVolumeResponse_Result_ + // *ControllerUnpublishVolumeResponse_Error + Reply isControllerUnpublishVolumeResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } +func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} +func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{18} +} + +type isControllerUnpublishVolumeResponse_Reply interface { + isControllerUnpublishVolumeResponse_Reply() +} + +type ControllerUnpublishVolumeResponse_Result_ struct { + Result *ControllerUnpublishVolumeResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type ControllerUnpublishVolumeResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*ControllerUnpublishVolumeResponse_Result_) isControllerUnpublishVolumeResponse_Reply() {} +func (*ControllerUnpublishVolumeResponse_Error) isControllerUnpublishVolumeResponse_Reply() {} + +func (m *ControllerUnpublishVolumeResponse) GetReply() isControllerUnpublishVolumeResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *ControllerUnpublishVolumeResponse) GetResult() *ControllerUnpublishVolumeResponse_Result { + if x, ok := m.GetReply().(*ControllerUnpublishVolumeResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *ControllerUnpublishVolumeResponse) GetError() *Error { + if x, ok := m.GetReply().(*ControllerUnpublishVolumeResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ControllerUnpublishVolumeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerUnpublishVolumeResponse_OneofMarshaler, _ControllerUnpublishVolumeResponse_OneofUnmarshaler, _ControllerUnpublishVolumeResponse_OneofSizer, []interface{}{ + (*ControllerUnpublishVolumeResponse_Result_)(nil), + (*ControllerUnpublishVolumeResponse_Error)(nil), + } +} + +func _ControllerUnpublishVolumeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerUnpublishVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *ControllerUnpublishVolumeResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *ControllerUnpublishVolumeResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerUnpublishVolumeResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _ControllerUnpublishVolumeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerUnpublishVolumeResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerUnpublishVolumeResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &ControllerUnpublishVolumeResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &ControllerUnpublishVolumeResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerUnpublishVolumeResponse_OneofSizer(msg proto.Message) (n int) { + m := 
msg.(*ControllerUnpublishVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *ControllerUnpublishVolumeResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ControllerUnpublishVolumeResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerUnpublishVolumeResponse_Result struct { +} + +func (m *ControllerUnpublishVolumeResponse_Result) Reset() { + *m = ControllerUnpublishVolumeResponse_Result{} +} +func (m *ControllerUnpublishVolumeResponse_Result) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeResponse_Result) ProtoMessage() {} +func (*ControllerUnpublishVolumeResponse_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{18, 0} +} + +// ////// +// ////// +type ValidateVolumeCapabilitiesRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The information about the volume to check. This is a REQUIRED + // field. + VolumeInfo *VolumeInfo `protobuf:"bytes,2,opt,name=volume_info,json=volumeInfo" json:"volume_info,omitempty"` + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "supported" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. 
+ VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` +} + +func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } +func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{19} +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeInfo() *VolumeInfo { + if m != nil { + return m.VolumeInfo + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +type ValidateVolumeCapabilitiesResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *ValidateVolumeCapabilitiesResponse_Result_ + // *ValidateVolumeCapabilitiesResponse_Error + Reply isValidateVolumeCapabilitiesResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } +func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{20} +} + +type isValidateVolumeCapabilitiesResponse_Reply interface { + isValidateVolumeCapabilitiesResponse_Reply() +} + +type ValidateVolumeCapabilitiesResponse_Result_ struct { + Result *ValidateVolumeCapabilitiesResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type ValidateVolumeCapabilitiesResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*ValidateVolumeCapabilitiesResponse_Result_) isValidateVolumeCapabilitiesResponse_Reply() {} +func (*ValidateVolumeCapabilitiesResponse_Error) isValidateVolumeCapabilitiesResponse_Reply() {} + +func (m *ValidateVolumeCapabilitiesResponse) GetReply() isValidateVolumeCapabilitiesResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse) GetResult() *ValidateVolumeCapabilitiesResponse_Result { + if x, ok := m.GetReply().(*ValidateVolumeCapabilitiesResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse) GetError() *Error { + if x, ok := m.GetReply().(*ValidateVolumeCapabilitiesResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ValidateVolumeCapabilitiesResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ValidateVolumeCapabilitiesResponse_OneofMarshaler, _ValidateVolumeCapabilitiesResponse_OneofUnmarshaler, _ValidateVolumeCapabilitiesResponse_OneofSizer, []interface{}{ + (*ValidateVolumeCapabilitiesResponse_Result_)(nil), + (*ValidateVolumeCapabilitiesResponse_Error)(nil), + } +} + +func _ValidateVolumeCapabilitiesResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ValidateVolumeCapabilitiesResponse) + // reply + switch x := m.Reply.(type) { + case *ValidateVolumeCapabilitiesResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *ValidateVolumeCapabilitiesResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ValidateVolumeCapabilitiesResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _ValidateVolumeCapabilitiesResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ValidateVolumeCapabilitiesResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ValidateVolumeCapabilitiesResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &ValidateVolumeCapabilitiesResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &ValidateVolumeCapabilitiesResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _ValidateVolumeCapabilitiesResponse_OneofSizer(msg proto.Message) (n int) { + m := 
msg.(*ValidateVolumeCapabilitiesResponse) + // reply + switch x := m.Reply.(type) { + case *ValidateVolumeCapabilitiesResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ValidateVolumeCapabilitiesResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ValidateVolumeCapabilitiesResponse_Result struct { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. + Supported bool `protobuf:"varint,1,opt,name=supported" json:"supported,omitempty"` + // Message to the CO if `supported` above is false. This field is + // OPTIONAL. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *ValidateVolumeCapabilitiesResponse_Result) Reset() { + *m = ValidateVolumeCapabilitiesResponse_Result{} +} +func (m *ValidateVolumeCapabilitiesResponse_Result) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse_Result) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{20, 0} +} + +func (m *ValidateVolumeCapabilitiesResponse_Result) GetSupported() bool { + if m != nil { + return m.Supported + } + return false +} + +func (m *ValidateVolumeCapabilitiesResponse_Result) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// ////// +// ////// +type ListVolumesRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // If specified, the Plugin MUST NOT return more entries than this + // number in the response. 
If the actual number of entries is more + // than this number, the Plugin MUST set `next_token` in the response + // which can be used to get the next page of entries in the subsequent + // `ListVolumes` call. This field is OPTIONAL. If not specified, it + // means there is no restriction on the number of entries that can be + // returned. + MaxEntries uint32 `protobuf:"varint,2,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + StartingToken string `protobuf:"bytes,3,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` +} + +func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } +func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVolumesRequest) ProtoMessage() {} +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *ListVolumesRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *ListVolumesRequest) GetMaxEntries() uint32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListVolumesRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +type ListVolumesResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *ListVolumesResponse_Result_ + // *ListVolumesResponse_Error + Reply isListVolumesResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } +func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse) ProtoMessage() {} +func (*ListVolumesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +type isListVolumesResponse_Reply interface { + isListVolumesResponse_Reply() +} + +type ListVolumesResponse_Result_ struct { + Result *ListVolumesResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type ListVolumesResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*ListVolumesResponse_Result_) isListVolumesResponse_Reply() {} +func (*ListVolumesResponse_Error) isListVolumesResponse_Reply() {} + +func (m *ListVolumesResponse) GetReply() isListVolumesResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *ListVolumesResponse) GetResult() *ListVolumesResponse_Result { + if x, ok := m.GetReply().(*ListVolumesResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *ListVolumesResponse) GetError() *Error { + if x, ok := m.GetReply().(*ListVolumesResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ListVolumesResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ListVolumesResponse_OneofMarshaler, _ListVolumesResponse_OneofUnmarshaler, _ListVolumesResponse_OneofSizer, []interface{}{ + (*ListVolumesResponse_Result_)(nil), + (*ListVolumesResponse_Error)(nil), + } +} + +func _ListVolumesResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ListVolumesResponse) + // reply + switch x := m.Reply.(type) { + case *ListVolumesResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *ListVolumesResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ListVolumesResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _ListVolumesResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ListVolumesResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListVolumesResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &ListVolumesResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &ListVolumesResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _ListVolumesResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ListVolumesResponse) + // reply + switch x := m.Reply.(type) { + case *ListVolumesResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ListVolumesResponse_Error: 
+ s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ListVolumesResponse_Result struct { + Entries []*ListVolumesResponse_Result_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` +} + +func (m *ListVolumesResponse_Result) Reset() { *m = ListVolumesResponse_Result{} } +func (m *ListVolumesResponse_Result) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_Result) ProtoMessage() {} +func (*ListVolumesResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22, 0} } + +func (m *ListVolumesResponse_Result) GetEntries() []*ListVolumesResponse_Result_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListVolumesResponse_Result) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListVolumesResponse_Result_Entry struct { + VolumeInfo *VolumeInfo `protobuf:"bytes,1,opt,name=volume_info,json=volumeInfo" json:"volume_info,omitempty"` +} + +func (m *ListVolumesResponse_Result_Entry) Reset() { *m = ListVolumesResponse_Result_Entry{} } +func (m *ListVolumesResponse_Result_Entry) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_Result_Entry) ProtoMessage() {} +func (*ListVolumesResponse_Result_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{22, 0, 0} +} + +func (m *ListVolumesResponse_Result_Entry) GetVolumeInfo() *VolumeInfo { + if m != nil { 
+ return m.VolumeInfo + } + return nil +} + +// ////// +// ////// +type GetCapacityRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } +func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } +func (*GetCapacityRequest) ProtoMessage() {} +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *GetCapacityRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *GetCapacityRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type GetCapacityResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *GetCapacityResponse_Result_ + // *GetCapacityResponse_Error + Reply isGetCapacityResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } +func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse) ProtoMessage() {} +func (*GetCapacityResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +type isGetCapacityResponse_Reply interface { + isGetCapacityResponse_Reply() +} + +type GetCapacityResponse_Result_ struct { + Result *GetCapacityResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type GetCapacityResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*GetCapacityResponse_Result_) isGetCapacityResponse_Reply() {} +func (*GetCapacityResponse_Error) isGetCapacityResponse_Reply() {} + +func (m *GetCapacityResponse) GetReply() isGetCapacityResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *GetCapacityResponse) GetResult() *GetCapacityResponse_Result { + if x, ok := m.GetReply().(*GetCapacityResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *GetCapacityResponse) GetError() *Error { + if x, ok := m.GetReply().(*GetCapacityResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GetCapacityResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GetCapacityResponse_OneofMarshaler, _GetCapacityResponse_OneofUnmarshaler, _GetCapacityResponse_OneofSizer, []interface{}{ + (*GetCapacityResponse_Result_)(nil), + (*GetCapacityResponse_Error)(nil), + } +} + +func _GetCapacityResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GetCapacityResponse) + // reply + switch x := m.Reply.(type) { + case *GetCapacityResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *GetCapacityResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GetCapacityResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _GetCapacityResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GetCapacityResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GetCapacityResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &GetCapacityResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &GetCapacityResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _GetCapacityResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GetCapacityResponse) + // reply + switch x := m.Reply.(type) { + case *GetCapacityResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GetCapacityResponse_Error: 
+ s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type GetCapacityResponse_Result struct { + // The available capacity of the storage that can be used to + // provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + AvailableCapacity uint64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity" json:"available_capacity,omitempty"` +} + +func (m *GetCapacityResponse_Result) Reset() { *m = GetCapacityResponse_Result{} } +func (m *GetCapacityResponse_Result) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse_Result) ProtoMessage() {} +func (*GetCapacityResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24, 0} } + +func (m *GetCapacityResponse_Result) GetAvailableCapacity() uint64 { + if m != nil { + return m.AvailableCapacity + } + return 0 +} + +// ////// +// ////// +type ControllerGetCapabilitiesRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } +func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{25} +} + +func (m *ControllerGetCapabilitiesRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type ControllerGetCapabilitiesResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *ControllerGetCapabilitiesResponse_Result_ + // *ControllerGetCapabilitiesResponse_Error + Reply isControllerGetCapabilitiesResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } +func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} +func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{26} +} + +type isControllerGetCapabilitiesResponse_Reply interface { + isControllerGetCapabilitiesResponse_Reply() +} + +type ControllerGetCapabilitiesResponse_Result_ struct { + Result *ControllerGetCapabilitiesResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type ControllerGetCapabilitiesResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*ControllerGetCapabilitiesResponse_Result_) isControllerGetCapabilitiesResponse_Reply() {} +func (*ControllerGetCapabilitiesResponse_Error) isControllerGetCapabilitiesResponse_Reply() {} + +func (m *ControllerGetCapabilitiesResponse) GetReply() isControllerGetCapabilitiesResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *ControllerGetCapabilitiesResponse) GetResult() *ControllerGetCapabilitiesResponse_Result { + if x, ok := m.GetReply().(*ControllerGetCapabilitiesResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *ControllerGetCapabilitiesResponse) GetError() *Error { + if x, ok := m.GetReply().(*ControllerGetCapabilitiesResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ControllerGetCapabilitiesResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerGetCapabilitiesResponse_OneofMarshaler, _ControllerGetCapabilitiesResponse_OneofUnmarshaler, _ControllerGetCapabilitiesResponse_OneofSizer, []interface{}{ + (*ControllerGetCapabilitiesResponse_Result_)(nil), + (*ControllerGetCapabilitiesResponse_Error)(nil), + } +} + +func _ControllerGetCapabilitiesResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerGetCapabilitiesResponse) + // reply + switch x := m.Reply.(type) { + case *ControllerGetCapabilitiesResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *ControllerGetCapabilitiesResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerGetCapabilitiesResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _ControllerGetCapabilitiesResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerGetCapabilitiesResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerGetCapabilitiesResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &ControllerGetCapabilitiesResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &ControllerGetCapabilitiesResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerGetCapabilitiesResponse_OneofSizer(msg proto.Message) (n int) { + m := 
msg.(*ControllerGetCapabilitiesResponse) + // reply + switch x := m.Reply.(type) { + case *ControllerGetCapabilitiesResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ControllerGetCapabilitiesResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerGetCapabilitiesResponse_Result struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + Capabilities []*ControllerServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` +} + +func (m *ControllerGetCapabilitiesResponse_Result) Reset() { + *m = ControllerGetCapabilitiesResponse_Result{} +} +func (m *ControllerGetCapabilitiesResponse_Result) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesResponse_Result) ProtoMessage() {} +func (*ControllerGetCapabilitiesResponse_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{26, 0} +} + +func (m *ControllerGetCapabilitiesResponse_Result) GetCapabilities() []*ControllerServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the controller service. 
+type ControllerServiceCapability struct { + // Types that are valid to be assigned to Type: + // *ControllerServiceCapability_Rpc + Type isControllerServiceCapability_Type `protobuf_oneof:"type"` +} + +func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } +func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability) ProtoMessage() {} +func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +type isControllerServiceCapability_Type interface { + isControllerServiceCapability_Type() +} + +type ControllerServiceCapability_Rpc struct { + Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} + +func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { + if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ + (*ControllerServiceCapability_Rpc)(nil), + } +} + +func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &ControllerServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerServiceCapability_RPC struct { + Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` +} + +func (m 
*ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } +func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability_RPC) ProtoMessage() {} +func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{27, 0} +} + +func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return ControllerServiceCapability_RPC_UNKNOWN +} + +// ////// +// ////// +type NodePublishVolumeRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The handle of the volume to publish. This field is REQUIRED. + VolumeHandle *VolumeHandle `protobuf:"bytes,2,opt,name=volume_handle,json=volumeHandle" json:"volume_handle,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishVolumeInfo map[string]string `protobuf:"bytes,3,rep,name=publish_volume_info,json=publishVolumeInfo" json:"publish_volume_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. 
+ VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,6,opt,name=readonly" json:"readonly,omitempty"` + // End user credentials used to authenticate/authorize node publish + // request. This field is OPTIONAL. + UserCredentials *Credentials `protobuf:"bytes,7,opt,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty"` +} + +func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } +func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeRequest) ProtoMessage() {} +func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *NodePublishVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeHandle() *VolumeHandle { + if m != nil { + return m.VolumeHandle + } + return nil +} + +func (m *NodePublishVolumeRequest) GetPublishVolumeInfo() map[string]string { + if m != nil { + return m.PublishVolumeInfo + } + return nil +} + +func (m *NodePublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodePublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *NodePublishVolumeRequest) GetUserCredentials() *Credentials { + if m != nil { + return m.UserCredentials + } + return nil +} + +type NodePublishVolumeResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *NodePublishVolumeResponse_Result_ + // *NodePublishVolumeResponse_Error + Reply isNodePublishVolumeResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } +func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeResponse) ProtoMessage() {} +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type isNodePublishVolumeResponse_Reply interface { + isNodePublishVolumeResponse_Reply() +} + +type NodePublishVolumeResponse_Result_ struct { + Result *NodePublishVolumeResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type NodePublishVolumeResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*NodePublishVolumeResponse_Result_) isNodePublishVolumeResponse_Reply() {} +func (*NodePublishVolumeResponse_Error) isNodePublishVolumeResponse_Reply() {} + +func (m *NodePublishVolumeResponse) GetReply() isNodePublishVolumeResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *NodePublishVolumeResponse) GetResult() *NodePublishVolumeResponse_Result { + if x, ok := m.GetReply().(*NodePublishVolumeResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *NodePublishVolumeResponse) GetError() *Error { + if x, ok := m.GetReply().(*NodePublishVolumeResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NodePublishVolumeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodePublishVolumeResponse_OneofMarshaler, _NodePublishVolumeResponse_OneofUnmarshaler, _NodePublishVolumeResponse_OneofSizer, []interface{}{ + (*NodePublishVolumeResponse_Result_)(nil), + (*NodePublishVolumeResponse_Error)(nil), + } +} + +func _NodePublishVolumeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodePublishVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *NodePublishVolumeResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *NodePublishVolumeResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodePublishVolumeResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _NodePublishVolumeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodePublishVolumeResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodePublishVolumeResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &NodePublishVolumeResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &NodePublishVolumeResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _NodePublishVolumeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodePublishVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *NodePublishVolumeResponse_Result_: + s := proto.Size(x.Result) + n += 
proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NodePublishVolumeResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodePublishVolumeResponse_Result struct { +} + +func (m *NodePublishVolumeResponse_Result) Reset() { *m = NodePublishVolumeResponse_Result{} } +func (m *NodePublishVolumeResponse_Result) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeResponse_Result) ProtoMessage() {} +func (*NodePublishVolumeResponse_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{29, 0} +} + +// ////// +// ////// +type NodeUnpublishVolumeRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The handle of the volume. This field is REQUIRED. + VolumeHandle *VolumeHandle `protobuf:"bytes,2,opt,name=volume_handle,json=volumeHandle" json:"volume_handle,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,3,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + // End user credentials used to authenticate/authorize node unpublish + // request. This field is OPTIONAL. 
+ UserCredentials *Credentials `protobuf:"bytes,4,opt,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty"` +} + +func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } +func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +func (m *NodeUnpublishVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *NodeUnpublishVolumeRequest) GetVolumeHandle() *VolumeHandle { + if m != nil { + return m.VolumeHandle + } + return nil +} + +func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetUserCredentials() *Credentials { + if m != nil { + return m.UserCredentials + } + return nil +} + +type NodeUnpublishVolumeResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *NodeUnpublishVolumeResponse_Result_ + // *NodeUnpublishVolumeResponse_Error + Reply isNodeUnpublishVolumeResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } +func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +type isNodeUnpublishVolumeResponse_Reply interface { + isNodeUnpublishVolumeResponse_Reply() +} + +type NodeUnpublishVolumeResponse_Result_ struct { + Result *NodeUnpublishVolumeResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type NodeUnpublishVolumeResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*NodeUnpublishVolumeResponse_Result_) isNodeUnpublishVolumeResponse_Reply() {} +func (*NodeUnpublishVolumeResponse_Error) isNodeUnpublishVolumeResponse_Reply() {} + +func (m *NodeUnpublishVolumeResponse) GetReply() isNodeUnpublishVolumeResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *NodeUnpublishVolumeResponse) GetResult() *NodeUnpublishVolumeResponse_Result { + if x, ok := m.GetReply().(*NodeUnpublishVolumeResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *NodeUnpublishVolumeResponse) GetError() *Error { + if x, ok := m.GetReply().(*NodeUnpublishVolumeResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NodeUnpublishVolumeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodeUnpublishVolumeResponse_OneofMarshaler, _NodeUnpublishVolumeResponse_OneofUnmarshaler, _NodeUnpublishVolumeResponse_OneofSizer, []interface{}{ + (*NodeUnpublishVolumeResponse_Result_)(nil), + (*NodeUnpublishVolumeResponse_Error)(nil), + } +} + +func _NodeUnpublishVolumeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodeUnpublishVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *NodeUnpublishVolumeResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *NodeUnpublishVolumeResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodeUnpublishVolumeResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _NodeUnpublishVolumeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodeUnpublishVolumeResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodeUnpublishVolumeResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &NodeUnpublishVolumeResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &NodeUnpublishVolumeResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _NodeUnpublishVolumeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodeUnpublishVolumeResponse) + // reply + switch x := m.Reply.(type) { + case *NodeUnpublishVolumeResponse_Result_: + 
s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NodeUnpublishVolumeResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodeUnpublishVolumeResponse_Result struct { +} + +func (m *NodeUnpublishVolumeResponse_Result) Reset() { *m = NodeUnpublishVolumeResponse_Result{} } +func (m *NodeUnpublishVolumeResponse_Result) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse_Result) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 0} +} + +// ////// +// ////// +type GetNodeIDRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *GetNodeIDRequest) Reset() { *m = GetNodeIDRequest{} } +func (m *GetNodeIDRequest) String() string { return proto.CompactTextString(m) } +func (*GetNodeIDRequest) ProtoMessage() {} +func (*GetNodeIDRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *GetNodeIDRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type GetNodeIDResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *GetNodeIDResponse_Result_ + // *GetNodeIDResponse_Error + Reply isGetNodeIDResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *GetNodeIDResponse) Reset() { *m = GetNodeIDResponse{} } +func (m *GetNodeIDResponse) String() string { return proto.CompactTextString(m) } +func (*GetNodeIDResponse) ProtoMessage() {} +func (*GetNodeIDResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +type isGetNodeIDResponse_Reply interface { + isGetNodeIDResponse_Reply() +} + +type GetNodeIDResponse_Result_ struct { + Result *GetNodeIDResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type GetNodeIDResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*GetNodeIDResponse_Result_) isGetNodeIDResponse_Reply() {} +func (*GetNodeIDResponse_Error) isGetNodeIDResponse_Reply() {} + +func (m *GetNodeIDResponse) GetReply() isGetNodeIDResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *GetNodeIDResponse) GetResult() *GetNodeIDResponse_Result { + if x, ok := m.GetReply().(*GetNodeIDResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *GetNodeIDResponse) GetError() *Error { + if x, ok := m.GetReply().(*GetNodeIDResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GetNodeIDResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GetNodeIDResponse_OneofMarshaler, _GetNodeIDResponse_OneofUnmarshaler, _GetNodeIDResponse_OneofSizer, []interface{}{ + (*GetNodeIDResponse_Result_)(nil), + (*GetNodeIDResponse_Error)(nil), + } +} + +func _GetNodeIDResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GetNodeIDResponse) + // reply + switch x := m.Reply.(type) { + case *GetNodeIDResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *GetNodeIDResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GetNodeIDResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _GetNodeIDResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GetNodeIDResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GetNodeIDResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &GetNodeIDResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &GetNodeIDResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _GetNodeIDResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GetNodeIDResponse) + // reply + switch x := m.Reply.(type) { + case *GetNodeIDResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GetNodeIDResponse_Error: + s := proto.Size(x.Error) + n += 
proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type GetNodeIDResponse_Result struct { + // The ID of the node which SHALL be used by CO in + // `ControllerPublishVolume`. This is an OPTIONAL field. If unset, + // the CO SHALL leave the `node_id` field unset in + // `ControllerPublishVolume`. + NodeId *NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` +} + +func (m *GetNodeIDResponse_Result) Reset() { *m = GetNodeIDResponse_Result{} } +func (m *GetNodeIDResponse_Result) String() string { return proto.CompactTextString(m) } +func (*GetNodeIDResponse_Result) ProtoMessage() {} +func (*GetNodeIDResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33, 0} } + +func (m *GetNodeIDResponse_Result) GetNodeId() *NodeID { + if m != nil { + return m.NodeId + } + return nil +} + +// ////// +// ////// +type ProbeNodeRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *ProbeNodeRequest) Reset() { *m = ProbeNodeRequest{} } +func (m *ProbeNodeRequest) String() string { return proto.CompactTextString(m) } +func (*ProbeNodeRequest) ProtoMessage() {} +func (*ProbeNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *ProbeNodeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type ProbeNodeResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *ProbeNodeResponse_Result_ + // *ProbeNodeResponse_Error + Reply isProbeNodeResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *ProbeNodeResponse) Reset() { *m = ProbeNodeResponse{} } +func (m *ProbeNodeResponse) String() string { return proto.CompactTextString(m) } +func (*ProbeNodeResponse) ProtoMessage() {} +func (*ProbeNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +type isProbeNodeResponse_Reply interface { + isProbeNodeResponse_Reply() +} + +type ProbeNodeResponse_Result_ struct { + Result *ProbeNodeResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type ProbeNodeResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*ProbeNodeResponse_Result_) isProbeNodeResponse_Reply() {} +func (*ProbeNodeResponse_Error) isProbeNodeResponse_Reply() {} + +func (m *ProbeNodeResponse) GetReply() isProbeNodeResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *ProbeNodeResponse) GetResult() *ProbeNodeResponse_Result { + if x, ok := m.GetReply().(*ProbeNodeResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *ProbeNodeResponse) GetError() *Error { + if x, ok := m.GetReply().(*ProbeNodeResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ProbeNodeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ProbeNodeResponse_OneofMarshaler, _ProbeNodeResponse_OneofUnmarshaler, _ProbeNodeResponse_OneofSizer, []interface{}{ + (*ProbeNodeResponse_Result_)(nil), + (*ProbeNodeResponse_Error)(nil), + } +} + +func _ProbeNodeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ProbeNodeResponse) + // reply + switch x := m.Reply.(type) { + case *ProbeNodeResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *ProbeNodeResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ProbeNodeResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _ProbeNodeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ProbeNodeResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ProbeNodeResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &ProbeNodeResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &ProbeNodeResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _ProbeNodeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ProbeNodeResponse) + // reply + switch x := m.Reply.(type) { + case *ProbeNodeResponse_Result_: + s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ProbeNodeResponse_Error: + s := proto.Size(x.Error) + n += 
proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ProbeNodeResponse_Result struct { +} + +func (m *ProbeNodeResponse_Result) Reset() { *m = ProbeNodeResponse_Result{} } +func (m *ProbeNodeResponse_Result) String() string { return proto.CompactTextString(m) } +func (*ProbeNodeResponse_Result) ProtoMessage() {} +func (*ProbeNodeResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35, 0} } + +// ////// +// ////// +type NodeGetCapabilitiesRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } +func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *NodeGetCapabilitiesRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type NodeGetCapabilitiesResponse struct { + // One of the following fields MUST be specified. 
+ // + // Types that are valid to be assigned to Reply: + // *NodeGetCapabilitiesResponse_Result_ + // *NodeGetCapabilitiesResponse_Error + Reply isNodeGetCapabilitiesResponse_Reply `protobuf_oneof:"reply"` +} + +func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } +func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isNodeGetCapabilitiesResponse_Reply interface { + isNodeGetCapabilitiesResponse_Reply() +} + +type NodeGetCapabilitiesResponse_Result_ struct { + Result *NodeGetCapabilitiesResponse_Result `protobuf:"bytes,1,opt,name=result,oneof"` +} +type NodeGetCapabilitiesResponse_Error struct { + Error *Error `protobuf:"bytes,2,opt,name=error,oneof"` +} + +func (*NodeGetCapabilitiesResponse_Result_) isNodeGetCapabilitiesResponse_Reply() {} +func (*NodeGetCapabilitiesResponse_Error) isNodeGetCapabilitiesResponse_Reply() {} + +func (m *NodeGetCapabilitiesResponse) GetReply() isNodeGetCapabilitiesResponse_Reply { + if m != nil { + return m.Reply + } + return nil +} + +func (m *NodeGetCapabilitiesResponse) GetResult() *NodeGetCapabilitiesResponse_Result { + if x, ok := m.GetReply().(*NodeGetCapabilitiesResponse_Result_); ok { + return x.Result + } + return nil +} + +func (m *NodeGetCapabilitiesResponse) GetError() *Error { + if x, ok := m.GetReply().(*NodeGetCapabilitiesResponse_Error); ok { + return x.Error + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NodeGetCapabilitiesResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodeGetCapabilitiesResponse_OneofMarshaler, _NodeGetCapabilitiesResponse_OneofUnmarshaler, _NodeGetCapabilitiesResponse_OneofSizer, []interface{}{ + (*NodeGetCapabilitiesResponse_Result_)(nil), + (*NodeGetCapabilitiesResponse_Error)(nil), + } +} + +func _NodeGetCapabilitiesResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodeGetCapabilitiesResponse) + // reply + switch x := m.Reply.(type) { + case *NodeGetCapabilitiesResponse_Result_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case *NodeGetCapabilitiesResponse_Error: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodeGetCapabilitiesResponse.Reply has unexpected type %T", x) + } + return nil +} + +func _NodeGetCapabilitiesResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodeGetCapabilitiesResponse) + switch tag { + case 1: // reply.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodeGetCapabilitiesResponse_Result) + err := b.DecodeMessage(msg) + m.Reply = &NodeGetCapabilitiesResponse_Result_{msg} + return true, err + case 2: // reply.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error) + err := b.DecodeMessage(msg) + m.Reply = &NodeGetCapabilitiesResponse_Error{msg} + return true, err + default: + return false, nil + } +} + +func _NodeGetCapabilitiesResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodeGetCapabilitiesResponse) + // reply + switch x := m.Reply.(type) { + case *NodeGetCapabilitiesResponse_Result_: + 
s := proto.Size(x.Result) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NodeGetCapabilitiesResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodeGetCapabilitiesResponse_Result struct { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + Capabilities []*NodeServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` +} + +func (m *NodeGetCapabilitiesResponse_Result) Reset() { *m = NodeGetCapabilitiesResponse_Result{} } +func (m *NodeGetCapabilitiesResponse_Result) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse_Result) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{37, 0} +} + +func (m *NodeGetCapabilitiesResponse_Result) GetCapabilities() []*NodeServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the node service. 
+type NodeServiceCapability struct { + // Types that are valid to be assigned to Type: + // *NodeServiceCapability_Rpc + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` +} + +func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } +func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability) ProtoMessage() {} +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() +} + +type NodeServiceCapability_Rpc struct { + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} + +func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } +} + +func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodeServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodeServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &NodeServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodeServiceCapability_RPC struct { + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.NodeServiceCapability_RPC_Type" json:"type,omitempty"` +} + +func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } +func (m *NodeServiceCapability_RPC) String() string { return 
proto.CompactTextString(m) } +func (*NodeServiceCapability_RPC) ProtoMessage() {} +func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 0} } + +func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return NodeServiceCapability_RPC_UNKNOWN +} + +// ////// +// ////// +type Error struct { + // One of the following fields MUST be specified. + // + // Types that are valid to be assigned to Value: + // *Error_GeneralError_ + // *Error_CreateVolumeError_ + // *Error_DeleteVolumeError_ + // *Error_ControllerPublishVolumeError_ + // *Error_ControllerUnpublishVolumeError_ + // *Error_ValidateVolumeCapabilitiesError_ + // *Error_NodePublishVolumeError_ + // *Error_NodeUnpublishVolumeError_ + // *Error_ProbeNodeError_ + // *Error_GetNodeIdError + Value isError_Value `protobuf_oneof:"value"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isError_Value interface { + isError_Value() +} + +type Error_GeneralError_ struct { + GeneralError *Error_GeneralError `protobuf:"bytes,1,opt,name=general_error,json=generalError,oneof"` +} +type Error_CreateVolumeError_ struct { + CreateVolumeError *Error_CreateVolumeError `protobuf:"bytes,2,opt,name=create_volume_error,json=createVolumeError,oneof"` +} +type Error_DeleteVolumeError_ struct { + DeleteVolumeError *Error_DeleteVolumeError `protobuf:"bytes,3,opt,name=delete_volume_error,json=deleteVolumeError,oneof"` +} +type Error_ControllerPublishVolumeError_ struct { + ControllerPublishVolumeError *Error_ControllerPublishVolumeError `protobuf:"bytes,4,opt,name=controller_publish_volume_error,json=controllerPublishVolumeError,oneof"` +} +type Error_ControllerUnpublishVolumeError_ struct { + ControllerUnpublishVolumeError 
*Error_ControllerUnpublishVolumeError `protobuf:"bytes,5,opt,name=controller_unpublish_volume_error,json=controllerUnpublishVolumeError,oneof"` +} +type Error_ValidateVolumeCapabilitiesError_ struct { + ValidateVolumeCapabilitiesError *Error_ValidateVolumeCapabilitiesError `protobuf:"bytes,6,opt,name=validate_volume_capabilities_error,json=validateVolumeCapabilitiesError,oneof"` +} +type Error_NodePublishVolumeError_ struct { + NodePublishVolumeError *Error_NodePublishVolumeError `protobuf:"bytes,7,opt,name=node_publish_volume_error,json=nodePublishVolumeError,oneof"` +} +type Error_NodeUnpublishVolumeError_ struct { + NodeUnpublishVolumeError *Error_NodeUnpublishVolumeError `protobuf:"bytes,8,opt,name=node_unpublish_volume_error,json=nodeUnpublishVolumeError,oneof"` +} +type Error_ProbeNodeError_ struct { + ProbeNodeError *Error_ProbeNodeError `protobuf:"bytes,9,opt,name=probe_node_error,json=probeNodeError,oneof"` +} +type Error_GetNodeIdError struct { + GetNodeIdError *Error_GetNodeIDError `protobuf:"bytes,10,opt,name=get_node_id_error,json=getNodeIdError,oneof"` +} + +func (*Error_GeneralError_) isError_Value() {} +func (*Error_CreateVolumeError_) isError_Value() {} +func (*Error_DeleteVolumeError_) isError_Value() {} +func (*Error_ControllerPublishVolumeError_) isError_Value() {} +func (*Error_ControllerUnpublishVolumeError_) isError_Value() {} +func (*Error_ValidateVolumeCapabilitiesError_) isError_Value() {} +func (*Error_NodePublishVolumeError_) isError_Value() {} +func (*Error_NodeUnpublishVolumeError_) isError_Value() {} +func (*Error_ProbeNodeError_) isError_Value() {} +func (*Error_GetNodeIdError) isError_Value() {} + +func (m *Error) GetValue() isError_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Error) GetGeneralError() *Error_GeneralError { + if x, ok := m.GetValue().(*Error_GeneralError_); ok { + return x.GeneralError + } + return nil +} + +func (m *Error) GetCreateVolumeError() *Error_CreateVolumeError { + if x, ok := 
m.GetValue().(*Error_CreateVolumeError_); ok { + return x.CreateVolumeError + } + return nil +} + +func (m *Error) GetDeleteVolumeError() *Error_DeleteVolumeError { + if x, ok := m.GetValue().(*Error_DeleteVolumeError_); ok { + return x.DeleteVolumeError + } + return nil +} + +func (m *Error) GetControllerPublishVolumeError() *Error_ControllerPublishVolumeError { + if x, ok := m.GetValue().(*Error_ControllerPublishVolumeError_); ok { + return x.ControllerPublishVolumeError + } + return nil +} + +func (m *Error) GetControllerUnpublishVolumeError() *Error_ControllerUnpublishVolumeError { + if x, ok := m.GetValue().(*Error_ControllerUnpublishVolumeError_); ok { + return x.ControllerUnpublishVolumeError + } + return nil +} + +func (m *Error) GetValidateVolumeCapabilitiesError() *Error_ValidateVolumeCapabilitiesError { + if x, ok := m.GetValue().(*Error_ValidateVolumeCapabilitiesError_); ok { + return x.ValidateVolumeCapabilitiesError + } + return nil +} + +func (m *Error) GetNodePublishVolumeError() *Error_NodePublishVolumeError { + if x, ok := m.GetValue().(*Error_NodePublishVolumeError_); ok { + return x.NodePublishVolumeError + } + return nil +} + +func (m *Error) GetNodeUnpublishVolumeError() *Error_NodeUnpublishVolumeError { + if x, ok := m.GetValue().(*Error_NodeUnpublishVolumeError_); ok { + return x.NodeUnpublishVolumeError + } + return nil +} + +func (m *Error) GetProbeNodeError() *Error_ProbeNodeError { + if x, ok := m.GetValue().(*Error_ProbeNodeError_); ok { + return x.ProbeNodeError + } + return nil +} + +func (m *Error) GetGetNodeIdError() *Error_GetNodeIDError { + if x, ok := m.GetValue().(*Error_GetNodeIdError); ok { + return x.GetNodeIdError + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Error) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Error_OneofMarshaler, _Error_OneofUnmarshaler, _Error_OneofSizer, []interface{}{ + (*Error_GeneralError_)(nil), + (*Error_CreateVolumeError_)(nil), + (*Error_DeleteVolumeError_)(nil), + (*Error_ControllerPublishVolumeError_)(nil), + (*Error_ControllerUnpublishVolumeError_)(nil), + (*Error_ValidateVolumeCapabilitiesError_)(nil), + (*Error_NodePublishVolumeError_)(nil), + (*Error_NodeUnpublishVolumeError_)(nil), + (*Error_ProbeNodeError_)(nil), + (*Error_GetNodeIdError)(nil), + } +} + +func _Error_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Error) + // value + switch x := m.Value.(type) { + case *Error_GeneralError_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GeneralError); err != nil { + return err + } + case *Error_CreateVolumeError_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CreateVolumeError); err != nil { + return err + } + case *Error_DeleteVolumeError_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteVolumeError); err != nil { + return err + } + case *Error_ControllerPublishVolumeError_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ControllerPublishVolumeError); err != nil { + return err + } + case *Error_ControllerUnpublishVolumeError_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ControllerUnpublishVolumeError); err != nil { + return err + } + case *Error_ValidateVolumeCapabilitiesError_: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ValidateVolumeCapabilitiesError); err != nil { + return err + } + case *Error_NodePublishVolumeError_: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NodePublishVolumeError); err != nil { + return err + } + 
case *Error_NodeUnpublishVolumeError_: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NodeUnpublishVolumeError); err != nil { + return err + } + case *Error_ProbeNodeError_: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ProbeNodeError); err != nil { + return err + } + case *Error_GetNodeIdError: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GetNodeIdError); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Error.Value has unexpected type %T", x) + } + return nil +} + +func _Error_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Error) + switch tag { + case 1: // value.general_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_GeneralError) + err := b.DecodeMessage(msg) + m.Value = &Error_GeneralError_{msg} + return true, err + case 2: // value.create_volume_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_CreateVolumeError) + err := b.DecodeMessage(msg) + m.Value = &Error_CreateVolumeError_{msg} + return true, err + case 3: // value.delete_volume_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_DeleteVolumeError) + err := b.DecodeMessage(msg) + m.Value = &Error_DeleteVolumeError_{msg} + return true, err + case 4: // value.controller_publish_volume_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_ControllerPublishVolumeError) + err := b.DecodeMessage(msg) + m.Value = &Error_ControllerPublishVolumeError_{msg} + return true, err + case 5: // value.controller_unpublish_volume_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_ControllerUnpublishVolumeError) + err := b.DecodeMessage(msg) + m.Value = &Error_ControllerUnpublishVolumeError_{msg} + return true, 
err + case 6: // value.validate_volume_capabilities_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_ValidateVolumeCapabilitiesError) + err := b.DecodeMessage(msg) + m.Value = &Error_ValidateVolumeCapabilitiesError_{msg} + return true, err + case 7: // value.node_publish_volume_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_NodePublishVolumeError) + err := b.DecodeMessage(msg) + m.Value = &Error_NodePublishVolumeError_{msg} + return true, err + case 8: // value.node_unpublish_volume_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_NodeUnpublishVolumeError) + err := b.DecodeMessage(msg) + m.Value = &Error_NodeUnpublishVolumeError_{msg} + return true, err + case 9: // value.probe_node_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_ProbeNodeError) + err := b.DecodeMessage(msg) + m.Value = &Error_ProbeNodeError_{msg} + return true, err + case 10: // value.get_node_id_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Error_GetNodeIDError) + err := b.DecodeMessage(msg) + m.Value = &Error_GetNodeIdError{msg} + return true, err + default: + return false, nil + } +} + +func _Error_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Error) + // value + switch x := m.Value.(type) { + case *Error_GeneralError_: + s := proto.Size(x.GeneralError) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_CreateVolumeError_: + s := proto.Size(x.CreateVolumeError) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_DeleteVolumeError_: + s := proto.Size(x.DeleteVolumeError) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_ControllerPublishVolumeError_: + s := 
proto.Size(x.ControllerPublishVolumeError) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_ControllerUnpublishVolumeError_: + s := proto.Size(x.ControllerUnpublishVolumeError) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_ValidateVolumeCapabilitiesError_: + s := proto.Size(x.ValidateVolumeCapabilitiesError) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_NodePublishVolumeError_: + s := proto.Size(x.NodePublishVolumeError) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_NodeUnpublishVolumeError_: + s := proto.Size(x.NodeUnpublishVolumeError) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_ProbeNodeError_: + s := proto.Size(x.ProbeNodeError) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Error_GetNodeIdError: + s := proto.Size(x.GetNodeIdError) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// General Error that MAY be returned by any RPC. +type Error_GeneralError struct { + // Machine parsable error code. + ErrorCode Error_GeneralError_GeneralErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_GeneralError_GeneralErrorCode" json:"error_code,omitempty"` + // When set to true, `caller_must_not_retry` indicates that the + // caller MUST not retry the same call again. This MAY be because + // the call is deemed invalid by the Plugin and no amount of retries + // will cause it to succeed. If this value is false, the caller MAY + // reissue the same call, but SHOULD implement exponential backoff + // on retires. 
+ CallerMustNotRetry bool `protobuf:"varint,2,opt,name=caller_must_not_retry,json=callerMustNotRetry" json:"caller_must_not_retry,omitempty"` + // Human readable description of error, possibly with additional + // information. This string MAY be surfaced by CO to end users. + ErrorDescription string `protobuf:"bytes,3,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_GeneralError) Reset() { *m = Error_GeneralError{} } +func (m *Error_GeneralError) String() string { return proto.CompactTextString(m) } +func (*Error_GeneralError) ProtoMessage() {} +func (*Error_GeneralError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39, 0} } + +func (m *Error_GeneralError) GetErrorCode() Error_GeneralError_GeneralErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_GeneralError_UNKNOWN +} + +func (m *Error_GeneralError) GetCallerMustNotRetry() bool { + if m != nil { + return m.CallerMustNotRetry + } + return false +} + +func (m *Error_GeneralError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +// `CreateVolume` specific error. +type Error_CreateVolumeError struct { + // Machine parsable error code. + ErrorCode Error_CreateVolumeError_CreateVolumeErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_CreateVolumeError_CreateVolumeErrorCode" json:"error_code,omitempty"` + // Human readable description of error, possibly with additional + // information. This string maybe surfaced by CO to end users. 
+ ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_CreateVolumeError) Reset() { *m = Error_CreateVolumeError{} } +func (m *Error_CreateVolumeError) String() string { return proto.CompactTextString(m) } +func (*Error_CreateVolumeError) ProtoMessage() {} +func (*Error_CreateVolumeError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39, 1} } + +func (m *Error_CreateVolumeError) GetErrorCode() Error_CreateVolumeError_CreateVolumeErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_CreateVolumeError_UNKNOWN +} + +func (m *Error_CreateVolumeError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +// `DeleteVolume` specific error. +type Error_DeleteVolumeError struct { + // Machine parsable error code. + ErrorCode Error_DeleteVolumeError_DeleteVolumeErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_DeleteVolumeError_DeleteVolumeErrorCode" json:"error_code,omitempty"` + // Human readable description of error, possibly with additional + // information. This string maybe surfaced by CO to end users. 
+ ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_DeleteVolumeError) Reset() { *m = Error_DeleteVolumeError{} } +func (m *Error_DeleteVolumeError) String() string { return proto.CompactTextString(m) } +func (*Error_DeleteVolumeError) ProtoMessage() {} +func (*Error_DeleteVolumeError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39, 2} } + +func (m *Error_DeleteVolumeError) GetErrorCode() Error_DeleteVolumeError_DeleteVolumeErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_DeleteVolumeError_UNKNOWN +} + +func (m *Error_DeleteVolumeError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +// `ControllerPublishVolume` specific error. +type Error_ControllerPublishVolumeError struct { + // Machine parsable error code. + ErrorCode Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode" json:"error_code,omitempty"` + // Human readable description of error, possibly with additional + // information. This string maybe surfaced by CO to end users. + ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` + // On `VOLUME_ALREADY_ATTACHED` and `MAX_ATTACHED_NODES` errors, + // this field contains the node(s) that the specified volume is + // already attached to. 
+ NodeIds []*NodeID `protobuf:"bytes,3,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` +} + +func (m *Error_ControllerPublishVolumeError) Reset() { *m = Error_ControllerPublishVolumeError{} } +func (m *Error_ControllerPublishVolumeError) String() string { return proto.CompactTextString(m) } +func (*Error_ControllerPublishVolumeError) ProtoMessage() {} +func (*Error_ControllerPublishVolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 3} +} + +func (m *Error_ControllerPublishVolumeError) GetErrorCode() Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_ControllerPublishVolumeError_UNKNOWN +} + +func (m *Error_ControllerPublishVolumeError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +func (m *Error_ControllerPublishVolumeError) GetNodeIds() []*NodeID { + if m != nil { + return m.NodeIds + } + return nil +} + +// `ControllerUnpublishVolume` specific error. 
+type Error_ControllerUnpublishVolumeError struct { + ErrorCode Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode" json:"error_code,omitempty"` + ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_ControllerUnpublishVolumeError) Reset() { *m = Error_ControllerUnpublishVolumeError{} } +func (m *Error_ControllerUnpublishVolumeError) String() string { return proto.CompactTextString(m) } +func (*Error_ControllerUnpublishVolumeError) ProtoMessage() {} +func (*Error_ControllerUnpublishVolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 4} +} + +func (m *Error_ControllerUnpublishVolumeError) GetErrorCode() Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_ControllerUnpublishVolumeError_UNKNOWN +} + +func (m *Error_ControllerUnpublishVolumeError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +// `ValidateVolumeCapabilities` specific error. 
+type Error_ValidateVolumeCapabilitiesError struct { + ErrorCode Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode" json:"error_code,omitempty"` + ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_ValidateVolumeCapabilitiesError) Reset() { *m = Error_ValidateVolumeCapabilitiesError{} } +func (m *Error_ValidateVolumeCapabilitiesError) String() string { return proto.CompactTextString(m) } +func (*Error_ValidateVolumeCapabilitiesError) ProtoMessage() {} +func (*Error_ValidateVolumeCapabilitiesError) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 5} +} + +func (m *Error_ValidateVolumeCapabilitiesError) GetErrorCode() Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_ValidateVolumeCapabilitiesError_UNKNOWN +} + +func (m *Error_ValidateVolumeCapabilitiesError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +// `NodePublishVolume` specific error. 
+type Error_NodePublishVolumeError struct { + ErrorCode Error_NodePublishVolumeError_NodePublishVolumeErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_NodePublishVolumeError_NodePublishVolumeErrorCode" json:"error_code,omitempty"` + ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_NodePublishVolumeError) Reset() { *m = Error_NodePublishVolumeError{} } +func (m *Error_NodePublishVolumeError) String() string { return proto.CompactTextString(m) } +func (*Error_NodePublishVolumeError) ProtoMessage() {} +func (*Error_NodePublishVolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 6} +} + +func (m *Error_NodePublishVolumeError) GetErrorCode() Error_NodePublishVolumeError_NodePublishVolumeErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_NodePublishVolumeError_UNKNOWN +} + +func (m *Error_NodePublishVolumeError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +// `NodeUnpublishVolume` specific error. 
+type Error_NodeUnpublishVolumeError struct { + ErrorCode Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode" json:"error_code,omitempty"` + ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_NodeUnpublishVolumeError) Reset() { *m = Error_NodeUnpublishVolumeError{} } +func (m *Error_NodeUnpublishVolumeError) String() string { return proto.CompactTextString(m) } +func (*Error_NodeUnpublishVolumeError) ProtoMessage() {} +func (*Error_NodeUnpublishVolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 7} +} + +func (m *Error_NodeUnpublishVolumeError) GetErrorCode() Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_NodeUnpublishVolumeError_UNKNOWN +} + +func (m *Error_NodeUnpublishVolumeError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +// `ProbeNode` specific error. 
+type Error_ProbeNodeError struct { + ErrorCode Error_ProbeNodeError_ProbeNodeErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_ProbeNodeError_ProbeNodeErrorCode" json:"error_code,omitempty"` + ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_ProbeNodeError) Reset() { *m = Error_ProbeNodeError{} } +func (m *Error_ProbeNodeError) String() string { return proto.CompactTextString(m) } +func (*Error_ProbeNodeError) ProtoMessage() {} +func (*Error_ProbeNodeError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39, 8} } + +func (m *Error_ProbeNodeError) GetErrorCode() Error_ProbeNodeError_ProbeNodeErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_ProbeNodeError_UNKNOWN +} + +func (m *Error_ProbeNodeError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +// `GetNodeID` specific error. +type Error_GetNodeIDError struct { + ErrorCode Error_GetNodeIDError_GetNodeIDErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=csi.Error_GetNodeIDError_GetNodeIDErrorCode" json:"error_code,omitempty"` + ErrorDescription string `protobuf:"bytes,2,opt,name=error_description,json=errorDescription" json:"error_description,omitempty"` +} + +func (m *Error_GetNodeIDError) Reset() { *m = Error_GetNodeIDError{} } +func (m *Error_GetNodeIDError) String() string { return proto.CompactTextString(m) } +func (*Error_GetNodeIDError) ProtoMessage() {} +func (*Error_GetNodeIDError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39, 9} } + +func (m *Error_GetNodeIDError) GetErrorCode() Error_GetNodeIDError_GetNodeIDErrorCode { + if m != nil { + return m.ErrorCode + } + return Error_GetNodeIDError_UNKNOWN +} + +func (m *Error_GetNodeIDError) GetErrorDescription() string { + if m != nil { + return m.ErrorDescription + } + return "" +} + +func init() { + 
proto.RegisterType((*GetSupportedVersionsRequest)(nil), "csi.GetSupportedVersionsRequest") + proto.RegisterType((*GetSupportedVersionsResponse)(nil), "csi.GetSupportedVersionsResponse") + proto.RegisterType((*GetSupportedVersionsResponse_Result)(nil), "csi.GetSupportedVersionsResponse.Result") + proto.RegisterType((*Version)(nil), "csi.Version") + proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.GetPluginInfoRequest") + proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.GetPluginInfoResponse") + proto.RegisterType((*GetPluginInfoResponse_Result)(nil), "csi.GetPluginInfoResponse.Result") + proto.RegisterType((*CreateVolumeRequest)(nil), "csi.CreateVolumeRequest") + proto.RegisterType((*CreateVolumeResponse)(nil), "csi.CreateVolumeResponse") + proto.RegisterType((*CreateVolumeResponse_Result)(nil), "csi.CreateVolumeResponse.Result") + proto.RegisterType((*VolumeCapability)(nil), "csi.VolumeCapability") + proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.VolumeCapability.BlockVolume") + proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.VolumeCapability.MountVolume") + proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.VolumeCapability.AccessMode") + proto.RegisterType((*CapacityRange)(nil), "csi.CapacityRange") + proto.RegisterType((*VolumeInfo)(nil), "csi.VolumeInfo") + proto.RegisterType((*VolumeHandle)(nil), "csi.VolumeHandle") + proto.RegisterType((*Credentials)(nil), "csi.Credentials") + proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.DeleteVolumeRequest") + proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.DeleteVolumeResponse") + proto.RegisterType((*DeleteVolumeResponse_Result)(nil), "csi.DeleteVolumeResponse.Result") + proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.ControllerPublishVolumeRequest") + proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.ControllerPublishVolumeResponse") + proto.RegisterType((*ControllerPublishVolumeResponse_Result)(nil), 
"csi.ControllerPublishVolumeResponse.Result") + proto.RegisterType((*NodeID)(nil), "csi.NodeID") + proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.ControllerUnpublishVolumeRequest") + proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.ControllerUnpublishVolumeResponse") + proto.RegisterType((*ControllerUnpublishVolumeResponse_Result)(nil), "csi.ControllerUnpublishVolumeResponse.Result") + proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.ValidateVolumeCapabilitiesRequest") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.ValidateVolumeCapabilitiesResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse_Result)(nil), "csi.ValidateVolumeCapabilitiesResponse.Result") + proto.RegisterType((*ListVolumesRequest)(nil), "csi.ListVolumesRequest") + proto.RegisterType((*ListVolumesResponse)(nil), "csi.ListVolumesResponse") + proto.RegisterType((*ListVolumesResponse_Result)(nil), "csi.ListVolumesResponse.Result") + proto.RegisterType((*ListVolumesResponse_Result_Entry)(nil), "csi.ListVolumesResponse.Result.Entry") + proto.RegisterType((*GetCapacityRequest)(nil), "csi.GetCapacityRequest") + proto.RegisterType((*GetCapacityResponse)(nil), "csi.GetCapacityResponse") + proto.RegisterType((*GetCapacityResponse_Result)(nil), "csi.GetCapacityResponse.Result") + proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.ControllerGetCapabilitiesRequest") + proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.ControllerGetCapabilitiesResponse") + proto.RegisterType((*ControllerGetCapabilitiesResponse_Result)(nil), "csi.ControllerGetCapabilitiesResponse.Result") + proto.RegisterType((*ControllerServiceCapability)(nil), "csi.ControllerServiceCapability") + proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.ControllerServiceCapability.RPC") + proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.NodePublishVolumeRequest") + 
proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.NodePublishVolumeResponse") + proto.RegisterType((*NodePublishVolumeResponse_Result)(nil), "csi.NodePublishVolumeResponse.Result") + proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.NodeUnpublishVolumeRequest") + proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.NodeUnpublishVolumeResponse") + proto.RegisterType((*NodeUnpublishVolumeResponse_Result)(nil), "csi.NodeUnpublishVolumeResponse.Result") + proto.RegisterType((*GetNodeIDRequest)(nil), "csi.GetNodeIDRequest") + proto.RegisterType((*GetNodeIDResponse)(nil), "csi.GetNodeIDResponse") + proto.RegisterType((*GetNodeIDResponse_Result)(nil), "csi.GetNodeIDResponse.Result") + proto.RegisterType((*ProbeNodeRequest)(nil), "csi.ProbeNodeRequest") + proto.RegisterType((*ProbeNodeResponse)(nil), "csi.ProbeNodeResponse") + proto.RegisterType((*ProbeNodeResponse_Result)(nil), "csi.ProbeNodeResponse.Result") + proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.NodeGetCapabilitiesRequest") + proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.NodeGetCapabilitiesResponse") + proto.RegisterType((*NodeGetCapabilitiesResponse_Result)(nil), "csi.NodeGetCapabilitiesResponse.Result") + proto.RegisterType((*NodeServiceCapability)(nil), "csi.NodeServiceCapability") + proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.NodeServiceCapability.RPC") + proto.RegisterType((*Error)(nil), "csi.Error") + proto.RegisterType((*Error_GeneralError)(nil), "csi.Error.GeneralError") + proto.RegisterType((*Error_CreateVolumeError)(nil), "csi.Error.CreateVolumeError") + proto.RegisterType((*Error_DeleteVolumeError)(nil), "csi.Error.DeleteVolumeError") + proto.RegisterType((*Error_ControllerPublishVolumeError)(nil), "csi.Error.ControllerPublishVolumeError") + proto.RegisterType((*Error_ControllerUnpublishVolumeError)(nil), "csi.Error.ControllerUnpublishVolumeError") + proto.RegisterType((*Error_ValidateVolumeCapabilitiesError)(nil), 
"csi.Error.ValidateVolumeCapabilitiesError") + proto.RegisterType((*Error_NodePublishVolumeError)(nil), "csi.Error.NodePublishVolumeError") + proto.RegisterType((*Error_NodeUnpublishVolumeError)(nil), "csi.Error.NodeUnpublishVolumeError") + proto.RegisterType((*Error_ProbeNodeError)(nil), "csi.Error.ProbeNodeError") + proto.RegisterType((*Error_GetNodeIDError)(nil), "csi.Error.GetNodeIDError") + proto.RegisterEnum("csi.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.Error_GeneralError_GeneralErrorCode", Error_GeneralError_GeneralErrorCode_name, Error_GeneralError_GeneralErrorCode_value) + proto.RegisterEnum("csi.Error_CreateVolumeError_CreateVolumeErrorCode", Error_CreateVolumeError_CreateVolumeErrorCode_name, Error_CreateVolumeError_CreateVolumeErrorCode_value) + proto.RegisterEnum("csi.Error_DeleteVolumeError_DeleteVolumeErrorCode", Error_DeleteVolumeError_DeleteVolumeErrorCode_name, Error_DeleteVolumeError_DeleteVolumeErrorCode_value) + proto.RegisterEnum("csi.Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode", Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode_name, Error_ControllerPublishVolumeError_ControllerPublishVolumeErrorCode_value) + proto.RegisterEnum("csi.Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode", Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode_name, Error_ControllerUnpublishVolumeError_ControllerUnpublishVolumeErrorCode_value) + proto.RegisterEnum("csi.Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode", 
Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode_name, Error_ValidateVolumeCapabilitiesError_ValidateVolumeCapabilitiesErrorCode_value) + proto.RegisterEnum("csi.Error_NodePublishVolumeError_NodePublishVolumeErrorCode", Error_NodePublishVolumeError_NodePublishVolumeErrorCode_name, Error_NodePublishVolumeError_NodePublishVolumeErrorCode_value) + proto.RegisterEnum("csi.Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode", Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode_name, Error_NodeUnpublishVolumeError_NodeUnpublishVolumeErrorCode_value) + proto.RegisterEnum("csi.Error_ProbeNodeError_ProbeNodeErrorCode", Error_ProbeNodeError_ProbeNodeErrorCode_name, Error_ProbeNodeError_ProbeNodeErrorCode_value) + proto.RegisterEnum("csi.Error_GetNodeIDError_GetNodeIDErrorCode", Error_GetNodeIDError_GetNodeIDErrorCode_name, Error_GetNodeIDError_GetNodeIDErrorCode_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Identity service + +type IdentityClient interface { + GetSupportedVersions(ctx context.Context, in *GetSupportedVersionsRequest, opts ...grpc.CallOption) (*GetSupportedVersionsResponse, error) + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) +} + +type identityClient struct { + cc *grpc.ClientConn +} + +func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetSupportedVersions(ctx context.Context, in *GetSupportedVersionsRequest, opts ...grpc.CallOption) (*GetSupportedVersionsResponse, error) { + out := new(GetSupportedVersionsResponse) + err := grpc.Invoke(ctx, "/csi.Identity/GetSupportedVersions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := grpc.Invoke(ctx, "/csi.Identity/GetPluginInfo", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Identity service + +type IdentityServer interface { + GetSupportedVersions(context.Context, *GetSupportedVersionsRequest) (*GetSupportedVersionsResponse, error) + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) +} + +func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { + s.RegisterService(&_Identity_serviceDesc, srv) +} + +func _Identity_GetSupportedVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSupportedVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetSupportedVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Identity/GetSupportedVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetSupportedVersions(ctx, req.(*GetSupportedVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Identity/GetPluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Identity_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSupportedVersions", + Handler: 
_Identity_GetSupportedVersions_Handler, + }, + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Controller service + +type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) +} + +type controllerClient struct { + cc *grpc.ClientConn +} + +func NewControllerClient(cc *grpc.ClientConn) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/CreateVolume", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/DeleteVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ControllerPublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ControllerUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ValidateVolumeCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ListVolumes", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := grpc.Invoke(ctx, "/csi.Controller/GetCapacity", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ControllerGetCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Controller service + +type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) +} + +func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { + s.RegisterService(&_Controller_serviceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/CreateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/DeleteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ControllerPublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ControllerUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ValidateVolumeCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ListVolumes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/GetCapacity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: 
_Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Node service + +type NodeClient interface { + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + GetNodeID(ctx context.Context, in *GetNodeIDRequest, opts ...grpc.CallOption) (*GetNodeIDResponse, error) + ProbeNode(ctx context.Context, in *ProbeNodeRequest, opts ...grpc.CallOption) (*ProbeNodeResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Node/NodePublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Node/NodeUnpublishVolume", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) GetNodeID(ctx context.Context, in *GetNodeIDRequest, opts ...grpc.CallOption) (*GetNodeIDResponse, error) { + out := new(GetNodeIDResponse) + err := grpc.Invoke(ctx, "/csi.Node/GetNodeID", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) ProbeNode(ctx context.Context, in *ProbeNodeRequest, opts ...grpc.CallOption) (*ProbeNodeResponse, error) { + out := new(ProbeNodeResponse) + err := grpc.Invoke(ctx, "/csi.Node/ProbeNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.Node/NodeGetCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Node service + +type NodeServer interface { + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + GetNodeID(context.Context, *GetNodeIDRequest) (*GetNodeIDResponse, error) + ProbeNode(context.Context, *ProbeNodeRequest) (*ProbeNodeResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/NodePublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/NodeUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_GetNodeID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodeIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).GetNodeID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/GetNodeID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).GetNodeID(ctx, req.(*GetNodeIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_ProbeNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProbeNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).ProbeNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/ProbeNode", + 
} + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).ProbeNode(ctx, req.(*ProbeNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/NodeGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "GetNodeID", + Handler: _Node_GetNodeID_Handler, + }, + { + MethodName: "ProbeNode", + Handler: _Node_ProbeNode_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +func init() { proto.RegisterFile("csi.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 3231 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5b, 0x4d, 0x6c, 0x23, 0x49, + 0x15, 0x4e, 0xfb, 0x37, 0x79, 0x8e, 0x67, 0xda, 0x95, 0xc9, 0xc4, 0xe9, 0xc9, 0x4c, 0x32, 0x3d, + 0x3f, 0x3b, 0xc3, 0xb2, 0x5e, 0x08, 0x88, 0xd9, 0x99, 0xd9, 0x3f, 0x27, 0xee, 0x38, 0x66, 0x9c, + 0xb6, 0xb7, 0x6d, 0xcf, 0xee, 0x80, 0xd8, 0x56, 0x8f, 0xdd, 0xc9, 
0x98, 0x71, 0x6c, 0x6f, 0x77, + 0xc7, 0xda, 0x9c, 0x41, 0x48, 0x20, 0x0e, 0x08, 0x24, 0x16, 0x71, 0x80, 0xc3, 0x72, 0x00, 0x89, + 0x13, 0x08, 0x10, 0x82, 0x0b, 0x20, 0x8e, 0x08, 0x71, 0x61, 0xd1, 0x4a, 0xdc, 0xd8, 0xc3, 0x72, + 0xe2, 0xca, 0x0d, 0xd5, 0x4f, 0xb7, 0xbb, 0xdb, 0xdd, 0xb6, 0x13, 0x07, 0x86, 0x9b, 0xfb, 0xbd, + 0xaa, 0xf7, 0xaa, 0xbe, 0x57, 0xf5, 0x7e, 0xaa, 0xca, 0xb0, 0xd0, 0x34, 0xdb, 0xb9, 0xbe, 0xd1, + 0xb3, 0x7a, 0x28, 0xda, 0x34, 0xdb, 0xe2, 0x65, 0xb8, 0x54, 0xd4, 0xad, 0xda, 0x51, 0xbf, 0xdf, + 0x33, 0x2c, 0xbd, 0xf5, 0x50, 0x37, 0xcc, 0x76, 0xaf, 0x6b, 0x2a, 0xfa, 0x3b, 0x47, 0xba, 0x69, + 0x89, 0x7f, 0xe5, 0x60, 0x2d, 0x98, 0x6f, 0xf6, 0x7b, 0x5d, 0x53, 0x47, 0x5b, 0x90, 0x30, 0x74, + 0xf3, 0xa8, 0x63, 0x65, 0xb9, 0x0d, 0xee, 0x56, 0x6a, 0xf3, 0x56, 0x0e, 0x2b, 0x18, 0xd7, 0x25, + 0xa7, 0x90, 0xf6, 0xbb, 0x73, 0x0a, 0xeb, 0x89, 0x44, 0x88, 0xeb, 0x86, 0xd1, 0x33, 0xb2, 0x11, + 0x22, 0x02, 0x88, 0x08, 0x09, 0x53, 0x76, 0xe7, 0x14, 0xca, 0x12, 0x24, 0x48, 0xd0, 0x7e, 0xe8, + 0x3e, 0x20, 0xd3, 0x96, 0xad, 0x0e, 0x98, 0xf0, 0x2c, 0xb7, 0x11, 0xbd, 0x95, 0xda, 0x5c, 0x24, + 0x5d, 0x99, 0x46, 0x25, 0x63, 0xfa, 0xc7, 0xb0, 0x95, 0x84, 0xb8, 0xa1, 0xf7, 0x3b, 0xc7, 0xe2, + 0x03, 0x48, 0x32, 0x22, 0xba, 0x00, 0xf1, 0x43, 0xed, 0xcb, 0x3d, 0x83, 0xcc, 0x20, 0xad, 0xd0, + 0x0f, 0x42, 0x6d, 0x77, 0xd9, 0xa0, 0x30, 0x15, 0x7f, 0x60, 0x6a, 0x5f, 0xb3, 0x9a, 0x4f, 0xb2, + 0x51, 0x4a, 0x25, 0x1f, 0xe2, 0xab, 0x70, 0xa1, 0xa8, 0x5b, 0xd5, 0xce, 0xd1, 0x41, 0xbb, 0x5b, + 0xea, 0xee, 0xf7, 0x18, 0x7a, 0xe8, 0x26, 0x24, 0xd9, 0x00, 0x19, 0x3a, 0xde, 0xf1, 0xd9, 0x4c, + 0xf1, 0x2f, 0x11, 0x58, 0xf6, 0x09, 0x60, 0xf0, 0xde, 0xf7, 0xc1, 0x7b, 0xd5, 0x86, 0x77, 0xb4, + 0xed, 0xe9, 0x70, 0xfd, 0x13, 0xe7, 0x00, 0x8b, 0x20, 0xd6, 0xd5, 0x0e, 0x75, 0xa2, 0x69, 0x41, + 0x21, 0xbf, 0xd1, 0x0d, 0x38, 0x37, 0xd0, 0xbb, 0xad, 0x9e, 0x61, 0x23, 0x4d, 0x64, 0x2d, 0x28, + 0x69, 0x4a, 0xb5, 0x21, 0x7c, 0x00, 0xf3, 0x87, 0x5a, 0xb7, 0xbd, 0xaf, 0x9b, 0x56, 0x36, 0x4a, + 0x2c, 
0xf1, 0xe2, 0xc4, 0x81, 0xe6, 0xf6, 0x58, 0x0f, 0xa9, 0x6b, 0x19, 0xc7, 0x8a, 0x23, 0x40, + 0xb8, 0x0f, 0x69, 0x0f, 0x0b, 0xf1, 0x10, 0x7d, 0xaa, 0x1f, 0xb3, 0x71, 0xe1, 0x9f, 0xd8, 0x0c, + 0x03, 0xad, 0x73, 0xa4, 0xb3, 0xd1, 0xd0, 0x8f, 0x7b, 0x91, 0x97, 0xb8, 0xa1, 0x81, 0xbf, 0x1f, + 0x85, 0xa5, 0x6d, 0x43, 0xd7, 0x2c, 0xfd, 0x61, 0xaf, 0x73, 0x74, 0xa8, 0x9f, 0xd0, 0x26, 0x0e, + 0x1a, 0x11, 0x17, 0x1a, 0x77, 0xe1, 0x5c, 0x53, 0xeb, 0x6b, 0xcd, 0xb6, 0x75, 0xac, 0x1a, 0x5a, + 0xf7, 0x40, 0x27, 0xcb, 0x20, 0xb5, 0x89, 0x88, 0x88, 0x6d, 0xc6, 0x52, 0x30, 0x47, 0x49, 0x37, + 0xdd, 0x9f, 0x68, 0x07, 0x96, 0x06, 0x64, 0x1c, 0x2a, 0xa6, 0x3f, 0x6e, 0x77, 0xda, 0x56, 0x5b, + 0x37, 0xb3, 0x31, 0x02, 0xd6, 0x32, 0x1d, 0x02, 0xe1, 0x6f, 0xdb, 0xec, 0x63, 0x05, 0x0d, 0xbc, + 0x94, 0xb6, 0x6e, 0xa2, 0x5d, 0x80, 0xbe, 0x66, 0x68, 0x87, 0xba, 0xa5, 0x1b, 0x66, 0x36, 0x4e, + 0xba, 0xd3, 0x3d, 0x17, 0x30, 0xd9, 0x5c, 0xd5, 0x69, 0x4a, 0x41, 0x76, 0xf5, 0x45, 0xf7, 0x81, + 0x3f, 0x32, 0x75, 0x43, 0x6d, 0x1a, 0x7a, 0x4b, 0xef, 0x5a, 0x6d, 0xad, 0x63, 0x66, 0x13, 0x64, + 0x3a, 0xbc, 0x2d, 0xcf, 0xa6, 0x2b, 0xe7, 0x71, 0x4b, 0x17, 0x41, 0x78, 0x05, 0xce, 0xfb, 0x64, + 0x9f, 0xc4, 0x4a, 0xe2, 0x6f, 0x38, 0xb8, 0xe0, 0x1d, 0x2f, 0x5b, 0xef, 0xf7, 0x7c, 0xeb, 0x7d, + 0x23, 0x60, 0x6a, 0xb3, 0x2c, 0xf7, 0x7b, 0xce, 0x6a, 0xff, 0x14, 0xa4, 0x98, 0x41, 0xda, 0xdd, + 0xfd, 0x1e, 0x53, 0x77, 0xde, 0x65, 0x08, 0xb2, 0x64, 0x61, 0xe0, 0xfc, 0x1e, 0x2e, 0xad, 0x1f, + 0xc4, 0x80, 0xf7, 0x1b, 0x0b, 0xdd, 0x85, 0xf8, 0xe3, 0x4e, 0xaf, 0xf9, 0xd4, 0xb3, 0x51, 0xfd, + 0xad, 0x72, 0x5b, 0xb8, 0x09, 0xa5, 0xe2, 0x41, 0x91, 0x1e, 0xb8, 0xeb, 0x61, 0xef, 0xa8, 0x6b, + 0xb1, 0x81, 0x87, 0x74, 0xdd, 0xc3, 0x4d, 0x86, 0x5d, 0x49, 0x0f, 0x94, 0x87, 0x94, 0xd6, 0x6c, + 0xea, 0xa6, 0xa9, 0x1e, 0xf6, 0x5a, 0xf6, 0x72, 0xdc, 0x08, 0x16, 0x90, 0x27, 0x0d, 0xf7, 0x7a, + 0x2d, 0x5d, 0x01, 0xcd, 0xf9, 0x2d, 0xa4, 0x21, 0xe5, 0x1a, 0x95, 0x50, 0x84, 0x94, 0x4b, 0x13, + 0x5a, 0x81, 0xe4, 0xbe, 0xa9, 0x5a, 0xc7, 
0x7d, 0xdb, 0x2f, 0x24, 0xf6, 0xcd, 0xfa, 0x71, 0x5f, + 0x47, 0xeb, 0x90, 0x22, 0x43, 0x50, 0xf7, 0x3b, 0xda, 0x81, 0x99, 0x8d, 0x6c, 0x44, 0x6f, 0x2d, + 0x28, 0x40, 0x48, 0x3b, 0x98, 0x22, 0x7c, 0xcc, 0x01, 0x0c, 0x55, 0xa2, 0xbb, 0x10, 0x23, 0x43, + 0xc4, 0x52, 0xce, 0x6d, 0xde, 0x98, 0x34, 0xc4, 0x1c, 0x19, 0x27, 0xe9, 0x22, 0xfe, 0x90, 0x83, + 0x18, 0x91, 0x91, 0x82, 0x64, 0x43, 0x7e, 0x20, 0x57, 0xde, 0x94, 0xf9, 0x39, 0x74, 0x11, 0x50, + 0xad, 0x24, 0x17, 0xcb, 0x92, 0x2a, 0x57, 0x0a, 0x92, 0xfa, 0xa6, 0x52, 0xaa, 0x4b, 0x0a, 0xcf, + 0xa1, 0x4b, 0xb0, 0xe2, 0xa6, 0x2b, 0x52, 0xbe, 0x20, 0x29, 0x6a, 0x45, 0x2e, 0x3f, 0xe2, 0x23, + 0x48, 0x80, 0x8b, 0x7b, 0x8d, 0x72, 0xbd, 0x34, 0xca, 0x8b, 0xa2, 0x35, 0xc8, 0xba, 0x78, 0x4c, + 0x06, 0x13, 0x1b, 0xc3, 0x62, 0x5d, 0x5c, 0xfa, 0x93, 0x31, 0xe3, 0x5b, 0x69, 0xc7, 0x0c, 0x18, + 0x29, 0xf1, 0x4d, 0x48, 0x7b, 0x9c, 0x01, 0x76, 0xa3, 0x86, 0xfe, 0xce, 0x51, 0xdb, 0xd0, 0x5b, + 0xea, 0xe3, 0x63, 0x4b, 0x37, 0x09, 0x0c, 0x31, 0x25, 0x6d, 0x53, 0xb7, 0x30, 0x11, 0x63, 0xda, + 0x69, 0x1f, 0xb6, 0x2d, 0xd6, 0x26, 0x42, 0xda, 0x00, 0x21, 0x91, 0x06, 0xe2, 0xdb, 0x00, 0xc3, + 0xc5, 0x89, 0xa5, 0x3a, 0xee, 0xc8, 0x23, 0xd5, 0xa6, 0x52, 0xa9, 0xb7, 0x21, 0xf1, 0x44, 0xeb, + 0xb6, 0x3a, 0x3a, 0x5b, 0x5f, 0x19, 0x17, 0xf6, 0xbb, 0x84, 0xa1, 0xb0, 0x06, 0xe2, 0xf7, 0x38, + 0x58, 0x74, 0x33, 0xd0, 0x39, 0x88, 0xb4, 0x5b, 0xcc, 0xf2, 0x91, 0x76, 0x0b, 0xdd, 0x87, 0xf9, + 0x43, 0xdd, 0xd2, 0x5a, 0x9a, 0xa5, 0x11, 0x93, 0xa7, 0x36, 0xd7, 0x47, 0xa4, 0xe5, 0xf6, 0x58, + 0x0b, 0xdb, 0xb1, 0xb3, 0x4f, 0xe2, 0xd8, 0xdd, 0xac, 0x13, 0xb9, 0x8c, 0x01, 0xa4, 0x5c, 0x0e, + 0x08, 0xe5, 0x20, 0x46, 0x06, 0x41, 0xe3, 0xbe, 0xe0, 0xf7, 0x58, 0xb9, 0x82, 0xa3, 0x9f, 0xb4, + 0x13, 0xee, 0xc0, 0x42, 0xe1, 0x54, 0x7a, 0x7f, 0xc6, 0xc1, 0x52, 0x41, 0xef, 0xe8, 0xa7, 0x8d, + 0x23, 0x9f, 0x83, 0x34, 0xf3, 0x33, 0x93, 0x8c, 0xb0, 0x38, 0x70, 0x23, 0x1f, 0xe4, 0x9e, 0xa3, + 0x53, 0xba, 0x67, 0xf1, 0xeb, 0x1c, 0x5c, 0xf0, 0x0e, 0x7a, 0xac, 0x7f, 0x0d, 
0x6a, 0x7a, 0x3a, + 0xff, 0x3a, 0x6f, 0xfb, 0xd7, 0xa1, 0xb7, 0xfc, 0x43, 0x04, 0xae, 0x6c, 0xf7, 0xba, 0x96, 0xd1, + 0xeb, 0x74, 0x74, 0xa3, 0x7a, 0xf4, 0xb8, 0xd3, 0x36, 0x9f, 0xfc, 0x6f, 0xb1, 0xbc, 0x0e, 0xc9, + 0x6e, 0xaf, 0xa5, 0xab, 0xed, 0x16, 0x83, 0x30, 0x45, 0x7a, 0xc8, 0xbd, 0x96, 0x5e, 0x2a, 0x28, + 0x09, 0xcc, 0x2b, 0xb5, 0xd0, 0x16, 0x64, 0xfc, 0x21, 0xfa, 0x38, 0x1b, 0x23, 0xed, 0x43, 0x02, + 0x34, 0x3f, 0xf0, 0x47, 0x01, 0x01, 0xe6, 0x0d, 0x5d, 0x6b, 0xf5, 0xba, 0x9d, 0xe3, 0x6c, 0x7c, + 0x83, 0xbb, 0x35, 0xaf, 0x38, 0xdf, 0x33, 0x05, 0x5c, 0xf1, 0xc3, 0x08, 0xac, 0x87, 0xa2, 0xc8, + 0x8c, 0x2b, 0xf9, 0x8c, 0xfb, 0x3c, 0x15, 0x3b, 0xbe, 0xd7, 0xe9, 0xec, 0xfc, 0xfb, 0x61, 0xda, + 0x68, 0xc0, 0x52, 0x9f, 0x0a, 0x56, 0xbd, 0x01, 0x15, 0x6f, 0xcc, 0xad, 0x13, 0x0c, 0x21, 0xe7, + 0x61, 0x62, 0x0f, 0x47, 0x37, 0x70, 0xa6, 0xef, 0xa7, 0x0b, 0x05, 0xb8, 0x18, 0xdc, 0xf8, 0x74, + 0xb9, 0xa2, 0x05, 0x09, 0xba, 0x16, 0xd0, 0x8b, 0x90, 0x20, 0x7c, 0xbb, 0xa0, 0x58, 0x71, 0x2d, + 0x94, 0xdc, 0x43, 0xc2, 0xa1, 0x83, 0x62, 0xcd, 0x84, 0xbb, 0x90, 0x72, 0x91, 0x4f, 0xe4, 0x59, + 0x3e, 0xe2, 0x60, 0x63, 0x88, 0x4c, 0xa3, 0xdb, 0xff, 0xff, 0xdd, 0x1a, 0x41, 0x4b, 0x37, 0x36, + 0xed, 0xd2, 0x7d, 0x9f, 0x83, 0xab, 0x63, 0xe6, 0xc9, 0x16, 0x6f, 0xd1, 0xb7, 0x78, 0x5f, 0xf0, + 0xad, 0x9c, 0x90, 0x7e, 0x67, 0xe6, 0xa6, 0xfe, 0xc8, 0xc1, 0xd5, 0x87, 0x5a, 0xa7, 0xdd, 0x72, + 0x32, 0x4d, 0x77, 0xde, 0x7d, 0x52, 0x73, 0xf8, 0xb2, 0xcb, 0xc8, 0xc4, 0xec, 0x32, 0xac, 0x40, + 0x88, 0x9e, 0xb0, 0x40, 0x10, 0xff, 0xce, 0x81, 0x38, 0x6e, 0x1e, 0x0c, 0xee, 0x5d, 0x1f, 0xdc, + 0x39, 0xaa, 0x61, 0x62, 0xc7, 0xd3, 0xe1, 0xfd, 0xba, 0xe3, 0x2d, 0xd6, 0x60, 0xc1, 0xa9, 0xca, + 0x89, 0xea, 0x79, 0x65, 0x48, 0x40, 0x59, 0x48, 0x1e, 0xea, 0xa6, 0xa9, 0x1d, 0xd8, 0xdb, 0xc5, + 0xfe, 0x1c, 0xda, 0xe9, 0xab, 0x1c, 0xa0, 0x72, 0xdb, 0x64, 0xf9, 0xe9, 0x89, 0x0d, 0x83, 0xd3, + 0x56, 0xed, 0x5d, 0x55, 0xef, 0x5a, 0x46, 0x9b, 0xa5, 0x58, 0x69, 0x05, 0x0e, 0xb5, 0x77, 0x25, + 0x4a, 0xc1, 0x49, 
0x95, 0x69, 0x69, 0x86, 0xd5, 0xee, 0x1e, 0xa8, 0x56, 0xef, 0xa9, 0xde, 0x25, + 0xfb, 0x62, 0x41, 0x49, 0xdb, 0xd4, 0x3a, 0x26, 0x8a, 0xef, 0x47, 0x60, 0xc9, 0x33, 0x0c, 0x86, + 0xeb, 0x5d, 0x1f, 0xae, 0x34, 0x3d, 0x0a, 0x68, 0x79, 0x3a, 0x20, 0xdf, 0x1f, 0xfa, 0xdd, 0xd7, + 0x20, 0x69, 0xcf, 0x82, 0xfa, 0xaa, 0x1b, 0x13, 0x54, 0xe5, 0xa8, 0xe7, 0xb2, 0x7b, 0xa1, 0xcb, + 0x00, 0x5d, 0xfd, 0x5d, 0x8b, 0xcd, 0x92, 0xe2, 0xbd, 0x80, 0x29, 0x64, 0x86, 0xc2, 0x5d, 0x88, + 0x53, 0x9f, 0x36, 0x43, 0xa5, 0xf4, 0xcd, 0x08, 0xa0, 0xa2, 0x6e, 0x39, 0xc9, 0xf0, 0x09, 0x8d, + 0x15, 0xb2, 0x27, 0x22, 0x27, 0x2d, 0x9a, 0x8b, 0x9e, 0xa2, 0x99, 0x6e, 0xa9, 0xe7, 0xec, 0x03, + 0x0a, 0xdf, 0xe0, 0xc6, 0xd5, 0xcc, 0xb3, 0x96, 0xbd, 0xbf, 0xe4, 0x60, 0xc9, 0xa3, 0x71, 0xec, + 0xa2, 0x09, 0x68, 0x79, 0xba, 0x45, 0x73, 0xc7, 0x59, 0x33, 0x2f, 0x00, 0xd2, 0x06, 0x5a, 0xbb, + 0xa3, 0x3d, 0xee, 0x50, 0x4c, 0xb1, 0x70, 0x56, 0x35, 0x64, 0x1c, 0x8e, 0xad, 0x75, 0x68, 0xc7, + 0xcf, 0xbb, 0x23, 0x15, 0x1b, 0xd7, 0x29, 0x5d, 0xa3, 0xf8, 0xb1, 0x27, 0x1c, 0x8c, 0x08, 0x9b, + 0x32, 0x1c, 0x84, 0xf4, 0x3b, 0x1d, 0x40, 0xb2, 0x03, 0x50, 0x01, 0x16, 0x03, 0x96, 0xda, 0x86, + 0x4f, 0x79, 0x4d, 0x37, 0x06, 0xed, 0xa6, 0x7b, 0xd5, 0x79, 0x7a, 0x0d, 0x71, 0x7b, 0x2f, 0x02, + 0x97, 0xc6, 0x74, 0x43, 0x2f, 0x41, 0xd4, 0xe8, 0x37, 0xd9, 0x14, 0xaf, 0x4f, 0xd2, 0x92, 0x53, + 0xaa, 0xdb, 0xbb, 0x73, 0x0a, 0xee, 0x22, 0xfc, 0x82, 0x83, 0xa8, 0x52, 0xdd, 0x46, 0xaf, 0x40, + 0xcc, 0x29, 0xce, 0xcf, 0x6d, 0xde, 0x9e, 0x46, 0x44, 0x0e, 0xd7, 0xef, 0x0a, 0xe9, 0x26, 0xf6, + 0x20, 0x46, 0xaa, 0x79, 0x4f, 0x65, 0x9d, 0x85, 0x0b, 0xdb, 0x8a, 0x94, 0xaf, 0x4b, 0x6a, 0x41, + 0x2a, 0x4b, 0x75, 0x49, 0x7d, 0x58, 0x29, 0x37, 0xf6, 0x24, 0x9e, 0xc3, 0x25, 0x72, 0xb5, 0xb1, + 0x55, 0x2e, 0xd5, 0x76, 0xd5, 0x86, 0x6c, 0xff, 0x62, 0xdc, 0x08, 0xe2, 0x61, 0xb1, 0x5c, 0xaa, + 0xd5, 0x19, 0xa1, 0xc6, 0x47, 0x31, 0xa5, 0x28, 0xd5, 0xd5, 0xed, 0x7c, 0x35, 0xbf, 0x5d, 0xaa, + 0x3f, 0xe2, 0x63, 0x5b, 0x09, 0x3a, 0x5e, 0xf1, 0x1f, 
0x51, 0xc8, 0xe2, 0x24, 0xe3, 0x99, 0xd4, + 0x03, 0xad, 0xe0, 0x94, 0x95, 0x3a, 0x86, 0xcf, 0x3a, 0x09, 0x50, 0xd0, 0xd8, 0xa6, 0x4f, 0x52, + 0x71, 0xa8, 0xb1, 0x34, 0xe3, 0x40, 0xb7, 0xd4, 0xbe, 0x66, 0x3d, 0x21, 0xf9, 0xd2, 0x82, 0x02, + 0x94, 0x54, 0xd5, 0xac, 0x27, 0xc1, 0x05, 0x47, 0xfc, 0xf4, 0x05, 0x47, 0x62, 0x8a, 0x82, 0x23, + 0x39, 0xed, 0x09, 0xdf, 0x99, 0xa4, 0xd8, 0xe2, 0x77, 0x38, 0x58, 0x0d, 0x80, 0x92, 0x6d, 0xf2, + 0xd7, 0x7c, 0x9b, 0xfc, 0x46, 0x18, 0xf4, 0x67, 0x9c, 0xeb, 0x7d, 0xc8, 0x81, 0x80, 0xb5, 0x3c, + 0xa3, 0x9c, 0xdb, 0xb7, 0x30, 0xa2, 0x23, 0x0b, 0x63, 0xa6, 0x74, 0xfb, 0x3d, 0x0e, 0x2e, 0x05, + 0x4e, 0x8e, 0x81, 0x9e, 0xf7, 0x81, 0xfe, 0x9c, 0x03, 0xfa, 0x7f, 0x3b, 0xc5, 0xbe, 0x07, 0x7c, + 0x51, 0xb7, 0x58, 0x69, 0x71, 0xc2, 0xa8, 0xf1, 0x13, 0x0e, 0x32, 0xae, 0xce, 0x6c, 0x2e, 0x77, + 0x7c, 0x73, 0xb9, 0x6c, 0x07, 0x4e, 0x6f, 0xbb, 0xd3, 0xcd, 0x20, 0xe7, 0x44, 0x05, 0x57, 0x91, + 0xc4, 0x85, 0x16, 0x49, 0x9e, 0x79, 0x56, 0x8d, 0xde, 0x63, 0x1d, 0xf3, 0x4f, 0x3a, 0xcf, 0xaf, + 0x70, 0x90, 0x71, 0x75, 0x1e, 0x3b, 0xcf, 0x91, 0x76, 0x67, 0x66, 0xa9, 0x02, 0xdd, 0x1f, 0x33, + 0x46, 0xfa, 0x0f, 0xd8, 0x4a, 0x0c, 0x8b, 0xf1, 0xe1, 0x2b, 0xf1, 0x4c, 0xa3, 0xfb, 0xae, 0x63, + 0xc7, 0x57, 0x03, 0xa3, 0xbb, 0xe0, 0xa8, 0x9d, 0x3a, 0xae, 0xff, 0x98, 0x83, 0xe5, 0xc0, 0x0e, + 0x68, 0xd3, 0x1d, 0xd1, 0xaf, 0x84, 0x4b, 0x76, 0xc7, 0xf2, 0x1a, 0x0d, 0xe5, 0x77, 0x3c, 0xa1, + 0xfc, 0xda, 0xf8, 0xbe, 0xee, 0x20, 0xbe, 0x14, 0x10, 0xc4, 0x9d, 0x40, 0xfb, 0xdb, 0x6b, 0x10, + 0x27, 0x80, 0xa0, 0x57, 0x21, 0x7d, 0xa0, 0x77, 0x75, 0x43, 0xeb, 0xa8, 0x14, 0x33, 0x3a, 0xc8, + 0x95, 0x21, 0x66, 0xb9, 0x22, 0xe5, 0xdb, 0x00, 0x2e, 0x1e, 0xb8, 0xbe, 0x91, 0x0c, 0x4b, 0x4d, + 0x72, 0x11, 0x63, 0x07, 0x4d, 0x37, 0xf2, 0x6b, 0x2e, 0x29, 0xee, 0xeb, 0x1a, 0x5b, 0x54, 0xa6, + 0xe9, 0x27, 0x62, 0x79, 0x2d, 0x72, 0xf0, 0xe8, 0x95, 0x17, 0x1d, 0x91, 0xe7, 0x3e, 0x9e, 0x74, + 0xe4, 0xb5, 0xfc, 0x44, 0xd4, 0x87, 0xf5, 0xa6, 0x93, 0xf9, 0xa8, 0xbe, 0x00, 0x4f, 0x65, 
0xc7, + 0x5c, 0xeb, 0x8c, 0x8d, 0x35, 0xf8, 0x68, 0xca, 0x56, 0xb3, 0xd6, 0x1c, 0xc3, 0x47, 0x03, 0xb8, + 0xea, 0xd2, 0x78, 0xd4, 0x0d, 0xd4, 0x49, 0x03, 0xfa, 0xed, 0x40, 0x9d, 0x3e, 0x8f, 0x6b, 0x6b, + 0xbd, 0xd2, 0x1c, 0xdb, 0x02, 0x1d, 0x83, 0x38, 0x60, 0x95, 0xba, 0x1a, 0x50, 0x20, 0x31, 0xc5, + 0xf4, 0x6c, 0xf1, 0x13, 0x2e, 0xc5, 0xe1, 0xe5, 0xbd, 0xad, 0x79, 0x7d, 0x30, 0xbe, 0x09, 0x7a, + 0x1b, 0x56, 0x89, 0x2b, 0x0c, 0x9c, 0x6a, 0xd2, 0x75, 0x7f, 0x45, 0x35, 0x8e, 0xc4, 0x72, 0x5b, + 0xd1, 0xc5, 0x6e, 0x20, 0x07, 0xb5, 0xe0, 0x12, 0x91, 0x1f, 0x02, 0xe6, 0x3c, 0xd1, 0x70, 0xcd, + 0xa7, 0x21, 0x04, 0xc6, 0x6c, 0x37, 0x84, 0x87, 0x24, 0xe0, 0xfb, 0xd8, 0x79, 0xaa, 0x44, 0x17, + 0x15, 0xbd, 0x40, 0x44, 0xaf, 0xba, 0x44, 0x3b, 0xfe, 0xd5, 0x16, 0x78, 0xae, 0xef, 0xa1, 0xa0, + 0x1d, 0xc8, 0xe0, 0x28, 0xce, 0x62, 0x03, 0x93, 0x03, 0x23, 0x72, 0x9c, 0x78, 0xe4, 0xc8, 0x39, + 0x60, 0x94, 0x16, 0xa1, 0x08, 0x3f, 0x8a, 0xc0, 0xa2, 0x7b, 0xeb, 0xe1, 0x82, 0x95, 0x08, 0x53, + 0x9b, 0xc3, 0x2b, 0xb3, 0x5b, 0x21, 0xfb, 0xd4, 0xf3, 0xb1, 0x8d, 0xa3, 0xc1, 0x82, 0x6e, 0xff, + 0x44, 0x9f, 0x86, 0xe5, 0xa6, 0x46, 0x56, 0xe7, 0xe1, 0x91, 0x89, 0x47, 0x6a, 0xa9, 0x86, 0x6e, + 0x19, 0xc7, 0x64, 0xd7, 0xce, 0x2b, 0x88, 0x32, 0xf7, 0x8e, 0x4c, 0x4b, 0xee, 0x59, 0x0a, 0xe6, + 0xa0, 0xe7, 0x21, 0x43, 0x75, 0xb7, 0x74, 0xb3, 0x69, 0xb4, 0xfb, 0x16, 0xf6, 0xf3, 0x34, 0x47, + 0xe1, 0x09, 0xa3, 0x30, 0xa4, 0x8b, 0x4f, 0x71, 0x48, 0xf7, 0xaa, 0xf7, 0xd6, 0x12, 0x69, 0x58, + 0x68, 0xc8, 0x05, 0x69, 0xa7, 0x24, 0x4b, 0x05, 0x9e, 0x43, 0xeb, 0x70, 0xa9, 0x21, 0xd7, 0x1a, + 0xd5, 0x6a, 0x45, 0xa9, 0x4b, 0x05, 0x55, 0x91, 0xde, 0x68, 0x48, 0xb8, 0x62, 0x90, 0x94, 0x5a, + 0xa9, 0x22, 0xb3, 0x0b, 0xba, 0x52, 0xad, 0x56, 0x92, 0x8b, 0x84, 0x59, 0x52, 0xa4, 0x82, 0xba, + 0x53, 0x92, 0xca, 0x05, 0x3e, 0x2a, 0x7c, 0x2d, 0x0a, 0x99, 0x11, 0xdf, 0x82, 0xde, 0x08, 0xc0, + 0x6a, 0x73, 0x9c, 0x37, 0x1a, 0xa5, 0xf8, 0x51, 0x0b, 0x84, 0x20, 0x12, 0x02, 0xc1, 0xbf, 0x38, + 0x58, 0x0e, 0x94, 0x38, 0x5a, 
0x54, 0xe5, 0xcb, 0x65, 0x55, 0xae, 0xd4, 0xd5, 0xd2, 0x5e, 0xb5, + 0x2c, 0xed, 0x49, 0x72, 0x9d, 0x60, 0xb2, 0x01, 0x6b, 0x95, 0xaa, 0xa4, 0xe4, 0xeb, 0xa5, 0x8a, + 0xac, 0x56, 0x25, 0xb9, 0x80, 0x27, 0xbf, 0x53, 0x51, 0x86, 0x85, 0xd5, 0x0a, 0x2c, 0x95, 0xe4, + 0x87, 0xf9, 0x72, 0xa9, 0xc0, 0x68, 0xaa, 0x9c, 0xdf, 0x93, 0xf8, 0x28, 0xba, 0x02, 0x82, 0x1b, + 0x4e, 0xbb, 0xce, 0x52, 0x95, 0xbc, 0x5c, 0x94, 0xf8, 0x18, 0x5a, 0x85, 0x65, 0xd6, 0x21, 0x5f, + 0x56, 0xa4, 0x7c, 0xe1, 0x91, 0x2a, 0xbd, 0x55, 0xaa, 0xd5, 0x6b, 0x7c, 0x1c, 0x5d, 0x86, 0x55, + 0x77, 0xd7, 0x6a, 0x5e, 0xc9, 0xef, 0x49, 0x75, 0x49, 0x51, 0x1f, 0x48, 0x8f, 0xf8, 0x04, 0xba, + 0x04, 0x2b, 0xb6, 0xca, 0x21, 0xeb, 0x61, 0xbe, 0xdc, 0x90, 0xf8, 0xa4, 0xf0, 0xd3, 0x08, 0x64, + 0x46, 0x9c, 0xf2, 0x44, 0x43, 0x8c, 0xf4, 0x18, 0xa5, 0xcc, 0x64, 0x88, 0xef, 0x72, 0xb0, 0x1c, + 0x28, 0xf1, 0xec, 0x0c, 0xb1, 0x0a, 0xcb, 0x3e, 0x43, 0xec, 0xe6, 0xe5, 0x42, 0x19, 0x9b, 0x62, + 0x08, 0x75, 0xa1, 0x22, 0xd5, 0x88, 0x74, 0x82, 0x35, 0x1f, 0x13, 0x7e, 0x15, 0x83, 0xb5, 0x71, + 0x71, 0x06, 0x1d, 0x04, 0x20, 0xb7, 0x3b, 0x65, 0x90, 0x1a, 0xcb, 0x9c, 0x05, 0x4f, 0x74, 0x13, + 0xe6, 0x99, 0x67, 0xb3, 0xcf, 0xcc, 0x3c, 0x69, 0x6f, 0x92, 0xa6, 0xbd, 0xa6, 0xf8, 0xb7, 0x88, + 0xfb, 0x74, 0x28, 0x78, 0x10, 0xcf, 0xd8, 0x04, 0x68, 0x0d, 0xb2, 0xbe, 0x8d, 0xc0, 0x4e, 0x2f, + 0xa4, 0x02, 0x1f, 0xc7, 0xfb, 0x8b, 0xdc, 0xea, 0xfb, 0xba, 0x25, 0xd0, 0x45, 0x40, 0x7b, 0xf9, + 0xb7, 0xd4, 0x7c, 0xbd, 0x9e, 0xdf, 0xde, 0x95, 0x0a, 0xe4, 0xee, 0xbf, 0xc6, 0x27, 0xf1, 0xee, + 0x70, 0x6f, 0x9e, 0xbd, 0x4a, 0x43, 0xae, 0xab, 0x3b, 0xe5, 0x7c, 0xb1, 0xc6, 0x2f, 0xf8, 0x99, + 0x4c, 0x6f, 0xfd, 0x51, 0x55, 0xe2, 0x01, 0xab, 0x72, 0x33, 0x77, 0x6a, 0x94, 0x91, 0x42, 0x4b, + 0x70, 0xde, 0x9e, 0x17, 0x19, 0x4b, 0xa9, 0xc0, 0xcf, 0x0b, 0x7f, 0x8e, 0xba, 0xef, 0x4e, 0x03, + 0x43, 0xd9, 0xd3, 0x80, 0xb5, 0x53, 0x9e, 0x3a, 0xd9, 0x98, 0xc0, 0x9e, 0x69, 0x3f, 0x7e, 0x3b, + 0x02, 0xe2, 0x64, 0xf1, 0xcf, 0x7a, 0x65, 0x84, 0xd8, 0x3e, 0x1e, 
0x64, 0x90, 0x04, 0xba, 0x0d, + 0x37, 0x6c, 0x0f, 0x5c, 0xa9, 0x0f, 0xd7, 0x45, 0xbd, 0xa2, 0xd6, 0xaa, 0xd2, 0x76, 0x69, 0xa7, + 0xc4, 0x16, 0x09, 0x9f, 0x44, 0x17, 0x80, 0x67, 0xfd, 0x9c, 0x48, 0xc6, 0xcf, 0x0b, 0xff, 0x8e, + 0xc0, 0xfa, 0x84, 0x34, 0x0c, 0x1d, 0x06, 0x98, 0x54, 0x9e, 0x3e, 0x8d, 0x9b, 0xc4, 0x9f, 0xc9, + 0xa8, 0xbf, 0xe6, 0xe0, 0xda, 0x14, 0xf2, 0xbd, 0x56, 0x0d, 0x85, 0x9f, 0x1b, 0xb7, 0x93, 0x22, + 0xe3, 0x76, 0x52, 0x34, 0x6c, 0x27, 0xc5, 0x02, 0xa2, 0x65, 0x49, 0xde, 0xa9, 0xf0, 0x71, 0xe1, + 0x1b, 0x51, 0xb8, 0x18, 0x9c, 0x90, 0xa2, 0x2f, 0x06, 0x40, 0xfe, 0xf2, 0xc4, 0x3c, 0x36, 0x84, + 0x3c, 0x13, 0xc0, 0x1f, 0xb1, 0xb3, 0xa9, 0x69, 0xfc, 0xe8, 0xa4, 0x3d, 0xc1, 0x85, 0x23, 0x1f, + 0x19, 0x87, 0x7c, 0x74, 0x1c, 0xf2, 0xb1, 0x30, 0xe4, 0xe3, 0xe8, 0x3c, 0xa4, 0xa8, 0x18, 0x49, + 0x51, 0x2a, 0x0a, 0x9f, 0x08, 0xdf, 0x92, 0x49, 0xe1, 0x77, 0x11, 0x7a, 0x00, 0x1c, 0xe8, 0xd4, + 0xd4, 0x00, 0x73, 0xbc, 0x3e, 0x45, 0xd2, 0x1f, 0xca, 0x98, 0x35, 0xb1, 0x58, 0x1b, 0x27, 0xf8, + 0x0c, 0x8d, 0x92, 0x81, 0x74, 0x43, 0x76, 0x63, 0x18, 0x0d, 0xc7, 0x30, 0x26, 0xfc, 0x93, 0x83, + 0x73, 0xde, 0x22, 0x05, 0x3d, 0x08, 0x40, 0xee, 0x93, 0xa1, 0x35, 0x8d, 0xef, 0x73, 0x26, 0x94, + 0xde, 0x06, 0x34, 0x2a, 0xcd, 0x0b, 0xcd, 0x32, 0x64, 0xb6, 0xf2, 0x05, 0xb5, 0x5a, 0x6e, 0x14, + 0x4b, 0xb2, 0xba, 0x5d, 0x91, 0x77, 0x4a, 0x45, 0x9e, 0x43, 0xd7, 0x61, 0x63, 0x24, 0xe7, 0xdf, + 0xad, 0xd4, 0xea, 0x6a, 0x41, 0xc2, 0xf8, 0x49, 0xf2, 0xf6, 0x23, 0x3e, 0x42, 0x26, 0xeb, 0xad, + 0xa4, 0x26, 0x4e, 0xd6, 0xdb, 0xdc, 0xf7, 0x39, 0xeb, 0x64, 0x47, 0xa5, 0x9d, 0xdd, 0x64, 0xb7, + 0x92, 0xec, 0x44, 0x7d, 0xf3, 0xe7, 0x1c, 0xcc, 0x97, 0xc8, 0xd9, 0xae, 0x75, 0x8c, 0xbe, 0x44, + 0xde, 0x99, 0x8f, 0xbc, 0xac, 0x47, 0x1b, 0x63, 0x1e, 0xdd, 0x93, 0x23, 0x3b, 0xe1, 0xea, 0xc4, + 0x67, 0xf9, 0xe2, 0x1c, 0xda, 0x85, 0xb4, 0xe7, 0xc1, 0x36, 0x5a, 0x0d, 0x7a, 0xc4, 0x4d, 0x05, + 0x0a, 0xe1, 0xef, 0xbb, 0xc5, 0xb9, 0xcd, 0x0f, 0xe2, 0x00, 0xc3, 0xd0, 0x8f, 0x24, 0x58, 0x74, + 0x57, 
0x48, 0x28, 0x1b, 0xf6, 0x60, 0x59, 0x58, 0x0d, 0x7d, 0xef, 0x2b, 0xce, 0x61, 0x31, 0xee, + 0xfc, 0x9e, 0x89, 0x09, 0x78, 0x9c, 0xc7, 0xc4, 0x04, 0x3d, 0x6b, 0x13, 0xe7, 0xd0, 0x3e, 0xac, + 0x84, 0xa4, 0xab, 0xe8, 0xda, 0xf8, 0xe7, 0x4a, 0x54, 0xf8, 0xf5, 0x69, 0xde, 0x34, 0x89, 0x73, + 0xa8, 0x03, 0xab, 0xa1, 0xe9, 0x0f, 0xba, 0x31, 0xe9, 0x79, 0x0b, 0xd5, 0x75, 0x73, 0xba, 0x57, + 0x30, 0xe2, 0x1c, 0xea, 0x81, 0x10, 0x1e, 0x97, 0xd1, 0xcd, 0x89, 0xcf, 0x3b, 0xa8, 0xbe, 0xe7, + 0xa6, 0x7c, 0x06, 0x22, 0xce, 0xa1, 0x2d, 0x48, 0xb9, 0xde, 0x1a, 0xa0, 0x95, 0xd1, 0xd7, 0x07, + 0x54, 0x64, 0x36, 0xec, 0x59, 0x02, 0x95, 0xe1, 0xba, 0xe5, 0x66, 0x32, 0x46, 0xef, 0xe4, 0x99, + 0x8c, 0x80, 0x0b, 0x71, 0x3f, 0xcc, 0xbe, 0x83, 0xe3, 0x11, 0x98, 0x83, 0x4f, 0xb4, 0x47, 0x60, + 0x0e, 0x39, 0x7f, 0x16, 0xe7, 0x36, 0xbf, 0x15, 0x85, 0x18, 0xde, 0xf6, 0xa8, 0x0e, 0x99, 0x91, + 0x30, 0x8d, 0x2e, 0x8f, 0xbd, 0x3b, 0x14, 0xae, 0x8c, 0xbf, 0xdf, 0x12, 0xe7, 0xd0, 0x17, 0x60, + 0x29, 0x20, 0xd2, 0xa0, 0xf5, 0xf0, 0x3b, 0x1a, 0x2a, 0x79, 0x63, 0xd2, 0x25, 0x8e, 0x38, 0x87, + 0x5e, 0x86, 0x05, 0xc7, 0x67, 0xa1, 0x65, 0xff, 0x4d, 0x09, 0x95, 0x73, 0x31, 0xf8, 0x02, 0x85, + 0xf6, 0x76, 0xdc, 0x3b, 0xeb, 0xed, 0xbf, 0xe4, 0x60, 0xbd, 0x47, 0xae, 0x25, 0x86, 0xf3, 0xf2, + 0x9b, 0x67, 0x3d, 0xfc, 0xc4, 0xdf, 0x3f, 0xaf, 0x50, 0x93, 0x3c, 0x4e, 0x90, 0xbf, 0x33, 0x7d, + 0xe6, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x50, 0x23, 0x38, 0xdb, 0x34, 0x00, 0x00, +} diff --git a/vendor/github.com/container-storage-interface/spec/logo.png b/vendor/github.com/container-storage-interface/spec/logo.png new file mode 100755 index 0000000000000000000000000000000000000000..a4256afab8f895e6b1a9fa6e1b9ae5aa4e9c90d6 GIT binary patch literal 21136 zcmY(q1yq|s(=dv=TZ%(~;!xbJXrZ{fTX6{P?oiqS#VPLY?(Xg$T!K4X-uL_NfB&0v zlI+eiJF~NAM|Yn{B?W186e1KTC@A!AG7>6KP|%VeZEGZikKb}Tw!Du&I7=~kF(|0& zc+?kT_>Vq?sf>y|6qMIzD5!uCD5$3oP{1J+l$8zG>Uyb^`DgR?mW7YirL7oac-1qFqmvzZ0IiiFhv 
z4*%#00j)qFM}AgTcXxLdcMcW@XG>N#K0ZFyFYK)B?93k+%r2hxAY%_^dl#zzA@cvy zkuY~Lb+&c{Sv%NM{D-cwiGwRh2nhTSqyO{ypLv3;E&e}F_AdV`)`x(s|9Qg7#`1;r z|Iq#zD)=8LzoN6X`3L9!&=+PC{4eJJ5B9%&1X=&X{QsKF|48~@=!dAnD1xm2XW4{N zzW2~-LqUl`eUlJV^MF2UM+{(+Y`)tRGPdN-fR(|+O3Vv_&@&kAPyj?`xh@g=7=PiR z!HHq3_52D3YKmchF@_3#PGLbQGK0n!Qx=8);|P`DF)=Xw`Yts8I&n8PF*dQU|Mx!K zvbAML*FjxKb^hPVHUCVt=pYO!Du`+7|7uX7KEw(N_+@PEaM~Q5y1Kn@2(69zwAZ^KFKU=-=JCpcJodo_n;!w-z zo3T6l{D`J8e@@ouEE@}A{ z*1&Du{HR<=t}@K8v|v2b-_ClMBSCilZS?GAbn`GtHTnZkr#1Yc7@!q1C>AG;H; zYn-aNLqdEF({>TW=C51Re}QRM1c&pK$#U zQ2X2s3coW8vTFxzsYhidqaMW4L@0s|IT&wJ;KM>9028825#qEca^|hPi1{Q%9*ded ziL7^IN^gW629D*)S8jJBvW3@i|5^C1nW})W$6B48+TMg(b_Y{GcPYjAPOh87Sb|^_ zI8B6YbiAQB|2U-e*WW{D9^*{re%t5->sG}-`4i9F=-c&#n^?3j;}4&dF5}tDDtO#1 zVkeJT7K+zu$6&bUvdrgEN^#RK@?V47c1*MGy`I}nJ=^s+Hc$wey(FbW{C;xcfanL1 zF&#d8xRJT1_2*PsaT8*}JEOPR#wRoE302~F47y(!@+&|7SU9{8^h2LboPV7C&u)pH zf#L20CqwFlzesokBvXOvRQH{#Kyzt3>0H=Sxb^I=h5b^Qz<#5m@(o zN<!a!8j}6O8>d9P)FH&)sH9*!qz^G_HTBq?cLU)^qYJ-?lP&A{PGM+s`xk z9MkLGJ%X)9^Mhm;upvOX52)evqlAzWBV(c6(p?i04AfJvr~-ipB4ozZwftkS#$hHD z-=4^`#0J)?o9b)(=tNZKu`2}VEo#MDlpvWS$)lBq4O#b%6Dlh)b9`=0RlLkbE*bsB z>v;7~y@6wJo^Os1F=G|_^hgQrDkPL%SU{8X1-}L?6J4BW0BeXjNcaN3m3`xSrg-l* z7vM>Jykc@-e9;iI5;<4h6{JNW90HICTxOU%zuH$AwH>H~#8uQSs?tewNZ@~!FO{>L zZ#nwz^t6bpudlaZ?0LRoX0`pbVDV%m9^QS8PsQ`LwCOQdI(Uk9pn%2oK=D6%%~S=S ziO(U)?08lUAme-yb6CL6si15j+)=YVe?>tsx_uFCA2F?-*aA?1{ zgmX0FSi7Rv^qly5^(*3j7wRlpyuYD~?=hJ%sCwnxwB0U## z7TQ`*_^-CA5fl+_M9bE4CvFeO2AxDCuBD{NkAmTz*WmQAlz2r-sR~oUwFaxTE8(@> zD!}esPVJY+;wfF0M->X3x!!gCbwVG>TD`(C@cOCp+fN;NVdKn-wFmZe(3PA={`chM z5x|Q#JMo_e=S6_P=pnam3vRh-;A3@Ht#LulvVFdqEXn5TnS1OWB|BNW6E$9~aUvGj z((F^14Z`0ozJVLMNw)ogHpW|FmwXkA+3>y2;3z|;$HdHpujW*phwZ8Ky8<5V4$>lL z`4OdyFt^+wyEx;HU4LfI0IcTAUq`47Ln28JpMLl+w@&0&+AFYS7VREuWt#?uRt2Ch zhSGTtm}^)@TADR5CE~h^$z$O)evwddGo~mp9_I+%<1yVoj6r|c#z;P9ug!nvE>m%NbEd@fLn zz!@r}`me=RBG5YKkW8Z?n_Atam3EG`jFjJZ$|tk%&ZW6P)~NEnB~5D8iKWN^|8z_n z+90K3;Qr&7Hbpl18Po4bYUeclS?V|fG$Vj2^&N(}kjsW7*$Ei)OcRwl>h(ron(V=P 
zx-se)xs(4M{SHq056};dk4s#U@yKo+_NG%812NZ-g=3p(@!Sk0`@vnd;o-R5-|fO zzAHvO+`z7NW8WG*dRRyqbYt%q$5~D#pVkFDsoQT3~A#u6$ zMfHGN_$7PHC_F0hCYSUZ*x3T*eIm zh?czA1&2GS(wjmBmQVCq@2`iGI9FB5qVydW?!i{?yDN>B2-wM#{969jr}Pg0Tw=W^ zC+s~9o5;D9v)`9(OymL#{@`5R-;p|Um+F4eNiN1wRQkk`;9Q8?KOdb$YjB_4f`X5n zC4aGB?98yDtIC5}Ln;$%jpK~{y6&=fjo=>*UvmYs3HD4}I~xvl@fwUnqR0YdlqD7o;N`u^iY*YqADcBhp|g)mBg^a-`sFVS9NDY4e7 ziEZwIGw_occKvgUW`JS2F2-&z(K~OfHMmST9I8Nw?aro$!75Ih&=OP;q9BSN;yKhW zkF}}7yUJ8^4hSX|SAxKRDZOE6IkEwYAFAH_E?qU`%&`&^G_U0cql?vg?H4y94)mYb zbtQ|PV2+t3V$*FyG6eggDm__TB0HNn8FVktZiyNBMH!ugo00<)vs22MDz0?U-;l>J zE$1`3Ux~6`@HS+a)^Fi3pamM6`2D%Gcu+X}N%8N{n$Z8QJ1YD@?YGud8~TOo70iN- z(9*z@e%w)8IJ)YLrqN^evO@odL3=#M6m;L*m&-$MKXkh4E^!XbZ_I~$kI6ns(D94+ zpz@b46@18xD%A2A*pJ9J1n?-`F5O%IU?eS(PXJ!|Xv z3-2dRAi}jb>b3~fBqBGRdclzwzx`2ucLk?Tpe;t;tV6&HQ%BDoEYL|V-X&-ZlWK?j$8XeWUn)Q%IctBK#p3D1^l*9%_vl^686 zcXzPSCmHE^9u@+39>H62vjYu9uua}Gls$GiSh>5L9DrT0fhGVg!(`{d-P@^RG(3z9 z`^mTqHLSg({uB8L0hff?rxb<#gueBEilGAi#nZ}uM-0QIUn^4$pNDLY5${=QSB+bJ zl+SZG&sJ;8ol(GZowWXcBu6E{3gqd8Y!N zFg`&$Vbf>A*+W;tG6tf5N>`XO_h=FJ-9C}Er*KHm(PQakk> zt4#Ee->+oK*k|oL25%1O0nm~$EOh&MoX?z@J5S@CZZGXEN;fCkI$v<{0fIb8(+G={ zYT6$<$WP1!lzR@=)LsiLVe2l;eq8KfFVW4XhnHp?)vHYX+{-K#kYJ)k!MUSPDy zcx@!8pJ*w1u<3me=WwYE6gDsYK4rnkMzN_f()Np$fRu4vNb(cIhd zS3^=AKM;>hbE=5DP6Csr0Re7yVspciv2|uN2{M*#vV5dM73(czsnW-i7OYdYUDr+C zc4;zo3QK-PB3-18Jf>ss8EQZn)`z&oQ7q=eTBLKmN$Sk5tE>;zC5T3d=fBq30zvE{ zU#0@EwdXJ`T;1{$&ZwPNyQ!t-tTsU)Sc|aAlPHp6NO5S-$Z*&=94~xI1LG_+ZkVVRn7nIy2&y z@I02@X5R7-u!yOIt|?y0)Ax7ifht8?giOz^QY>*%}9GiBE2KuP|-KO z96v)Fr~=Gy*056%RpxFoy*uTJZKUoTIiwCX_lja(FukaNeE})|sJ`QQU|{8xoF7jr z2R}hO;!lYJBKrmS3Kiy-?I=d5$OfN|PA&n%e(Km*2{wY6*D)`6pZO3ETXs<>KBDMa zNafJMqbRi(T@#TlyhH?WK+1ee$Px%lQ21rqP1rSL4Ruf4)gXGuF0h-PRAgV03z4c~ zd0_iVS4LRej3obdh!ZT~`g0>T8{#I(-)DTJc7Jqec|=S$|1G%;uHU3v@w2QJ4cCT^ zdG2Lx>d3Y}@{4Qw=4h&9$v3ugT>FY9j4`x^6{CHgaWtxxN z)lsfd=L)Bh#GU*Doq98<+$FyYz{F>n6d z&ld#55!klqdg}!LMva;t?w4F$bRI>{ryT1!MAS+UCYiCGoulY5^Mg@y1@d0 
z5d%Gr3fep7QB@K#m_gy)N1`Ft{M)t+Gh%YDm9YAYi3Uz-^m=C$DvL3CB0A$k->qq$ zD3z9F89R;<8M|n3s3T|nmowQT)c(I6D8hNkA9CMx{K`<(1bD#X%!hZa6w4z2y zMTOhorn0;d?7T0UmOgT>^{O>*n{rB~+8woeaTR=T@D}uMWuDqX#`2Gryii{ZznHKI zkV{h&FAcA4K_6gdXHG^hqm_AWqeDdhoM>#E;6C2n^GE7K)FYe6bLl<9AQ5Qhe45)} z!JCZrP6l!6Dnu;oOnB;+QzK{AIPxIeF8eOfAfKsn3S)O^Ad>X?1#ILphhW-+W{cdU zdvIrvfs&%v%)ABo))=n2CB{h*lLS#BDyk9fa8TY}|0^Gg53&pq7&q+u?!_-^$+8kl zyjxy5<3sBAtYK=*&7|D^lGO1QnQ{T;oohJ3!X*)Yjp_d7%FTwrlfBv>O7(%DYd4t1 z+$iq+jQvX9$Gjow&gdu36Ejj3N!HKR8cp!|!UXgS3Z$G>bDuAp_B9L&gKh(o3>kWR z92X*b{={39=#bKt=V`xu;Y4fn*^DW)Fj)-57k$)jcbte^in(yvs~A+s>#T4m2$Z;+ zwpCbseIs8&TSMG)uy#_=MaesbX$*Xadq*XM)r5U)q?%D|St{$u;3SYfqcVAec|e{5 zSE!4&SS>smei+s2hQDR(AJ~uD6YVVa5yunK7C$+RoGyJ|DKl44)?X{58kzSrGs2$Nkd?d{IXRcFLe- z#Lq%!2$s)OJhZo8z3-AWI4Tam2tOurlA=LN>SLo%+8uG_)OAZN`@ z#Wtz~L;){9aR2E@ZU(Wl|J22V(?4*u>U^mrF|Ze$k{RL`^+YvkEk}a z+*p*zR8{T#erJ^>5!#IU=z?N!R^WJ`1s6_fJ&~-Fov1^!t`p}W{veS`uyV+yu_JR6 zk=19Kgj=}CD;$Xk2bjSi{QwE4GLi%AXH)RsyVTooX*u7x(|jAsUSS3)pvuSUFs@W59T6TNpSv~7b!269<&02B@FZ8j@i`Y zBW?HP7KJ0alFp(632|hCg5dW{u0t}|O2m8UaSa`?I0Vl6kGn7R?8OJ)(jo^$+rdX*N+FU5Odsz;Iv0RNU|WO3dVTrt)J`CEbWT*9xx- zmKa3O@}V!spLR_OV#mg;gJi(AD<>m~ zzdAoXf4!<-58o%7v*$4&ex{FFs1WFut1v1k5m>@g3q&*2xeVUbBI~SdO}=HuebSo< zJ&NOHU2o!}xDe|NHdCUSk&`F6K$v5$RCK`a#X4@NPDYMnK%>PR)j!S3x|F5maPeUW znop8DUjC@jm!K`F`r(eJgeZ*cgH{IDK(Y#T-?HTeA%d_@Sp`JLa6+-bxd*v)b%>gk za^isNX*@{UFxJri6yPV&w>4~TVSh`=Mc5VTLUDwiYJRo`^QO=lf<;n_SiebJ9R$vc zf;nebk3pPH_Ry^G{_wYMv!5I^c!Km+>Ec$34CdSx1a7Hn`Lqnk>D?-tTAHsIDXwhQ z9Ohl*A@mTDEQHB61LR65x_FQ3!pUMSw2!Q8Hp|--C3WT^GBN z*Wo9?vhL%dba{e8RAt8{lm~*AFmI`eT{W{X!?nf~odH+38p$n+BF$M&1Tk8bk9h$n zNafzuSKER1qhg96HIgFps?WtKvpYnpzUHg1$WsxGe<3$4u@k)lYEbY0tIR^q>{br> zo!D-?=)t<)BTdE2Be=k4`8t8$+eWWm#E6{D|$h z(Rx^e9|zEtoIWc1U<4Y!P#p``doW?sXBIiQnixk#D1^AW{Tt@tfw8?3_=nkKeMThS0FvJp4o(c6cHB#~7|xCaA4tFA%@R z#UT}qE3(k)N+z4a#Gv19z%Q~RG^T$dG@AF#` zOvgYvk#39p3720YGm zM%e2iw8C|&9CFgq12b8 z&^Ts(B&CKwF|JETaoJ0*X-7RsP-#=I<0rTb(Ueavtj1?Jg=HrLPNHcdF1Mq!q6Jc{F+=+rGv)lq% zvYFA`=eC4C%4$;ivqRQ0r0-J^+gw 
z%4?}~tr-a9Ie3i&8)j_0lltXr4ru-*ovNYE?I8BO3GH@RZFAJp>*ai{$Y1$hxZf9F z7k~hX$FooO%k*)@{yKBGlIRE0(KmHQCVoknFVN+=50-wCif@x4e=TYdn6=LjPvlsC z`WN>5Xin8@*|pKDwed%M(a`Zf_1BVwOlAyqx8cZEm?lJy*m5Al^eXc<0%{BH8UG`s zczyBi?vYVS3T7hs(4JFWJ+^#-q_pqw<;eZgQF~wle=$PdkDGvbZD$~k&16oT%SSv+ zmutJ!cbD%cJ#CARb+mI@^$;x{U{zrt-owV^7=gt-@AtQg9RB*GM}zatL28K-*rAV5l=#Y-ltq${ua)>Ii_~1xi4+pszn!#LgxaJRbH-1} zj0mo4FuBCCr?=CvPt~(}DJ_~kfTOIAk>*}|!ubZS)j$6mLh$33lbss^dnzzP><>Do zGWH&-a_~_4<&sGI#5Yg{bpPD3E*6a!TMFjn3 zEp^=SPa%@j<@i_G%0Pe4DcJTTGE4O_2V5UvcmqUz}hY$iVDm^b+-4| z%Vwk^g_%K+vR@Qc&MViHuF%Np%tu03uuB|y$y_DUMD|x?A_*t{-p>SOM%7$S1pC!@ zi&)g{*W8KufktOjY!BYHVQJyj8fqC_5_e2>-)MSuz?ukScGxOq)3 z#jQOCrR|f7ZkEHpo9nphW7R-dDW}ck8{GKX&D-kYy!)Vth&e6e#;R18G8)Oy(hOHL z%yYWNotA3c&}N9%8Xd;JD<)-o9kaj0*9gtp*K(jXU=f@@qj%)|QJx`#deHZo(lY&T z4{4v3 zU)e)nC-CeXBc>*x06}uyucEHACmvo)_cB!kOZaQ3Oc}?_x*b@DlxBV^By3i0#fa{c z@I(>sFjBHlkdXeIz#9V9~o2Z1L6w!n36 zn3`MREini*D{7iBY^0(nY|KOHgzbGrFK7MHIN8Y(1N?QUDc34wf>JrahX*~C#zV8#*)Kj~S zPG8&vZTAPl;fCOmiAQXlhvJpYr1rAJk?vbK?YgyAz6aH&OlMTu`ZG1o8zP6$MooIl zU%%yE#9sAC=Y~PK%e%34(0g#}J-z;k2_&4*ILJQ5$Zz-h&UWi;EXi~}O1I2YU}(IrSH5wQEjyaX4c?mI|&dvvp#vphB!I0T|~gthuy z`FYEI%fk_;>V`K%>s2CW#+xHM#g?Nv2?~3cQFKX{XmNDJllC&N@c0qy=Y|M^)2gEm zRl%&+!|@Xa&;YnseLEyIln3YIU2f>{yW31TNY=xNwiUHZPCq4hS@s| z&u>WPo9zYn&9B}=sn%Qf=h1vC1JxoNSa_J6BVL57IC~Aps4SKSr#Vx4O1qZS5mjYu zQqyw=dX3(krfOc48W5X_S;u}-FWDqo0gkrR;g`-NjVnl!C$U2^41$l=_7 zV~1Vaz8=0@?)qAw>(EjicoRPi?^({$zi;6o7lwZ|=@MrEUoVG#LTx@fQUIPxo#h`j;fmrVp9j1XrCk__xhPrZ_$ zNLvZ4kLem_P-7q?bUE0z0`!?d+T5zR6}AaJVu?%lcjr|SUFJ+Ao@D0O&HD3n0b-#7 zsDkEZe~T>J!yoRljki1s{EX-W(%ue1y4a=@Vj?+!tFAgho}Mf|rk_5Yumuv23@{kl z83Q|;)|puob{qmM${59bUm+z?r`VIZL{)!Cj+>&Tf}j_-GDWuVr9~Fojf;Qr!h`RQ zCZ9IwFy%49pjkNNO@{>y@(Z zlcS58-%E`tq$Ms!rTlb0AyK(v5kduXq18Sj-IGwVLl$OOlLiVDDkE zttr#2jTka-=9|}?f_CO2Uyfg6({~1+c$J+s<9f_^&>gXJ5E_R{4Db}~Xn>E*5A?hU zr^qF4{rarqH(xR`DFQ8YSfPOZQl870!|$;>8N`d#{>RT ziIvFV4U%X`LdRoqPA~HxxQ%W7b}aFTEN5W$Q<4(PWP}`v*kWGU7qMFI?%=t#{jFP1 
zBCJLES2lF+evuu1hPL;X+iagsWz{cb!Dxob%NUr713nZdOuukKXTmAIy-MxE$(r@W zu~jaoCd@sZEk@Up1gBKL%g?MM+ieTfpme<4T&b_Q zK44aT^7tL?m&3aKDW>>Tndn!EL@h^T;8!$^OLlP~-%WFire`g-DH<`NE+GYuio#Tl zpm?Ow$T&Bl>SKV7;S8?>K#?A)v?Xx?JJQ)BOz_#O<{ZFzs=`7!E8f!ox8drlUR)M} z`PsDbDRQeNGyCN8`;_rGOnMOuuZ111R;~OWUoC{tT+8~P2Hl_iPI;ct!PfGuu$#nZYZuZ;&B)BYoB$uf4yA8z#DdS7UYZTR|yT&J)%e$w}&qg*JG8- z^@=w0hgTCh!Ufdl+wZR-bwDwZPUO?GuLMP*KTY?x@{q{VR31-0=r)@>H%VxShxd-8 zDv6J+u}r;5oLO(XsX0<~Jgieo|q|d)zfYmBR zJO+pfg+v}o4Zpq6Jzwo*?FFkHsUgzWQ%Z;_&UEJqubhV-x~|T=ky^ zC;NZjvDo8H?)*5nG;z`w^prAooA_1Bvr8k5vQgfiw&xZzS;Z9npxh#TP|M92b6|ME zjhby=uOj8xe9e(obM)Z8)P7JufdR4^x6QBvGB@k`(qb~5&3Y?cy2)u84v=K5j(tr^ zIwvx^t>GhEgdUdxt(VQ)ia*lFyIDp_lutBpR@MIz!5(72g$vMBXY%p)5k}>P)j*VF zCzXV_2b6aIpqRm`zCe~p%7O-F7(z$pqfYj8YAmGjzG7f`a{ZP@;)ObFDau#XOVovv z4qOkkXu7P~u@5S7MyojM_7C##BX~D|FvR4BgP)S!xwFQ1P!2D~K?UB)Zr~!`3wAR8 zZYz&Jdk&Ar%HhY+k*5%_wq7~@6ntF~CV8_x5i2{etGUaK!FqT1bhJIaJ+@bZhp{~g?%3rG4Z9WTJ$hAha8?WiWE;6 z#n{a!4kMqO=ifN+_ln6W?SWIZ*KfE?({CQR(PJ(=| zfF&3@|CFJWNYi$$RAS_*nfu(Xy&bh9@NFLCp6wLH|kMWHjTGx?YPgZQ~-f^bOeCp`LCbCqzndoSG>^w1D3ju184-&KBC zz`mt}N)GNdS5DJ6nV|WEgA!au2lP43VAF0CN1-A>e?mka85EGru5uA1C&qLhv_SYuD_RkiA=aPC|OjJ zqe-RV50##j+w4AdARbbH)t2C6QBE8Xa;y&J#r#WqCFx-9-U7iPJ0grd(8P6jQ!rDu zcBbZqxBB^p^+FgESl`uP^gh&%I_M3R?Pq`F{y;_l8jpIoD zVi)HRr<3|)xWv3L0s`I=7nLwK-zkeqY{VCK4;t?h33%KsW6|g7DJNWu??MceXi(Oh z2it8O7~w*&s!~WNh!V&=v-w5PHjv){TJ4S+u4jYH>s-%su5&isve|b^d=Ahxz4rkY z6EnRlsFw9)8lAY1THm6?GfBlMQC9_X`IhC)xY=%U@4cdMt(W&AJLWU zat}y)$GJ|{y_E|NhrV;0Bs=f9{DI}lueY825C?w1`blP7x>JOER0)ypvh0=%kYO_@ z03}Y$NcY|IL!hffu#{yHy30jF9^EM-PjX#GRqv)SmoctvfebEuQ}NLlY;-A7?o1SP_ao{n08TCZG@#DXj zpUs+r$88m3f11C+I0|t`C0|1kV8tibB)ct>ODottNIYi|>DEMhuf5-T0bi>XO7#J2 z=u*9=m0=C!CWj@;ISq1$O%%}_@)X0YfVmzWKqc(y@A=A}NtSIaPK}{F`~x4c(`8dl zz8Sl>EKh&rqV7=vUAz29}FLaZ=r&(^Hu>&zDI^J2xHAo0iJOo>*haG+}H(B#D6 z`VZbL#8%bb<}KEcEdCj;tJ(uhHA|GF7B4-aD%R&9s$k!LoIs-sD_Ht{vlPo8x z&b&^jmL_yR``uG0#k!?)-`(>>HllY5P`&UTjS_Fv(0jbccuRn#TXK=dYUO)T|_EJk|a7Q|?RQQE5Ayu7cHw 
z+ISn~c+)9Gg&x&u{8P*oV#8feh(Wq%qG_Gv6UkCXO7f8__&p7=86z3D<#&gy_qSn8 z;uCarM89=q7rY(7hkI|5-FZ!%k3S&B;`Mt~TsPAcD(LML$+(+j;&U6)dF6N5J6vN| zdb_g(>+hf|k?mRak3F-keP2AewePfx$QSzVnwx2-etpH!P^%1Cf*YK|jOr+kKl z+y`ykyqQHz9}rO%6!`W2VOI|jujki^q-I=2-ca-4H5G|GCH%u}y_hI?VXf{C8ksr$ z;WJPq!C-iAxi;O8+{&OIH1v#Ha$ypg(qLM zQ)#TA{=nRREYE}KKyf+P<(|&!ev}|j_;w{F3Ds54_Gv7UG?Vc*J>hjFgJ7}^SH-2w*ZQp}L1X2WOL;y&{6Wu&1${5XY$bv@@Tl{PF~e zK0SZ_!z^w4r69KcX7cQ8I2}uf<`h`_(Z%>(Dh$^-kfdHNW~ln2DPcDJY6HxaUw`(O zIg2EalTylet8Omsw)Npp*M#!MwMpZX*HW{0cuI?zNM`3RONP$*+L)G*MWo(P&{~0W zIC=fZ?ai~kV7i~@y>atJOB;fT2wKwP8c99S34N2=JG9H=?WEGzjIV6QFn4d2HVF-m2V0_ZYe<>=QoHWDHyO=}! zyB_*?nSj1jW|GlqovmAZbvZ%I?Y4OD{wKy$Acbwm@jr6%#CfZ#Aw}aBrhjQLzau|x z?mo4FSo}j9VV9*ZPX+^yIJC)RD|vr;>3`k)vbMuDq6lsTCLA4_m^bp5Ojwj^T&9K+ z66%nZ@sC?cbq>5~R>>7y&5be8x9VwmADO{*Wl> zgh;nEV~XsW9F~ZPJ=mii?$+^-y4q%n_))R+ty$Yatwfj}aQ*??WOA@DD!4oO34=AA z+FZW{nul&*&RyvG9`NDu11oCAUt!j~j2?xvg^$X{s}l~F1%yxmlXAgouprecj;UJo z5#6cDs!Sd|gg=dFRA$}dqL>D{UbE7eiJgRp&BBjdIQhfSLcN8E)6C_hw-NCmeIAru zB`QT{H`pm~s5&T)~~sM|Y&$j9=|yUGDONM0t*kdJ$Aduh^2 z*3dt0NfKle$G`~v^~bMjwEC{umcI1JpMHko75H*2d8?+~E6Po9G!Z?(h9XYy@p)GwWK$MWvJ$EYnQ!T$lH<{e94X1`CYG zN}IlD+7aQo3OVRWc4A@7%Ez6aYiAV(kli9g2w_>mm7SGUD%spBp)kTQ$|5qWD@B)gXhf(;k6fh3WMLxbMA`Z^ zybhlv9K;TrAb%TN1;F~|Pvip~fk%4+NODu%*+9!{l(|(_V%6QfWQGVruqlYsd{2v7 z-f#~QOZBVk&IYnH1nId8Yk(se^Bes1vc3GVjA0Hj2E|@B>fH+xcSKjIL+$zRCH}A zMd=>Rmp3TSLLM9syh|i~Cu7#?c$%Y@k!v%p*mxVVF^(eAc@9C7!M9!ZqNZK8VYO0f zNiz&D2#(lGq!C!^w2#D~isk##ZZ=Vz*h&cXou(b7j9O)r?~XmdxZ{nJqo*u+(#FFV zx8k(bKBm1tj9tJ)NcUWpZ!IMlu`^fcapJq4wbxeKNl?>;X;9N|)oh)rXyy4q`n)ef z$Kwf!cPJAq0TtlZY#@$VC7v(ts%uNc4JWP@16Be5>gjk}S`sH13Oz0(FeyzlSU=!c zGPuspraltgM{{zi8U1@`8nLCE^73b=#>e_u#4;w*@$#Dy#gChx93wnI-hnr9gS&M^ zn)bM}()>e0DJ(=M$HedCSjvV-)H=tRaCH*ptN>P8SzS2Vpuzx)zcZ>znYTw}WwgjP z(mXlM9{dY(z8kmOwSK}5c}U%t)(-?vgK3=XREoGIlop0LJVqW8hm#eI&FHQ`cs&*IvQ7tl+D4X=ajk+JRRctCT_x(k{Lob2{t=7{QX^{;op14E^J}Yx2rYNTI>oTj zXr$HyGvV}ryGs5JdmkB2Tz@nlL?0{}htPN)1lnr~230c}>sxW$+l4PC!JQS-+m*S^ 
z`0^Z7;8Nv(`L-CGxbs3jY;Nf{yLS<-Y2bfPGI8*R2qTIOuRva$HpG`tpZ;x^MaA|C zUGlMACBE$&#(v^cV=cAeSjS_@V2V#!;$?7i!?>1;60{N=5KX7M3f3<1&Ais)WcC## zJBsuiwhY4=zk12@_fD__HPI)8eCtVLPO4X>pOHi4$KuiT08bjaQzv^19Bf)9S&*(u z{;d>|vI;glG+bo`FP@0BIBM3PLR)tJL_%D9=7Y|odu|p>5TL3xUsk+x7P@sCElhRW zCaI}H2#rwQ`#3{U3Nht%gQkK{cVVeXjyR4LT@;tpyA-Bte5v#C!IB__UzA{LKS6BI zIC;uEem$WpPS6yj1y`Q9P>Fi=yKc`VRuU79N6bf@llCn`Mc~mKcg|dpIKp7jFC;sN z$e!R9j3PHeM~)Bm)<*iDxR%BLyID8#rqGp1_8_XglzkybD7XC%IUW8ut(*7NnXINL z(JLP^?4w(V`TFQZH;!vdAX(;6v~r^=)cPl^FnvbrnDz1dc6CKwqwtyxp+ck*V&QN0 zig`KA-74)}N;n_GS{CcFPpaz1pD!I+yIxZPxQRQo-ZhcA;ge$qFvagN-eR4)9ZdIC z8#|W>Uo&p(A7<}CA|h$@o$N)_8ko|N*w?lZD_;7L zL^_V;`J0&42RUVuqr8@h<}o39soEOn{Hw~Kik)gOrC{zz>5glTqh+#0oML(p_o}e# zkk04}bf#*0e?G%w*yIDcV6E3-Q)XJrOyqhLU%N=zgxwFxvt5P!WHoe^UiiwjtjmT2 zp<^KrJBUilQrr0msU4{XiBPV0t}_-%S@`zj)`G!d!=lW@D0=0Hu%L6ny~AlgptgMs z^18vF0;*ExPC1kZJ#P#-LewaE?ri4wLn3QoJX!OGR*oNlqWWc<@OtQ+RL_PKkwl*b zQ*t{~;g7wR6huE>iDTZjrITX04V<|JR-ssx(Ps`ptRZMa!TNzKxrEgth7vc1Ti6ur zrCHPwU-ByRgujDF3LKz;SEhjy=4nN7v@oHOc)Ol|+wrBr5qlSi9QgCQl-Vgcr#TIc zH6KN(d1C{!^KVX!3|@rSd*2s>A5EP8u9rPCtiURLj))#J_6s2x_0g{!EriA2lo(R9 z@YZnApPNp(eTB1g%YCT2`B6JppMZ+~Dj|V&k`j;Mk<7tJ+zxVQZ5}kAH(d89-ye%b z?(9lJtdjucGcSvOA*36H(N*9Z?30Dg{HbxXaN z9PN;ZBO-`jc3=za1L;ry`b5Xc{7-yKP9^5Wy@wN5@}eQsz!wT$)Ke6RuwQUi8evyT zOZuWmy5E+>Y)t=_EA%{1iY9+vfRapV{DeyR}L!G*2XVHct>M7ZZuM6z@+K$ z5z~gorWp?Jk>v1R|L)Qd#eyEYu^xoF{b}@1h&#iR-IfAuh8JHQW|B|eQfroc-2r=e zi;sHWzH}8DHrojM=(ufeusBR&pT+$@0b3EI?h`Pp(gWx~KQ?R&RvF>*1k4&8bPh4< z&N=UT^L%=c_<{6u&yUWPqO+5)5#uFf_etM>}nPKj}1! 
zyEi&&sVbtp61)~jCYzk7oE{f5j9agV&J{;JG%nIx{ager!c!g>f>VK(+QMl-OVq=_ zW8hdIiIbV~mz)ER0?Jo}*ahlspz}0PdZ9iXuc|0)Hf&tYa5fxP+1bcwcK|;C01%Z) zL_t)mX&wybu+f9WtzCL9aAM<)r*+NhIFWBK2W2*p*m>1qeY(aVc`5 zz9S;h?CMs)>Y--o$LXAibW-^e5sMA-_)&3MiNbX)P9mbvcvF483Jw7`fj0oProx+v z@=>{xc=>U6BVEi-#CI;(8ED2G$Bvx82J8a1iQJkEDs4D!!}_ale+_8HdNYtnZ^cuX z>ZYU4Ww(vN3Si+lCy4a78S8XyJR#GWdZ;4vDOZ)krj_wpihJjdWprM*GCcolPafXA z;Z~4>+efiw$|(hg+9VU_@kK#3Um9>9B9@2T4yYXNbZvnNNG4#noim3{>N&{m7kR1M zqHMNpst2+iAr76=w>2tUl}Y)Da>b_;Il@C9nKMFlcFTEHcdwFQDvZzh5NnD2ICG&^ zm$^PlxmKd^W_HtH5y^iF{1}K7M?t%yyeBKaQC=!@8OVTs(ANZt*GyEWu#b(CnMgnc zcpf+q+yHh4B9dl#O;>-Ft!s(ff&n0DmX`?d32-40iArR9iF%Ty+X=V)9|7+Nx-6h@ zm8IkQ;0Pe{d^?a>+VE`ruk0Wj>xW~yHXhGymR$BrFiY7r&Bk?%L{PbAwB$d@+bU~A zj`C|f5UNe!CrXu|#B6b`(NgLvZo0hI*X9tKF&@tjFyTC@X`u=ruF6Q*1K{rQp73=y zy#-PUZ(I!DIdn|#5BR#^`-bv-U67@wR8Uh^ThnwB=}5wNr7P^U=yRjvYlA2DuIcYF za=HsT-%GnYz%JtS|Dbz{dX}kYVeHiGP8F|wIoAyP`1q6tR^zsFir17`VV9ir$Vl*5 z@3QW%yH`0qEH&Rm~_^V+bKi5i9jN^u&A;#-=7aBl=eQ0AYxJ~4*<>(&ZCLlsH62a+oZdPuQa1p|VKzaWXXm)ljh!ZKNPOScR-pd&cKGMtTrTpYp`qzSk!Li^JpuER`?4cVmj{@1{N}&3gE;y^A z`1*mnJ@RQ}W9g(HrQN{qf$4&C?xc|-b5dHM*$K0hXAWNW?KLv~KCAO?-r6rHy``0EH)W#XP7rjOku89KIa zE3VM#+b>G18$qFq6G*96>|96fwuo@kBf*mThlL<>;Pbd!EK!&pie3DJNECDO13 zIigZfCapfI^(e2o9K~j~-?QYZyEfV#G6J^%J8GZD=q(%cs&@Rr6T5dLOdb|b%LI>h6~-Ob|9 zm;X$3QG419b0+yrR@Zj(j>n6;>+?@Vde^t7;#nWHdex_$&Uq_crjuXOG%9G`L?onv zx%%K`zv$=NB7=Oqio#{w*5b5uwlA}#SdtqY9%&!M`(jw)%2e9>Ti@XDGrW;pg5c7l zqdjXEgY?$$)Vqkk8=o^+kC~REUzXz@JM}!k>9Kuhgv4cf1)uVIhr4?J$_KJG=98#p z2#IaQBGv4uY+`N~8lB@`)KXO+*y1M9ZQKX>3ZI+)scz;(xlDQpSbC2~1q=(@HBH)XD6Zw^~$FN41uoTQ(1k z1ZOWhe(Z<^B*dnwb^E#$3NOFqr>Y;R_4T`(!}!~FU2h#(kU;QQSFFRLa|)QUGyL3Tm%knd%etJd-O<< zi^H2d#1RpST=o26cm()OUVG zZ@3_9qy~?K5dXo_R+Le_j@D8cmlBQ$hqj)_b9aeFfFhF7Vo=u65uzx@J+Mq2?J*V0 zT&OkQ*@+bfk`be@1&?}if$=)wl~J0-AmtKX25Ee<_Kw0jU6FYiBTlw zLr4j{RTs^yM3%xhp%{_XK4+K`0WnqEa%imI&8B#=%GXR*I`j{ZymxpkxPxn-BJGeE z6<~SUvChv3Qr=ENT|{vD;-2=140!FX!ecd+Leb}K6RZ$Wg^<7%FX85hmjBd_r8J*$ 
z;D>>1y|?uaj6SMslshx~HFY}2js-dvXl;RcCq=2FS=k?WoM!n~2XAE(_rYaaI)*Y` zVsvCBLUIx(lE}-EncQyH7Vjxb8 zt8&BJ4~_a)^mixNe8CZM$?|aN?2qXXCo+YZzm>~zQlhjx8>Elrr}u(%Q>AOH=sljO z1g}}+#F1ZV7VcgEr0Rv+DW za(Bx6oFa`C1HER*lSPq!mVcP6_OFH6u{LyE=X!rTkEN4SgvznV+&ax}Js#ISulBP3 zUj3gPenK-`5xOPq96J{1SYT=vn4EB?mfv(@2X-BA&94mo7>d~jLj2uw?))-qlzt+| z*4G*xjfpORQ5r{^jX9;{KaMgl=lN``fS-S&BSewOcxsnKe?*87jkDA8(}$1Yv#Yy~ z6@vSjHKqyW%{#s$qb2ot-qo~KhlQyY@mZw&B^KW77^QJ96moZ&_+`EJ$W5Cvcgc_H zKXK$gI)oUSvh%lNftgxBOtNT1%RXIeltQaM-hU+Xwqv-~xNL++XL!s;vmfazf@-XH z)faX#cXZ^S4?1Z!=xK(S<*gVtfB5{evxYt`(qnejl6YN?1L5m{D-ZX&muGrUfDjMw z%kpZUK4|4R{m_l4ZsKBk#Qhq%TgTJYR?SuuY>uHa^p?vNUOI8avVrhDaC?WlQPv&5 zjs-dvXx{>@hhqEr%vwx)xLX;He4=NAQYm#LgtIT?xF*Et@hqZ(+=gNtrN36QniYn1 zaX#WGJa~B9#U_qk4}?%)Ki*m|1pk7%j$h-8Ll=@%k(EajRX)uH0%9R}_o>Eqo>Bl;x+6{#@Be zA8){YuC*=w+bg;gt7C!Zt_9j3qVs4YMz_93r|t6HwXvZ?`qtsel|9*6dQV90)qPz) z&pyM9kfex0(?4TBsi`o%;UegrXN3=!f)P0CmP9hRcr@8_>H1&z_lRbQloQ@MnL_yp z3SZAVYdb(9TTy`x@m>RWmS+j|I-h20V-V2O93O&d{=$UumU5;1+vk0CY^AA6-qjhO z`AzkxS*^D9-#H@3fMxYix_?(Tx_&oowiD$1Vm6_5#HP8U$hi!Aa|jubAo$Z0iHiT+ z35TLy2Q0INz3=@VBD}mSQNFzAlu&ol*8E@)y(1k9bSyB77SK>!^dfT9h7H@D&pXTE z(zL2`a0p+l^ZJ&h66x-IxsvRvBugc~INHC?&}y5>S!5&GXyaCK)2xWD=e?M%!R&gd zGL>GdvpriSlF3)1^2>Q`g$Km_4cPQy4d#W~OD+YpxkVmMT#fs2UTk@QS99)S+W1HI zu>EuRoFpNL?t*K3vHCiNp`E`S3v?_nTNYRnq!_PkmDobh3MA3=2FX<7#><$l+h%b} z-;BJ_Df`UWnweCol&+)`<>lGcdQ&FXs2(EQ-I<9J-m;!5uOl4`bSyAC7FcqmSYK^N zM4I!R;K(BkKHgoulgF&h2J6e6>|At@B1mDvgJ zSfFEpnOk7Vlj6)xFt0gq*%OIVb<_e2N#OHpflgr^3v?{du|UTH9Sd|U(6K> (@jieyu) +* Saad Ali <> (@saad-ali) +* James DeFelice <> (@jdef) +* + +## Notational Conventions + +The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as described in [RFC 2119](http://tools.ietf.org/html/rfc2119) (Bradner, S., "Key words for use in RFCs to Indicate Requirement Levels", BCP 14, RFC 2119, March 1997). 
+ +The key words "unspecified", "undefined", and "implementation-defined" are to be interpreted as described in the [rationale for the C99 standard](http://www.open-std.org/jtc1/sc22/wg14/www/C99RationaleV5.10.pdf#page=18). + +An implementation is not compliant if it fails to satisfy one or more of the MUST, REQUIRED, or SHALL requirements for the protocols it implements. +An implementation is compliant if it satisfies all the MUST, REQUIRED, and SHALL requirements for the protocols it implements. + +## Terminology + +| Term | Definition | +|-------------------|--------------------------------------------------| +| Volume | A unit of storage that will be made available inside of a CO-managed container, via the CSI. | +| Block Volume | A volume that will appear as a block device inside the container. | +| Mounted Volume | A volume that will be mounted using the specified file system and appear as a directory inside the container. | +| CO | Container Orchestration system, communicates with Plugins using CSI service RPCs. | +| SP | Storage Provider, the vendor of a CSI plugin implementation. | +| RPC | [Remote Procedure Call](https://en.wikipedia.org/wiki/Remote_procedure_call). | +| Node | A host where the user workload will be running, uniquely identifiable from the perspective of a Plugin by a `NodeID`. | +| Plugin | Aka “plugin implementation”, a gRPC endpoint that implements the CSI Services. | +| Plugin Supervisor | Process that governs the lifecycle of a Plugin, MAY be the CO. | +| Workload | The atomic unit of "work" scheduled by a CO. This may be a container or a collection of containers. | + +## Objective + +To define an industry standard “Container Storage Interface” (CSI) that will enable storage vendors (SP) to develop a plugin once and have it work across a number of container orchestration (CO) systems. 
+ +### Goals in MVP + +The Container Storage Interface (CSI) will + +* Enable SP authors to write one CSI compliant Plugin that “just works” across all COs that implement CSI. +* Define API (RPCs) that enable: + * Dynamic provisioning and deprovisioning of a volume. + * Attaching or detaching a volume from a node. + * Mounting/unmounting a volume from a node. + * Consumption of both block and mountable volumes. + * Local storage providers (e.g., device mapper, lvm). +* Define plugin protocol RECOMMENDATIONS. + * Describe a process by which a Supervisor configures a Plugin. + * Container deployment considerations (`CAP_SYS_ADMIN`, mount namespace, etc.). + +### Non-Goals in MVP + +The Container Storage Interface (CSI) explicitly will not define, provide, or dictate in v0.1: + +* Specific mechanisms by which a Plugin Supervisor manages the lifecycle of a Plugin, including: + * How to maintain state (e.g. what is attached, mounted, etc.). + * How to deploy, install, upgrade, uninstall, monitor, or respawn (in case of unexpected termination) Plugins. +* A first class message structure/field to represent "grades of storage" (aka "storage class"). +* Protocol-level authentication and authorization. +* Packaging of a Plugin. +* POSIX compliance: CSI provides no guarantee that volumes provided are POSIX compliant filesystems. + Compliance is determined by the Plugin implementation (and any backend storage system(s) upon which it depends). + CSI SHALL NOT obstruct a Plugin Supervisor or CO from interacting with Plugin-managed volumes in a POSIX-compliant manner. + +## Solution Overview + +This specification defines an interface along with the minimum operational and packaging recommendations for a storage provider (SP) to implement a CSI compatible plugin. +The interface declares the RPCs that a plugin must expose: this is the **primary focus** of the CSI specification. 
+Any operational and packaging recommendations offer additional guidance to promote cross-CO compatibility. + +### Architecture + +The primary focus of this specification is on the **protocol** between a CO and a Plugin. +It SHOULD be possible to ship cross-CO compatible Plugins for a variety of deployment architectures. +A CO should be equipped to handle both centralized and headless plugins, as well as split-component and unified plugins. +Several of these possibilities are illustrated in the following figures. + +``` + CO "Master" Host ++-------------------------------------------+ +| | +| +------------+ +------------+ | +| | CO | gRPC | Controller | | +| | +-----------> Plugin | | +| +------------+ +------------+ | +| | ++-------------------------------------------+ + + CO "Node" Host(s) ++-------------------------------------------+ +| | +| +------------+ +------------+ | +| | CO | gRPC | Node | | +| | +-----------> Plugin | | +| +------------+ +------------+ | +| | ++-------------------------------------------+ + +Figure 1: The Plugin runs on all nodes in the cluster: a centralized +Controller Plugin is available on the CO master host and the Node +Plugin is available on all of the CO Nodes. +``` + +``` + CO "Node" Host(s) ++-------------------------------------------+ +| | +| +------------+ +------------+ | +| | CO | gRPC | Controller | | +| | +--+--------> Plugin | | +| +------------+ | +------------+ | +| | | +| | | +| | +------------+ | +| | | Node | | +| +--------> Plugin | | +| +------------+ | +| | ++-------------------------------------------+ + +Figure 2: Headless Plugin deployment, only the CO Node hosts run +Plugins. Separate, split-component Plugins supply the Controller +Service and the Node Service respectively. 
+``` + +``` + CO "Node" Host(s) ++-------------------------------------------+ +| | +| +------------+ +------------+ | +| | CO | gRPC | Controller | | +| | +-----------> Node | | +| +------------+ | Plugin | | +| +------------+ | +| | ++-------------------------------------------+ + +Figure 3: Headless Plugin deployment, only the CO Node hosts run +Plugins. A unified Plugin component supplies both the Controller +Service and Node Service. +``` + +### Volume Lifecycle + +``` + CreateVolume +------------+ DeleteVolume + +------------->| CREATED +--------------+ + | +---+----+---+ | + | Controller | | Controller v ++++ Publish | | Unpublish +++ +|X| Volume | | Volume | | ++-+ +---v----+---+ +-+ + | NODE_READY | + +---+----^---+ + Node | | Node + Publish | | Unpublish + Volume | | Volume + +---v----+---+ + | PUBLISHED | + +------------+ + +Figure 4: The lifecycle of a dynamically provisioned volume, from +creation to destruction. +``` + +``` + Controller Controller + Publish Unpublish + Volume +------------+ Volume + +------------->+ NODE_READY +--------------+ + | +---+----^---+ | + | Node | | Node v ++++ Publish | | Unpublish +++ +|X| <-+ Volume | | Volume | | ++++ | +---v----+---+ +-+ + | | | PUBLISHED | + | | +------------+ + +----+ + Validate + Volume + Capabilities + +Figure 5: The lifecycle of a pre-provisioned volume that requires +controller to publish to a node (`ControllerPublishVolume`) prior to +publishing on the node (`NodePublishVolume`). +``` + +``` + +-+ +-+ + |X| | | + +++ +^+ + | | + Node | | Node +Publish | | Unpublish + Volume | | Volume + +---v----+---+ + | PUBLISHED | + +------------+ + +Figure 6: Plugins may forego other lifecycle steps by contraindicating +them via the capabilities API. Interactions with the volumes of such +plugins is reduced to `NodePublishVolume` and `NodeUnpublishVolume` +calls. 
+```
+
+The above diagrams illustrate a general expectation with respect to how a CO MAY manage the lifecycle of a volume via the API presented in this specification.
+Plugins should expose all RPCs for an interface: Controller plugins should implement all RPCs for the `Controller` service.
+Unsupported RPCs should return an appropriate error code that indicates such (e.g. `CALL_NOT_IMPLEMENTED`).
+The full list of plugin capabilities is documented in the `ControllerGetCapabilities` and `NodeGetCapabilities` RPCs.
+
+## Container Storage Interface
+
+This section describes the interface between COs and Plugins.
+
+### RPC Interface
+
+A CO interacts with a Plugin through RPCs.
+Each SP MUST provide:
+
+* **Node Plugin**: A gRPC endpoint serving CSI RPCs that MUST be run on the Node whereupon an SP-provisioned volume will be published.
+* **Controller Plugin**: A gRPC endpoint serving CSI RPCs that MAY be run anywhere.
+* In some circumstances a single gRPC endpoint MAY serve all CSI RPCs (see Figure 3 in [Architecture](#architecture)).
+
+```protobuf
+syntax = "proto3";
+package csi;
+```
+
+There are three sets of RPCs:
+
+* **Identity Service**: Both the Node Plugin and the Controller Plugin MUST implement this set of RPCs.
+* **Controller Service**: The Controller Plugin MUST implement this set of RPCs.
+* **Node Service**: The Node Plugin MUST implement this set of RPCs.
+ +```protobuf +service Identity { + rpc GetSupportedVersions (GetSupportedVersionsRequest) + returns (GetSupportedVersionsResponse) {} + + rpc GetPluginInfo(GetPluginInfoRequest) + returns (GetPluginInfoResponse) {} +} + +service Controller { + rpc CreateVolume (CreateVolumeRequest) + returns (CreateVolumeResponse) {} + + rpc DeleteVolume (DeleteVolumeRequest) + returns (DeleteVolumeResponse) {} + + rpc ControllerPublishVolume (ControllerPublishVolumeRequest) + returns (ControllerPublishVolumeResponse) {} + + rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) + returns (ControllerUnpublishVolumeResponse) {} + + rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) + returns (ValidateVolumeCapabilitiesResponse) {} + + rpc ListVolumes (ListVolumesRequest) + returns (ListVolumesResponse) {} + + rpc GetCapacity (GetCapacityRequest) + returns (GetCapacityResponse) {} + + rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) + returns (ControllerGetCapabilitiesResponse) {} +} + +service Node { + rpc NodePublishVolume (NodePublishVolumeRequest) + returns (NodePublishVolumeResponse) {} + + rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) + returns (NodeUnpublishVolumeResponse) {} + + rpc GetNodeID (GetNodeIDRequest) + returns (GetNodeIDResponse) {} + + rpc ProbeNode (ProbeNodeRequest) + returns (ProbeNodeResponse) {} + + rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) + returns (NodeGetCapabilitiesResponse) {} +} +``` + +In general the Cluster Orchestrator (CO) is responsible for ensuring that there is no more than one call “in-flight” per volume at a given time. +However, in some circumstances, the CO may lose state (for example when the CO crashes and restarts), and may issue multiple calls simultaneously for the same volume. +The plugin should handle this as gracefully as possible. +The error code `OPERATION_PENDING_FOR_VOLUME` may be returned by the plugin in this case (see general error code section for details). 
+ +### Identity Service RPC + +Identity service RPCs allow a CO to negotiate an API protocol version that MAY be used for subsequent RPCs across all CSI services with respect to a particular CSI plugin. +The general flow of the success case is as follows (protos illustrated in YAML for brevity): + +1. CO queries supported versions via Identity RPC. The CO is expected to gracefully handle, in the manner of its own choosing, the case wherein the returned `supported_versions` from the plugin are not supported by the CO. + +``` + # CO --(GetSupportedVersions)--> Plugin + request: {} + response: + result: + supported_versions: + - major: 0 + minor: 1 + patch: 0 +``` + +2. CO queries metadata via Identity RPC, using a supported API protocol version (as per the reply from the prior step): the requested `version` MUST match an entry from the aforementioned `supported_versions` array. + +``` + # CO --(GetPluginInfo)--> Plugin + request: + version: + major: 0 + minor: 1 + patch: 0 + response: + result: + name: org.foo.whizbang/super-plugin + vendor_version: blue-green + manifest: + baz: qaz +``` + +#### `GetSupportedVersions` + +A Plugin SHALL reply with a list of supported CSI versions. +The initial version of the CSI specification is 0.1.0 (in *major.minor.patch* format). +A CO MAY execute plugin RPCs in the manner prescribed by any such supported CSI version. +The versions returned by this call are orthogonal to any vendor-specific version metadata (see `vendor_version` in `GetPluginInfoResponse`). + +NOTE: Changes to this RPC should be approached very conservatively since the request/response protobufs here are critical for proper client-server version negotiation. +Future changes to this RPC MUST **guarantee** backwards compatibility. + +```protobuf +message GetSupportedVersionsRequest { +} + +message GetSupportedVersionsResponse { + message Result { + // All the CSI versions that the Plugin supports. This field is + // REQUIRED. 
+ repeated Version supported_versions = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +// Specifies a version in Semantic Version 2.0 format. +// (http://semver.org/spec/v2.0.0.html) +message Version { + uint32 major = 1; // This field is REQUIRED. + uint32 minor = 2; // This field is REQUIRED. + uint32 patch = 3; // This field is REQUIRED. +} +``` + +#### `GetPluginInfo` + +```protobuf +message GetPluginInfoRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message GetPluginInfoResponse { + message Result { + // This field is REQUIRED. + string name = 1; + + // This field is REQUIRED. Value of this field is opaque to the CO. + string vendor_version = 2; + + // This field is OPTIONAL. Values are opaque to the CO. + map manifest = 3; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +### Controller Service RPC + +#### `CreateVolume` + +A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_VOLUME` controller capability. +This RPC will be called by the CO to provision a new volume on behalf of a user (to be consumed as either a block device or a mounted filesystem). +This operation MUST be idempotent. + +```protobuf +message CreateVolumeRequest { + // The API version assumed by the CO. This field is REQUIRED. + Version version = 1; + + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. 
The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + string name = 2; + + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. + CapacityRange capacity_range = 3; + + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. + repeated VolumeCapability volume_capabilities = 4; + + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + map parameters = 5; + + // End user credentials used to authenticate/authorize volume creation + // request. + // This field is OPTIONAL. 
+ Credentials user_credentials = 6; +} + +message CreateVolumeResponse { + message Result { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identifying the volume. This field is REQUIRED. + VolumeInfo volume_info = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +// Specify a capability of a volume. +message VolumeCapability { + // Indicate that the volume will be accessed via the block device API. + message BlockVolume { + // Intentionally empty, for now. + } + + // Indicate that the volume will be accessed via the filesystem API. + message MountVolume { + // The filesystem type. This field is OPTIONAL. + string fs_type = 1; + + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. + repeated string mount_flags = 2; + } + + // Specify how a volume can be accessed. + message AccessMode { + enum Mode { + UNKNOWN = 0; + + // Can be published as read/write at one node at a time. + SINGLE_NODE_WRITER = 1; + + // Can be published as readonly at one node at a time. + SINGLE_NODE_READER_ONLY = 2; + + // Can be published as readonly at multiple nodes simultaneously. + MULTI_NODE_READER_ONLY = 3; + + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + MULTI_NODE_SINGLE_WRITER = 4; + + // Can be published as read/write at multiple nodes + // simultaneously. + MULTI_NODE_MULTI_WRITER = 5; + } + + // This field is REQUIRED. + Mode mode = 1; + } + + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. 
+ oneof access_type { + BlockVolume block = 1; + MountVolume mount = 2; + } + + // This is a REQUIRED field. + AccessMode access_mode = 3; +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` can be set to the same value. At +// least one of the these fields MUST be specified. +message CapacityRange { + // Volume must be at least this big. + uint64 required_bytes = 1; + + // Volume must not be bigger than this. + uint64 limit_bytes = 2; +} + +// The information about a provisioned volume. +message VolumeInfo { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set, it indicates that the capacity of the volume is unknown (e.g., + // NFS share). If set, it MUST be non-zero. + uint64 capacity_bytes = 1; + + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + VolumeHandle handle = 2; +} + +// VolumeHandle objects are generated by Plugin implementations to +// serve two purposes: first, to efficiently identify a volume known +// to the Plugin implementation; second, to capture additional, opaque, +// possibly sensitive information associated with a volume. It SHALL +// NOT change over the lifetime of the volume and MAY be safely cached +// by the CO. +// Since this object will be passed around by the CO, it is RECOMMENDED +// that each Plugin keeps the information herein as small as possible. +// The total bytes of a serialized VolumeHandle must be less than 1 MiB. +message VolumeHandle { + // ID is the identity of the provisioned volume specified by the + // Plugin. This field is REQUIRED. + // This information SHALL NOT be considered sensitive such that, for + // example, the CO MAY generate log messages that include this data. 
+ string id = 1;
+
+ // Metadata captures additional, possibly sensitive, information about
+ // a volume in the form of key-value pairs. This field is OPTIONAL.
+ // Since this field MAY contain sensitive information, the CO MUST NOT
+ // leak this information to untrusted entities.
+ map<string, string> metadata = 2;
+}
+
+// A standard way to encode credential data. The total bytes of the
+// values in the Data field must be less than 1 Mebibyte.
+message Credentials {
+ // Data contains the credential data, for example username and
+ // password. Each key must consist of alphanumeric characters, '-',
+ // '_' or '.'. Each value MUST contain a valid string. An SP MAY
+ // choose to accept binary (non-string) data by using a binary-to-text
+ // encoding scheme, like base64. An SP SHALL advertise the
+ // requirements for credentials in documentation. COs SHALL permit
+ // users to pass through the required credentials. This information is
+ // sensitive and MUST be treated as such (not logged, etc.) by the CO.
+ // This field is REQUIRED.
+ map<string, string> data = 1;
+}
+```
+
+#### `DeleteVolume`
+
+A Controller Plugin MUST implement this RPC call if it has `CREATE_DELETE_VOLUME` capability.
+This RPC will be called by the CO to deprovision a volume.
+If successful, the storage space associated with the volume MUST be released and all the data in the volume SHALL NOT be accessible anymore.
+
+This operation MUST be idempotent.
+This operation SHOULD be best effort in the sense that if the Plugin is certain that the volume as well as the artifacts associated with the volume do not exist anymore, it SHOULD return a success.
+
+```protobuf
+message DeleteVolumeRequest {
+ // The API version assumed by the CO. This field is REQUIRED.
+ Version version = 1;
+
+ // The handle of the volume to be deprovisioned.
+ // This field is REQUIRED.
+ VolumeHandle volume_handle = 2;
+
+ // End user credentials used to authenticate/authorize volume deletion
+ // request.
+ // This field is OPTIONAL.
+ Credentials user_credentials = 3; +} + +message DeleteVolumeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `ControllerPublishVolume` + +A Controller Plugin MUST implement this RPC call if it has `PUBLISH_UNPUBLISH_VOLUME` controller capability. +This RPC will be called by the CO when it wants to place a workload that uses the volume onto a node. +The Plugin SHOULD perform the work that is necessary for making the volume available on the given node. +The Plugin MUST NOT assume that this RPC will be executed on the node where the volume will be used. + +This operation MUST be idempotent. +If the operation failed or the CO does not know if the operation has failed or not, it MAY choose to call `ControllerPublishVolume` again or choose to call `ControllerUnpublishVolume`. + +The CO MAY call this RPC for publishing a volume to multiple nodes if the volume has `MULTI_NODE` capability (i.e., `MULTI_NODE_READER_ONLY`, `MULTI_NODE_SINGLE_WRITER` or `MULTI_NODE_MULTI_WRITER`). + +```protobuf +message ControllerPublishVolumeRequest { + // The API version assumed by the CO. This field is REQUIRED. + Version version = 1; + + // The handle of the volume to be used on a node. + // This field is REQUIRED. + VolumeHandle volume_handle = 2; + + // The ID of the node. This field is OPTIONAL. The CO SHALL set (or + // clear) this field to match the `NodeID` returned by `GetNodeID`. + // `GetNodeID` is allowed to omit `NodeID` from a successful `Result`; + // in such cases the CO SHALL NOT specify this field. + NodeID node_id = 3; + + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability volume_capability = 4; + + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + bool readonly = 5; + + // End user credentials used to authenticate/authorize controller + // publish request. 
+ // This field is OPTIONAL.
+ Credentials user_credentials = 6;
+}
+
+message ControllerPublishVolumeResponse {
+ message Result {
+ // The SP specific information that will be passed to the Plugin in
+ // the subsequent `NodePublishVolume` call for the given volume.
+ // This information is opaque to the CO. This field is OPTIONAL.
+ map<string, string> publish_volume_info = 1;
+ }
+
+ // One of the following fields MUST be specified.
+ oneof reply {
+ Result result = 1;
+ Error error = 2;
+ }
+}
+
+message NodeID {
+ // Information about a node in the form of key-value pairs. This
+ // information is opaque to the CO. Given this information will be
+ // passed around by the CO, it is RECOMMENDED that each Plugin keeps
+ // this information as small as possible. This field is REQUIRED.
+ map<string, string> values = 1;
+}
+```
+
+#### `ControllerUnpublishVolume`
+
+Controller Plugin MUST implement this RPC call if it has `PUBLISH_UNPUBLISH_VOLUME` controller capability.
+This RPC is a reverse operation of `ControllerPublishVolume`.
+It MUST be called after `NodeUnpublishVolume` on the volume is called and succeeds.
+The Plugin SHOULD perform the work that is necessary for making the volume ready to be consumed by a different node.
+The Plugin MUST NOT assume that this RPC will be executed on the node where the volume was previously used.
+
+This RPC is typically called by the CO when the workload using the volume is being moved to a different node, or all the workload using the volume on a node has finished.
+
+This operation MUST be idempotent.
+If this operation failed, or the CO does not know if the operation failed or not, it can choose to call `ControllerUnpublishVolume` again.
+
+```protobuf
+message ControllerUnpublishVolumeRequest {
+ // The API version assumed by the CO. This field is REQUIRED.
+ Version version = 1;
+
+ // The handle of the volume. This field is REQUIRED.
+ VolumeHandle volume_handle = 2;
+
+ // The ID of the node. This field is OPTIONAL.
The CO SHALL set (or + // clear) this field to match the `NodeID` returned by `GetNodeID`. + // `GetNodeID` is allowed to omit `NodeID` from a successful `Result`; + // in such cases the CO SHALL NOT specify this field. + // + // If `GetNodeID` does not omit `NodeID` from a successful `Result`, + // the CO MAY omit this field as well, indicating that it does not + // know which node the volume was previously used. The Plugin SHOULD + // return an Error if this is not supported. + NodeID node_id = 3; + + // End user credentials used to authenticate/authorize controller + // unpublish request. + // This field is OPTIONAL. + Credentials user_credentials = 4; +} + +message ControllerUnpublishVolumeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `ValidateVolumeCapabilities` + +A Controller Plugin MUST implement this RPC call. +This RPC will be called by the CO to check if a pre-provisioned volume has all the capabilities that the CO wants. +This RPC call SHALL return `supported` only if all the volume capabilities specified in the request are supported. +This operation MUST be idempotent. + +```protobuf +message ValidateVolumeCapabilitiesRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; + + // The information about the volume to check. This is a REQUIRED + // field. + VolumeInfo volume_info = 2; + + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "supported" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + repeated VolumeCapability volume_capabilities = 3; +} + +message ValidateVolumeCapabilitiesResponse { + message Result { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. + bool supported = 1; + + // Message to the CO if `supported` above is false. 
This field is + // OPTIONAL. + string message = 2; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `ListVolumes` + +A Controller Plugin MUST implement this RPC call if it has `LIST_VOLUMES` capability. +The Plugin SHALL return the information about all the volumes that it knows about. + +```protobuf +message ListVolumesRequest { + // The API version assumed by the CO. This field is REQUIRED. + Version version = 1; + + // If specified, the Plugin MUST NOT return more entries than this + // number in the response. If the actual number of entries is more + // than this number, the Plugin MUST set `next_token` in the response + // which can be used to get the next page of entries in the subsequent + // `ListVolumes` call. This field is OPTIONAL. If not specified, it + // means there is no restriction on the number of entries that can be + // returned. + uint32 max_entries = 2; + + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + string starting_token = 3; +} + +message ListVolumesResponse { + message Result { + message Entry { + VolumeInfo volume_info = 1; + } + + repeated Entry entries = 1; + + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + string next_token = 2; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `GetCapacity` + +A Controller Plugin MUST implement this RPC call if it has `GET_CAPACITY` controller capability. 
+The RPC allows the CO to query the capacity of the storage pool from which the controller provisions volumes. + +```protobuf +message GetCapacityRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + repeated VolumeCapability volume_capabilities = 2; + + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + map parameters = 3; +} + +message GetCapacityResponse { + message Result { + // The available capacity of the storage that can be used to + // provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + uint64 available_capacity = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `ControllerGetCapabilities` + +A Controller Plugin MUST implement this RPC call. This RPC allows the CO to check the supported capabilities of controller service provided by the Plugin. + +```protobuf +message ControllerGetCapabilitiesRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message ControllerGetCapabilitiesResponse { + message Result { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ repeated ControllerServiceCapability capabilities = 2; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +// Specifies a capability of the controller service. +message ControllerServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + CREATE_DELETE_VOLUME = 1; + PUBLISH_UNPUBLISH_VOLUME = 2; + LIST_VOLUMES = 3; + GET_CAPACITY = 4; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +``` + +### Node Service RPC + +#### `NodePublishVolume` + +This RPC is called by the CO when a workload that wants to use the specified volume is placed (scheduled) on a node. +The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. +This RPC MAY be called by the CO multiple times on the same node for the same volume with possibly different `target_path` and/or auth credentials. +If the corresponding Controller Plugin has `PUBLISH_UNPUBLISH_VOLUME` controller capability, the CO MUST guarantee that this RPC is called after `ControllerPublishVolume` is called for the given volume on the given node and returns a success. + +This operation MUST be idempotent. +If this RPC failed, or the CO does not know if it failed or not, it MAY choose to call `NodePublishVolume` again, or choose to call `NodeUnpublishVolume`. + +```protobuf +message NodePublishVolumeRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; + + // The handle of the volume to publish. This field is REQUIRED. + VolumeHandle volume_handle = 2; + + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. 
+ map publish_volume_info = 3; + + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // This is a REQUIRED field. + string target_path = 4; + + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability volume_capability = 5; + + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + bool readonly = 6; + + // End user credentials used to authenticate/authorize node publish + // request. This field is OPTIONAL. + Credentials user_credentials = 7; +} + +message NodePublishVolumeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `NodeUnpublishVolume` + +A Node Plugin MUST implement this RPC call. +This RPC is a reverse operation of `NodePublishVolume`. +This RPC MUST undo the work by the corresponding `NodePublishVolume`. +This RPC SHALL be called by the CO at least once for each `target_path` that was successfully setup via `NodePublishVolume`. +If the corresponding Controller Plugin has `PUBLISH_UNPUBLISH_VOLUME` controller capability, the CO SHOULD issue all `NodeUnpublishVolume` (as specified above) before calling `ControllerUnpublishVolume` for the given node and the given volume. +The Plugin SHALL assume that this RPC will be executed on the node where the volume is being used. + +This RPC is typically called by the CO when the workload using the volume is being moved to a different node, or all the workload using the volume on a node has finished. + +This operation MUST be idempotent. +If this RPC failed, or the CO does not know if it failed or not, it can choose to call `NodeUnpublishVolume` again. + +```protobuf +message NodeUnpublishVolumeRequest { + // The API version assumed by the CO. 
This is a REQUIRED field. + Version version = 1; + + // The handle of the volume. This field is REQUIRED. + VolumeHandle volume_handle = 2; + + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + string target_path = 3; + + // End user credentials used to authenticate/authorize node unpublish + // request. This field is OPTIONAL. + Credentials user_credentials = 4; +} + +message NodeUnpublishVolumeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `GetNodeID` + +A Node Plugin MUST implement this RPC call. +The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. +The CO SHOULD call this RPC for the node at which it wants to place the workload. +The result of this call will be used by CO in `ControllerPublishVolume`. + +```protobuf +message GetNodeIDRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message GetNodeIDResponse { + message Result { + // The ID of the node which SHALL be used by CO in + // `ControllerPublishVolume`. This is an OPTIONAL field. If unset, + // the CO SHALL leave the `node_id` field unset in + // `ControllerPublishVolume`. + NodeID node_id = 1; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `ProbeNode` + +A Node Plugin MUST implement this RPC call. +The Plugin SHALL assume that this RPC will be executed on the node where the volume will be used. +The CO SHOULD call this RPC for the node at which it wants to place the workload. +This RPC allows the CO to probe the readiness of the Plugin on the node where the volumes will be used. +The Plugin SHOULD verify if it has everything it needs (binaries, kernel module, drivers, etc.) 
to run on that node, and return a success if the validation succeeds. +The CO MAY use this RPC to probe which machines can support specific Plugins and schedule workloads accordingly. + +```protobuf +message ProbeNodeRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message ProbeNodeResponse { + message Result {} + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} +``` + +#### `NodeGetCapabilities` + +A Node Plugin MUST implement this RPC call. +This RPC allows the CO to check the supported capabilities of node service provided by the Plugin. + +```protobuf +message NodeGetCapabilitiesRequest { + // The API version assumed by the CO. This is a REQUIRED field. + Version version = 1; +} + +message NodeGetCapabilitiesResponse { + message Result { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + repeated NodeServiceCapability capabilities = 2; + } + + // One of the following fields MUST be specified. + oneof reply { + Result result = 1; + Error error = 2; + } +} + +// Specifies a capability of the node service. +message NodeServiceCapability { + message RPC { + enum Type { + UNKNOWN = 0; + } + + Type type = 1; + } + + oneof type { + // RPC that the controller supports. + RPC rpc = 1; + } +} +``` + +### Error Type/Codes + +Standard error types that Plugins MUST reply with. +Allows CO to implement smarter error handling and fault resolution. + +```protobuf +message Error { + // General Error that MAY be returned by any RPC. + message GeneralError { + enum GeneralErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `GeneralErrorCode` code that an older CSI client is not aware + // of, the client will see this code (the default fallback). 
+ // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + // Indicates that an undefined error occurred. More human-readable + // information MAY be provided in the `error_description` field. + // The `caller_must_not_retry` field MUST be set appropriately by + // the Plugin to provide callers expected recovery behavior. + // + // Recovery behavior: Caller MAY retry (with exponential backoff), + // if `caller_must_not_retry` is set to false. Otherwise, the + // caller MUST not reissue the same request. + UNDEFINED = 1; + + // Indicates that the version specified in the request is not + // supported by the Plugin. The `caller_must_not_retry` field MUST + // be set to true. + // + // Recovery behavior: Caller MUST NOT retry; caller SHOULD call + // `GetSupportedVersions` to discover which CSI versions the + // Plugin supports. + UNSUPPORTED_REQUEST_VERSION = 2; + + // Indicates that a required field is missing from the request. + // More human-readable information MAY be provided in the + // `error_description` field. The `caller_must_not_retry` field + // MUST be set to true. + // + // Recovery behavior: Caller MUST fix the request by adding the + // missing required field before retrying. + MISSING_REQUIRED_FIELD = 3; + } + + // Machine parsable error code. + GeneralErrorCode error_code = 1; + + // When set to true, `caller_must_not_retry` indicates that the + // caller MUST not retry the same call again. This MAY be because + // the call is deemed invalid by the Plugin and no amount of retries + // will cause it to succeed. If this value is false, the caller MAY + // reissue the same call, but SHOULD implement exponential backoff + // on retires. + bool caller_must_not_retry = 2; + + // Human readable description of error, possibly with additional + // information. This string MAY be surfaced by CO to end users. + string error_description = 3; + } + + // `CreateVolume` specific error. 
+ message CreateVolumeError { + enum CreateVolumeErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `CreateVolumeErrorCode` code that an older CSI client is not + // aware of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. + // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. + CALL_NOT_IMPLEMENTED = 1; + + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 2; + + // Indicates that the specified volume name is not allowed by the + // Plugin. More human-readable information MAY be provided in the + // `error_description` field. + // + // Recovery behavior: Caller MUST fix the name before retrying. + INVALID_VOLUME_NAME = 3; + + // Indicates that the capacity range is not allowed by the Plugin. + // More human-readable information MAY be provided in the + // `error_description` field. 
+ // + // Recovery behavior: Caller MUST fix the capacity range before // + // retrying. + UNSUPPORTED_CAPACITY_RANGE = 4; + + // Indicates that a volume corresponding to the specified volume + // name already exists. + // + // Recovery behavior: Caller MAY assume the `CreateVolume` + // call succeeded. + VOLUME_ALREADY_EXISTS = 5; + + // Indicates that a key in the opaque key/value parameters field + // is not supported by the Plugin. More human-readable information + // MAY be provided in the `error_description` field. This MAY + // occur, for example, due to caller error, Plugin version skew, + // etc. + // + // Recovery behavior: Caller MUST remove the unsupported key/value + // pair from the list of parameters before retrying. + UNSUPPORTED_PARAMETER_KEY = 6; + + // Indicates that a value in one of the opaque key/value pairs + // parameter contains invalid data. More human-readable + // information (such as the corresponding key) MAY be provided in + // the `error_description` field. + // + // Recovery behavior: Caller MUST fix the invalid value before + // retrying. + INVALID_PARAMETER_VALUE = 7; + } + + // Machine parsable error code. + CreateVolumeErrorCode error_code = 1; + + // Human readable description of error, possibly with additional + // information. This string maybe surfaced by CO to end users. + string error_description = 2; + } + + // `DeleteVolume` specific error. + message DeleteVolumeError { + enum DeleteVolumeErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `DeleteVolumeErrorCode` code that an older CSI client is not + // aware of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. 
+ // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. + CALL_NOT_IMPLEMENTED = 1; + + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 2; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 3; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD assume the `DeleteVolume` call + // succeeded. + VOLUME_DOES_NOT_EXIST = 4; + } + + // Machine parsable error code. + DeleteVolumeErrorCode error_code = 1; + + // Human readable description of error, possibly with additional + // information. This string maybe surfaced by CO to end users. + string error_description = 2; + } + + // `ControllerPublishVolume` specific error. + message ControllerPublishVolumeError { + enum ControllerPublishVolumeErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. 
However, if a Plugin returns a + // `ControllerPublishVolumeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + // Indicates that the call is either not implemented by the Plugin + // or disabled in the Plugin’s current mode of operation. + // + // Recovery behavior: Caller MUST not retry; caller MAY call + // `ControllerGetCapabilities` or `NodeGetCapabilities` to + // discover Plugin capabilities. + CALL_NOT_IMPLEMENTED = 1; + + // Indicates that there is a already an operation pending for the + // specified volume. In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 2; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 3; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. 
+ VOLUME_DOES_NOT_EXIST = 4; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` is already attached to another node and does not + // support multi-node attach. If this error code is returned, the + // Plugin MUST also specify the `node_id` of the node the volume + // is already attached to. + // + // Recovery behavior: Caller MAY use the provided `node_ids` + // information to detach the volume from the other node. Caller + // SHOULD ensure the specified volume is not attached to any other + // node before retrying with exponential back off. + VOLUME_ALREADY_PUBLISHED = 5; + + // Indicates that a node corresponding to the specified `NodeID` + // does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `NodeID` is + // correct and that the node is available and has not been + // terminated or deleted before retrying with exponential backoff. + NODE_DOES_NOT_EXIST = 6; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` is already attached to the maximum supported + // number of nodes and therefore this operation can not be + // completed until the volume is detached from at least one of the + // existing nodes. When this error code is returned, the Plugin + // MUST also specify the `NodeId` of all the nodes the volume is + // attached to. + // + // Recovery behavior: Caller MAY use the provided `node_ids` + // information to detach the volume from one other node before + // retrying with exponential backoff. + MAX_ATTACHED_NODES = 7; + + UNSUPPORTED_MOUNT_FLAGS = 9; + UNSUPPORTED_VOLUME_TYPE = 10; + UNSUPPORTED_FS_TYPE = 11; + + // Indicates that the specified `NodeID` is not allowed or + // understood by the Plugin, or the Plugin does not support the + // operation without a `NodeID`. More human-readable information + // MAY be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `NodeID` before + // retrying. 
+      INVALID_NODE_ID = 8;
+    }
+
+    // Machine parsable error code.
+    ControllerPublishVolumeErrorCode error_code = 1;
+
+    // Human readable description of error, possibly with additional
+    // information. This string may be surfaced by CO to end users.
+    string error_description = 2;
+
+    // On `VOLUME_ALREADY_PUBLISHED` and `MAX_ATTACHED_NODES` errors,
+    // this field contains the node(s) that the specified volume is
+    // already attached to.
+    repeated NodeID node_ids = 3;
+  }
+
+  // `ControllerUnpublishVolume` specific error.
+  message ControllerUnpublishVolumeError {
+    enum ControllerUnpublishVolumeErrorCode {
+      // Default value for backwards compatibility. SHOULD NOT be
+      // returned by Plugins. However, if a Plugin returns a
+      // `ControllerUnpublishVolumeErrorCode` code that an older CSI
+      // client is not aware of, the client will see this code (the
+      // default fallback).
+      //
+      // Recovery behavior: Caller SHOULD consider updating CSI client
+      // to match Plugin CSI version.
+      UNKNOWN = 0;
+
+      // Indicates that the call is either not implemented by the Plugin
+      // or disabled in the Plugin’s current mode of operation.
+      //
+      // Recovery behavior: Caller MUST not retry; caller MAY call
+      // `ControllerGetCapabilities` or `NodeGetCapabilities` to
+      // discover Plugin capabilities.
+      CALL_NOT_IMPLEMENTED = 1;
+
+      // Indicates that there is already an operation pending for the
+      // specified volume. In general the Cluster Orchestrator (CO) is
+      // responsible for ensuring that there is no more than one call
+      // “in-flight” per volume at a given time. However, in some
+      // circumstances, the CO MAY lose state (for example when the CO
+      // crashes and restarts), and MAY issue multiple calls
+      // simultaneously for the same volume. The Plugin, SHOULD handle
+      // this as gracefully as possible, and MAY return this error code
+      // to reject secondary calls.
+ // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 2; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 3; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. + VOLUME_DOES_NOT_EXIST = 4; + + // Indicates that a node corresponding to the specified `NodeID` + // does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `NodeID` is + // correct and that the node is available and has not been + // terminated or deleted before retrying. + NODE_DOES_NOT_EXIST = 5; + + // Indicates that the specified `NodeID` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `NodeID` before + // retrying. + INVALID_NODE_ID = 6; + + VOLUME_NOT_ATTACHED_TO_SPECIFIED_NODE = 7; + + // Indicates that the Plugin does not support the operation + // without a `NodeID`. + // + // Recovery behavior: Caller MUST specify the `NodeID` before + // retrying. + NODE_ID_REQUIRED = 8; + } + + ControllerUnpublishVolumeErrorCode error_code = 1; + string error_description = 2; + } + + // `ValidateVolumeCapabilities` specific error. + message ValidateVolumeCapabilitiesError { + enum ValidateVolumeCapabilitiesErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. 
However, if a Plugin returns a
+      // `ValidateVolumeCapabilitiesErrorCode` code that an older CSI
+      // client is not aware of, the client will see this code (the
+      // default fallback).
+      //
+      // Recovery behavior: Caller SHOULD consider updating CSI client
+      // to match Plugin CSI version.
+      UNKNOWN = 0;
+
+      // Indicates that a volume corresponding to the specified
+      // `VolumeInfo` does not exist.
+      //
+      // Recovery behavior: Caller SHOULD verify that the `VolumeInfo`
+      // is correct and that the volume is accessible and has not been
+      // deleted before retrying.
+      VOLUME_DOES_NOT_EXIST = 1;
+
+      UNSUPPORTED_MOUNT_FLAGS = 2;
+      UNSUPPORTED_VOLUME_TYPE = 3;
+      UNSUPPORTED_FS_TYPE = 4;
+
+      // Indicates that the specified `VolumeInfo` is not allowed or
+      // understood by the Plugin. More human-readable information MAY
+      // be provided in the `error_description` field.
+      //
+      // Recovery behavior: Caller MUST fix the `VolumeInfo` before
+      // retrying.
+      INVALID_VOLUME_INFO = 5;
+    }
+
+    ValidateVolumeCapabilitiesErrorCode error_code = 1;
+    string error_description = 2;
+  }
+
+  // `NodePublishVolume` specific error.
+  message NodePublishVolumeError {
+    enum NodePublishVolumeErrorCode {
+      // Default value for backwards compatibility. SHOULD NOT be
+      // returned by Plugins. However, if a Plugin returns a
+      // `NodePublishVolumeErrorCode` code that an older CSI
+      // client is not aware of, the client will see this code (the
+      // default fallback).
+      //
+      // Recovery behavior: Caller SHOULD consider updating CSI client
+      // to match Plugin CSI version.
+      UNKNOWN = 0;
+
+      // Indicates that there is already an operation pending for the
+      // specified volume. In general the Cluster Orchestrator (CO) is
+      // responsible for ensuring that there is no more than one call
+      // “in-flight” per volume at a given time.
However, in some
+      // circumstances, the CO MAY lose state (for example when the CO
+      // crashes and restarts), and MAY issue multiple calls
+      // simultaneously for the same volume. The Plugin, SHOULD handle
+      // this as gracefully as possible, and MAY return this error code
+      // to reject secondary calls.
+      //
+      // Recovery behavior: Caller SHOULD ensure that there are no other
+      // calls pending for the specified volume, and then retry with
+      // exponential back off.
+      OPERATION_PENDING_FOR_VOLUME = 1;
+
+      // Indicates that a volume corresponding to the specified
+      // `VolumeHandle` does not exist.
+      //
+      // Recovery behavior: Caller SHOULD verify that the `VolumeHandle`
+      // is correct and that the volume is accessible and has not been
+      // deleted before retrying with exponential back off.
+      VOLUME_DOES_NOT_EXIST = 2;
+
+      UNSUPPORTED_MOUNT_FLAGS = 3;
+      UNSUPPORTED_VOLUME_TYPE = 4;
+      UNSUPPORTED_FS_TYPE = 5;
+      MOUNT_ERROR = 6;
+
+      // Indicates that the specified `VolumeHandle` is not allowed or
+      // understood by the Plugin. More human-readable information MAY
+      // be provided in the `error_description` field.
+      //
+      // Recovery behavior: Caller MUST fix the `VolumeHandle` before
+      // retrying.
+      INVALID_VOLUME_HANDLE = 7;
+    }
+
+    NodePublishVolumeErrorCode error_code = 1;
+    string error_description = 2;
+  }
+
+  // `NodeUnpublishVolume` specific error.
+  message NodeUnpublishVolumeError {
+    enum NodeUnpublishVolumeErrorCode {
+      // Default value for backwards compatibility. SHOULD NOT be
+      // returned by Plugins. However, if a Plugin returns a
+      // `NodeUnpublishVolumeErrorCode` code that an older CSI
+      // client is not aware of, the client will see this code (the
+      // default fallback).
+      //
+      // Recovery behavior: Caller SHOULD consider updating CSI client
+      // to match Plugin CSI version.
+      UNKNOWN = 0;
+
+      // Indicates that there is already an operation pending for the
+      // specified volume.
In general the Cluster Orchestrator (CO) is + // responsible for ensuring that there is no more than one call + // “in-flight” per volume at a given time. However, in some + // circumstances, the CO MAY lose state (for example when the CO + // crashes and restarts), and MAY issue multiple calls + // simultaneously for the same volume. The Plugin, SHOULD handle + // this as gracefully as possible, and MAY return this error code + // to reject secondary calls. + // + // Recovery behavior: Caller SHOULD ensure that there are no other + // calls pending for the specified volume, and then retry with + // exponential back off. + OPERATION_PENDING_FOR_VOLUME = 1; + + // Indicates that a volume corresponding to the specified + // `VolumeHandle` does not exist. + // + // Recovery behavior: Caller SHOULD verify that the `VolumeHandle` + // is correct and that the volume is accessible and has not been + // deleted before retrying with exponential back off. + VOLUME_DOES_NOT_EXIST = 2; + + UNMOUNT_ERROR = 3; + + // Indicates that the specified `VolumeHandle` is not allowed or + // understood by the Plugin. More human-readable information MAY + // be provided in the `error_description` field. + // + // Recovery behavior: Caller MUST fix the `VolumeHandle` before + // retrying. + INVALID_VOLUME_HANDLE = 4; + } + + NodeUnpublishVolumeErrorCode error_code = 1; + string error_description = 2; + } + + // `ProbeNode` specific error. + message ProbeNodeError { + enum ProbeNodeErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `ProbeNodeErrorCode` code that an older CSI + // client is not aware of, the client will see this code (the + // default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. 
+ UNKNOWN = 0; + + BAD_PLUGIN_CONFIG = 1; + MISSING_REQUIRED_HOST_DEPENDENCY = 2; + } + + ProbeNodeErrorCode error_code = 1; + string error_description = 2; + } + + // `GetNodeID` specific error. + message GetNodeIDError { + enum GetNodeIDErrorCode { + // Default value for backwards compatibility. SHOULD NOT be + // returned by Plugins. However, if a Plugin returns a + // `GetNodeIDErrorCode` code that an older CSI client is not aware + // of, the client will see this code (the default fallback). + // + // Recovery behavior: Caller SHOULD consider updating CSI client + // to match Plugin CSI version. + UNKNOWN = 0; + + BAD_PLUGIN_CONFIG = 1; + MISSING_REQUIRED_HOST_DEPENDENCY = 2; + } + + GetNodeIDErrorCode error_code = 1; + string error_description = 2; + } + + // One of the following fields MUST be specified. + oneof value { + GeneralError general_error = 1; + + CreateVolumeError create_volume_error = 2; + DeleteVolumeError delete_volume_error = 3; + ControllerPublishVolumeError controller_publish_volume_error = 4; + + ControllerUnpublishVolumeError + controller_unpublish_volume_error = 5; + + ValidateVolumeCapabilitiesError + validate_volume_capabilities_error = 6; + + NodePublishVolumeError node_publish_volume_error = 7; + NodeUnpublishVolumeError node_unpublish_volume_error = 8; + ProbeNodeError probe_node_error = 9; + GetNodeIDError get_node_id_error = 10; + } +} +``` + +## Protocol + +### Connectivity + +* A CO SHALL communicate with a Plugin using gRPC to access the `Identity`, and (optionally) the `Controller` and `Node` services. + * proto3 SHOULD be used with gRPC, as per the [official recommendations](http://www.grpc.io/docs/guides/#protocol-buffer-versions). + * All Plugins SHALL implement the REQUIRED Identity service RPCs. + Support for OPTIONAL RPCs is reported by the `ControllerGetCapabilities` and `NodeGetCapabilities` RPC calls. +* The CO SHALL provide the listen-address for the Plugin by way of the `CSI_ENDPOINT` environment variable. 
+ Plugin components SHALL create, bind, and listen for RPCs on the specified listen address. + * Only UNIX Domain Sockets may be used as endpoints. + This will likely change in a future version of this specification to support non-UNIX platforms. +* All supported RPC services MUST be available at the listen address of the Plugin. + +### Security + +* The CO operator and Plugin Supervisor SHOULD take steps to ensure that any and all communication between the CO and Plugin Service are secured according to best practices. +* Communication between a CO and a Plugin SHALL be transported over UNIX Domain Sockets. + * gRPC is compatible with UNIX Domain Sockets; it is the responsibility of the CO operator and Plugin Supervisor to properly secure access to the Domain Socket using OS filesystem ACLs and/or other OS-specific security context tooling. + * SP’s supplying stand-alone Plugin controller appliances, or other remote components that are incompatible with UNIX Domain Sockets must provide a software component that proxies communication between a UNIX Domain Socket and the remote component(s). + Proxy components transporting communication over IP networks SHALL be responsible for securing communications over such networks. +* Both the CO and Plugin SHOULD avoid accidental leakage of sensitive information (such as redacting such information from log files). + +### Debugging + +* Debugging and tracing are supported by external, CSI-independent additions and extensions to gRPC APIs, such as [OpenTracing](https://github.com/grpc-ecosystem/grpc-opentracing). + +## Configuration and Operation + +### General Configuration + +* The `CSI_ENDPOINT` environment variable SHALL be supplied to the Plugin by the Plugin Supervisor. +* An operator SHALL configure the CO to connect to the Plugin via the listen address identified by `CSI_ENDPOINT` variable. 
+* With exception to sensitive data, Plugin configuration SHOULD be specified by environment variables, whenever possible, instead of by command line flags or bind-mounted/injected files. + + +#### Plugin Bootstrap Example + +* Supervisor -> Plugin: `CSI_ENDPOINT=unix://path/to/unix/domain/socket.sock`. +* Operator -> CO: use plugin at endpoint `unix://path/to/unix/domain/socket.sock`. +* CO: monitor `/path/to/unix/domain/socket.sock`. +* Plugin: read `CSI_ENDPOINT`, create UNIX socket at specified path, bind and listen. +* CO: observe that socket now exists, establish connection. +* CO: invoke `GetSupportedVersions`. + +#### Filesystem + +* Plugins SHALL NOT specify requirements that include or otherwise reference directories and/or files on the root filesystem of the CO. +* Plugins SHALL NOT create additional files or directories adjacent to the UNIX socket specified by `CSI_ENDPOINT`; violations of this requirement constitute "abuse". + * The Plugin Supervisor is the ultimate authority of the directory in which the UNIX socket endpoint is created and MAY enforce policies to prevent and/or mitigate abuse of the directory by Plugins. + +### Supervised Lifecycle Management + +* For Plugins packaged in software form: + * Plugin Packages SHOULD use a well-documented container image format (e.g., Docker, OCI). + * The chosen package image format MAY expose configurable Plugin properties as environment variables, unless otherwise indicated in the section below. + Variables so exposed SHOULD be assigned default values in the image manifest. + * A Plugin Supervisor MAY programmatically evaluate or otherwise scan a Plugin Package’s image manifest in order to discover configurable environment variables. + * A Plugin SHALL NOT assume that an operator or Plugin Supervisor will scan an image manifest for environment variables. + +#### Environment Variables + +* Variables defined by this specification SHALL be identifiable by their `CSI_` name prefix. 
+* Configuration properties not defined by the CSI specification SHALL NOT use the same `CSI_` name prefix; this prefix is reserved for common configuration properties defined by the CSI specification. +* The Plugin Supervisor SHOULD supply all recommended CSI environment variables to a Plugin. +* The Plugin Supervisor SHALL supply all required CSI environment variables to a Plugin. + +##### `CSI_ENDPOINT` + +Network endpoint at which a Plugin SHALL host CSI RPC services. The general format is: + + {scheme}://{authority}{endpoint} + +The following address types SHALL be supported by Plugins: + + unix://path/to/unix/socket.sock + +Note: All UNIX endpoints SHALL end with `.sock`. See [gRPC Name Resolution](https://github.com/grpc/grpc/blob/master/doc/naming.md). + +This variable is REQUIRED. + +#### Operational Recommendations + +The Plugin Supervisor expects that a Plugin SHALL act as a long-running service vs. an on-demand, CLI-driven process. + +Supervised plugins MAY be isolated and/or resource-bounded. + +##### Logging + +* Plugins SHOULD generate log messages to ONLY standard output and/or standard error. + * In this case the Plugin Supervisor SHALL assume responsibility for all log lifecycle management. +* Plugin implementations that deviate from the above recommendation SHALL clearly and unambiguously document the following: + * Logging configuration flags and/or variables, including working sample configurations. + * Default log destination(s) (where do the logs go if no configuration is specified?) + * Log lifecycle management ownership and related guidance (size limits, rate limits, rolling, archiving, expunging, etc.) applicable to the logging mechanism embedded within the Plugin. +* Plugins SHOULD NOT write potentially sensitive data to logs (e.g. `Credentials`, `VolumeHandle.Metadata`). 
+ +##### Available Services + +* Plugin Packages MAY support all or a subset of CSI services; service combinations MAY be configurable at runtime by the Plugin Supervisor. +* Misconfigured plugin software SHOULD fail-fast with an OS-appropriate error code. + +##### Linux Capabilities + +* Plugin Supervisor SHALL guarantee that plugins will have `CAP_SYS_ADMIN` capability on Linux when running on Nodes. +* Plugins SHOULD clearly document any additionally required capabilities and/or security context. + +##### Namespaces + +* A Plugin SHOULD NOT assume that it is in the same [Linux namespaces](https://en.wikipedia.org/wiki/Linux_namespaces) as the Plugin Supervisor. + The CO MUST clearly document the [mount propagation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) requirements for Node Plugins and the Plugin Supervisor SHALL satisfy the CO’s requirements. + +##### Cgroup Isolation + +* A Plugin MAY be constrained by cgroups. +* An operator or Plugin Supervisor MAY configure the devices cgroup subsystem to ensure that a Plugin may access requisite devices. +* A Plugin Supervisor MAY define resource limits for a Plugin. + +##### Resource Requirements + +* SPs SHOULD unambiguously document all of a Plugin’s resource requirements. diff --git a/vendor/github.com/golang/mock/.gitignore b/vendor/github.com/golang/mock/.gitignore new file mode 100644 index 000000000..4eb2f7950 --- /dev/null +++ b/vendor/github.com/golang/mock/.gitignore @@ -0,0 +1,17 @@ +# Object files and binaries from go. +*.[568] + +# Library files. +*.a + +# Any file prefixed by an underscore. +*/_* + +# Vim temporary files. +.*.swp + +# The mockgen binary. +mockgen/mockgen + +# A binary produced by gotest. 
+#gomock/[568]\.out diff --git a/vendor/github.com/golang/mock/.travis.yml b/vendor/github.com/golang/mock/.travis.yml new file mode 100644 index 000000000..543ce129b --- /dev/null +++ b/vendor/github.com/golang/mock/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + # we intend to support only the latest version and perhaps the previous one + - 1.7 + - 1.8 + +script: + - go build ./... + - go install github.com/golang/mock/mockgen + - ./ci/check_go_fmt.sh + - ./ci/check_go_generate.sh + - go test -v ./... diff --git a/vendor/github.com/golang/mock/AUTHORS b/vendor/github.com/golang/mock/AUTHORS new file mode 100644 index 000000000..660b8ccc8 --- /dev/null +++ b/vendor/github.com/golang/mock/AUTHORS @@ -0,0 +1,12 @@ +# This is the official list of GoMock authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Alex Reece +Google Inc. diff --git a/vendor/github.com/golang/mock/CONTRIBUTORS b/vendor/github.com/golang/mock/CONTRIBUTORS new file mode 100644 index 000000000..def849cab --- /dev/null +++ b/vendor/github.com/golang/mock/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute (and typically +# have contributed) code to the gomock repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). 
+# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. + +Aaron Jacobs +Alex Reece +David Symonds +Ryan Barrett diff --git a/vendor/github.com/golang/mock/LICENSE b/vendor/github.com/golang/mock/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/golang/mock/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/mock/README.md b/vendor/github.com/golang/mock/README.md new file mode 100644 index 000000000..daf4f975d --- /dev/null +++ b/vendor/github.com/golang/mock/README.md @@ -0,0 +1,86 @@ +gomock [![Build Status](https://travis-ci.org/golang/mock.svg?branch=master)](https://travis-ci.org/golang/mock) +====== + +GoMock is a mocking framework for the [Go programming language][golang]. It +integrates well with Go's built-in `testing` package, but can be used in other +contexts too. + + +Installation +------------ + +Once you have [installed Go][golang-install], run these commands +to install the `gomock` package and the `mockgen` tool: + + go get github.com/golang/mock/gomock + go get github.com/golang/mock/mockgen + + +Documentation +------------- + +After installing, you can use `go doc` to get documentation: + + go doc github.com/golang/mock/gomock + +Alternatively, there is an online reference for the package hosted on GoPkgDoc +[here][gomock-ref]. + + +Running mockgen +--------------- + +`mockgen` has two modes of operation: source and reflect. +Source mode generates mock interfaces from a source file. +It is enabled by using the -source flag. Other flags that +may be useful in this mode are -imports and -aux_files. 
+ +Example: + + mockgen -source=foo.go [other options] + +Reflect mode generates mock interfaces by building a program +that uses reflection to understand interfaces. It is enabled +by passing two non-flag arguments: an import path, and a +comma-separated list of symbols. + +Example: + + mockgen database/sql/driver Conn,Driver + +The `mockgen` command is used to generate source code for a mock +class given a Go source file containing interfaces to be mocked. +It supports the following flags: + + * `-source`: A file containing interfaces to be mocked. + + * `-destination`: A file to which to write the resulting source code. If you + don't set this, the code is printed to standard output. + + * `-package`: The package to use for the resulting mock class + source code. If you don't set this, the package name is `mock_` concatenated + with the package of the input file. + + * `-imports`: A list of explicit imports that should be used in the resulting + source code, specified as a comma-separated list of elements of the form + `foo=bar/baz`, where `bar/baz` is the package being imported and `foo` is + the identifier to use for the package in the generated source code. + + * `-aux_files`: A list of additional files that should be consulted to + resolve e.g. embedded interfaces defined in a different file. This is + specified as a comma-separated list of elements of the form + `foo=bar/baz.go`, where `bar/baz.go` is the source file and `foo` is the + package name of that file used by the -source file. + +* `-build_flags`: (reflect mode only) Flags passed verbatim to `go build`. + +For an example of the use of `mockgen`, see the `sample/` directory. In simple +cases, you will need only the `-source` flag. + + +TODO: Brief overview of how to create mock objects and set up expectations, and +an example. 
+ +[golang]: http://golang.org/ +[golang-install]: http://golang.org/doc/install.html#releases +[gomock-ref]: http://godoc.org/github.com/golang/mock/gomock diff --git a/vendor/github.com/golang/mock/ci/check_go_fmt.sh b/vendor/github.com/golang/mock/ci/check_go_fmt.sh new file mode 100755 index 000000000..ae4028ac2 --- /dev/null +++ b/vendor/github.com/golang/mock/ci/check_go_fmt.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# This script is used by the CI to check if the code is gofmt formatted. + +set -euo pipefail + +GOFMT_DIFF=$(IFS=$'\n'; gofmt -d $( find . -type f -name '*.go' ) ) +if [[ -n "${GOFMT_DIFF}" ]]; then + echo "${GOFMT_DIFF}" + echo + echo "The go source files aren't gofmt formatted." + exit 1 +fi diff --git a/vendor/github.com/golang/mock/ci/check_go_generate.sh b/vendor/github.com/golang/mock/ci/check_go_generate.sh new file mode 100755 index 000000000..a0212e28a --- /dev/null +++ b/vendor/github.com/golang/mock/ci/check_go_generate.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# This script is used by the CI to check if 'go generate ./...' is up to date. +# +# Note: If the generated files aren't up to date then this script updates +# them despite printing an error message so running it the second time +# might not print any errors. This isn't very useful locally during development +# but it works well with the CI that downloads a fresh version of the repo +# each time before executing this script. + +set -euo pipefail + +TEMP_DIR=$( mktemp -d ) +function cleanup() { + rm -rf "${TEMP_DIR}" +} +trap cleanup EXIT + +cp -r . "${TEMP_DIR}/" +go generate ./... +if ! diff -r . "${TEMP_DIR}"; then + echo + echo "The generated files aren't up to date." + echo "Update them with the 'go generate ./...' command." 
+ exit 1 +fi diff --git a/vendor/github.com/golang/mock/gomock/call.go b/vendor/github.com/golang/mock/gomock/call.go new file mode 100644 index 000000000..cc8dfffeb --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/call.go @@ -0,0 +1,258 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "fmt" + "reflect" + "strings" +) + +// Call represents an expected call to a mock. +type Call struct { + t TestReporter // for triggering test failures on invalid call setup + + receiver interface{} // the receiver of the method call + method string // the name of the method + methodType reflect.Type // the type of the method + args []Matcher // the args + rets []interface{} // the return values (if any) + + preReqs []*Call // prerequisite calls + + // Expectations + minCalls, maxCalls int + + numCalls int // actual number made + + // Actions + doFunc reflect.Value + setArgs map[int]reflect.Value +} + +// AnyTimes allows the expectation to be called 0 or more times +func (c *Call) AnyTimes() *Call { + c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity + return c +} + +// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called, MinTimes also +// sets the maximum number of calls to infinity. 
+func (c *Call) MinTimes(n int) *Call { + c.minCalls = n + if c.maxCalls == 1 { + c.maxCalls = 1e8 + } + return c +} + +// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called, MaxTimes also +// sets the minimum number of calls to 0. +func (c *Call) MaxTimes(n int) *Call { + c.maxCalls = n + if c.minCalls == 1 { + c.minCalls = 0 + } + return c +} + +// Do declares the action to run when the call is matched. +// It takes an interface{} argument to support n-arity functions. +func (c *Call) Do(f interface{}) *Call { + // TODO: Check arity and types here, rather than dying badly elsewhere. + c.doFunc = reflect.ValueOf(f) + return c +} + +func (c *Call) Return(rets ...interface{}) *Call { + mt := c.methodType + if len(rets) != mt.NumOut() { + c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d", + c.receiver, c.method, len(rets), mt.NumOut()) + } + for i, ret := range rets { + if got, want := reflect.TypeOf(ret), mt.Out(i); got == want { + // Identical types; nothing to do. + } else if got == nil { + // Nil needs special handling. + switch want.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + // ok + default: + c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable", + i, c.receiver, c.method, want) + } + } else if got.AssignableTo(want) { + // Assignable type relation. Make the assignment now so that the generated code + // can return the values with a type assertion. + v := reflect.New(want).Elem() + v.Set(reflect.ValueOf(ret)) + rets[i] = v.Interface() + } else { + c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v", + i, c.receiver, c.method, got, want) + } + } + + c.rets = rets + return c +} + +func (c *Call) Times(n int) *Call { + c.minCalls, c.maxCalls = n, n + return c +} + +// SetArg declares an action that will set the nth argument's value, +// indirected through a pointer. 
+func (c *Call) SetArg(n int, value interface{}) *Call { + if c.setArgs == nil { + c.setArgs = make(map[int]reflect.Value) + } + mt := c.methodType + // TODO: This will break on variadic methods. + // We will need to check those at invocation time. + if n < 0 || n >= mt.NumIn() { + c.t.Fatalf("SetArg(%d, ...) called for a method with %d args", n, mt.NumIn()) + } + // Permit setting argument through an interface. + // In the interface case, we don't (nay, can't) check the type here. + at := mt.In(n) + switch at.Kind() { + case reflect.Ptr: + dt := at.Elem() + if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) { + c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v", n, vt, dt) + } + case reflect.Interface: + // nothing to do + default: + c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface type %v", n, at) + } + c.setArgs[n] = reflect.ValueOf(value) + return c +} + +// isPreReq returns true if other is a direct or indirect prerequisite to c. +func (c *Call) isPreReq(other *Call) bool { + for _, preReq := range c.preReqs { + if other == preReq || preReq.isPreReq(other) { + return true + } + } + return false +} + +// After declares that the call may only match after preReq has been exhausted. +func (c *Call) After(preReq *Call) *Call { + if c == preReq { + c.t.Fatalf("A call isn't allowed to be it's own prerequisite") + } + if preReq.isPreReq(c) { + c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq) + } + + c.preReqs = append(c.preReqs, preReq) + return c +} + +// Returns true iff the minimum number of calls have been made. +func (c *Call) satisfied() bool { + return c.numCalls >= c.minCalls +} + +// Returns true iff the maximum number of calls have been made. 
+func (c *Call) exhausted() bool { + return c.numCalls >= c.maxCalls +} + +func (c *Call) String() string { + args := make([]string, len(c.args)) + for i, arg := range c.args { + args[i] = arg.String() + } + arguments := strings.Join(args, ", ") + return fmt.Sprintf("%T.%v(%s)", c.receiver, c.method, arguments) +} + +// Tests if the given call matches the expected call. +func (c *Call) matches(args []interface{}) bool { + if len(args) != len(c.args) { + return false + } + for i, m := range c.args { + if !m.Matches(args[i]) { + return false + } + } + + // Check that all prerequisite calls have been satisfied. + for _, preReqCall := range c.preReqs { + if !preReqCall.satisfied() { + return false + } + } + + return true +} + +// dropPrereqs tells the expected Call to not re-check prerequite calls any +// longer, and to return its current set. +func (c *Call) dropPrereqs() (preReqs []*Call) { + preReqs = c.preReqs + c.preReqs = nil + return +} + +func (c *Call) call(args []interface{}) (rets []interface{}, action func()) { + c.numCalls++ + + // Actions + if c.doFunc.IsValid() { + doArgs := make([]reflect.Value, len(args)) + ft := c.doFunc.Type() + for i := 0; i < len(args); i++ { + if args[i] != nil { + doArgs[i] = reflect.ValueOf(args[i]) + } else { + // Use the zero value for the arg. + doArgs[i] = reflect.Zero(ft.In(i)) + } + } + action = func() { c.doFunc.Call(doArgs) } + } + for n, v := range c.setArgs { + reflect.ValueOf(args[n]).Elem().Set(v) + } + + rets = c.rets + if rets == nil { + // Synthesize the zero value for each of the return args' types. + mt := c.methodType + rets = make([]interface{}, mt.NumOut()) + for i := 0; i < mt.NumOut(); i++ { + rets[i] = reflect.Zero(mt.Out(i)).Interface() + } + } + + return +} + +// InOrder declares that the given calls should occur in order. 
+func InOrder(calls ...*Call) { + for i := 1; i < len(calls); i++ { + calls[i].After(calls[i-1]) + } +} diff --git a/vendor/github.com/golang/mock/gomock/call_test.go b/vendor/github.com/golang/mock/gomock/call_test.go new file mode 100644 index 000000000..3ae7263cb --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/call_test.go @@ -0,0 +1,47 @@ +package gomock + +import "testing" + +type mockTestReporter struct { + errorCalls int + fatalCalls int +} + +func (o *mockTestReporter) Errorf(format string, args ...interface{}) { + o.errorCalls++ +} + +func (o *mockTestReporter) Fatalf(format string, args ...interface{}) { + o.fatalCalls++ +} + +func TestCall_After(t *testing.T) { + t.Run("SelfPrereqCallsFatalf", func(t *testing.T) { + tr1 := &mockTestReporter{} + + c := &Call{t: tr1} + c.After(c) + + if tr1.fatalCalls != 1 { + t.Errorf("number of fatal calls == %v, want 1", tr1.fatalCalls) + } + }) + + t.Run("LoopInCallOrderCallsFatalf", func(t *testing.T) { + tr1 := &mockTestReporter{} + tr2 := &mockTestReporter{} + + c1 := &Call{t: tr1} + c2 := &Call{t: tr2} + c1.After(c2) + c2.After(c1) + + if tr1.errorCalls != 0 || tr1.fatalCalls != 0 { + t.Error("unexpected errors") + } + + if tr2.fatalCalls != 1 { + t.Errorf("number of fatal calls == %v, want 1", tr2.fatalCalls) + } + }) +} diff --git a/vendor/github.com/golang/mock/gomock/callset.go b/vendor/github.com/golang/mock/gomock/callset.go new file mode 100644 index 000000000..1b7de4c0b --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/callset.go @@ -0,0 +1,76 @@ +// Copyright 2011 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +// callSet represents a set of expected calls, indexed by receiver and method +// name. +type callSet map[interface{}]map[string][]*Call + +// Add adds a new expected call. +func (cs callSet) Add(call *Call) { + methodMap, ok := cs[call.receiver] + if !ok { + methodMap = make(map[string][]*Call) + cs[call.receiver] = methodMap + } + methodMap[call.method] = append(methodMap[call.method], call) +} + +// Remove removes an expected call. +func (cs callSet) Remove(call *Call) { + methodMap, ok := cs[call.receiver] + if !ok { + return + } + sl := methodMap[call.method] + for i, c := range sl { + if c == call { + // quick removal; we don't need to maintain call order + if len(sl) > 1 { + sl[i] = sl[len(sl)-1] + } + methodMap[call.method] = sl[:len(sl)-1] + break + } + } +} + +// FindMatch searches for a matching call. Returns nil if no call matched. +func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) *Call { + methodMap, ok := cs[receiver] + if !ok { + return nil + } + calls, ok := methodMap[method] + if !ok { + return nil + } + + // Search through the unordered set of calls expected on a method on a + // receiver. + for _, call := range calls { + // A call should not normally still be here if exhausted, + // but it can happen if, for instance, .Times(0) was used. + // Pretend the call doesn't match. 
+ if call.exhausted() { + continue + } + if call.matches(args) { + return call + } + } + + return nil +} diff --git a/vendor/github.com/golang/mock/gomock/controller.go b/vendor/github.com/golang/mock/gomock/controller.go new file mode 100644 index 000000000..6bff78d1d --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/controller.go @@ -0,0 +1,183 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// GoMock - a mock framework for Go. +// +// Standard usage: +// (1) Define an interface that you wish to mock. +// type MyInterface interface { +// SomeMethod(x int64, y string) +// } +// (2) Use mockgen to generate a mock from the interface. +// (3) Use the mock in a test: +// func TestMyThing(t *testing.T) { +// mockCtrl := gomock.NewController(t) +// defer mockCtrl.Finish() +// +// mockObj := something.NewMockMyInterface(mockCtrl) +// mockObj.EXPECT().SomeMethod(4, "blah") +// // pass mockObj to a real object and play with it. +// } +// +// By default, expected calls are not enforced to run in any particular order. +// Call order dependency can be enforced by use of InOrder and/or Call.After. +// Call.After can create more varied call order dependencies, but InOrder is +// often more convenient. +// +// The following examples create equivalent call order dependencies. 
+// +// Example of using Call.After to chain expected call order: +// +// firstCall := mockObj.EXPECT().SomeMethod(1, "first") +// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall) +// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall) +// +// Example of using InOrder to declare expected call order: +// +// gomock.InOrder( +// mockObj.EXPECT().SomeMethod(1, "first"), +// mockObj.EXPECT().SomeMethod(2, "second"), +// mockObj.EXPECT().SomeMethod(3, "third"), +// ) +// +// TODO: +// - Handle different argument/return types (e.g. ..., chan, map, interface). +package gomock + +import ( + "fmt" + "reflect" + "sync" +) + +// A TestReporter is something that can be used to report test failures. +// It is satisfied by the standard library's *testing.T. +type TestReporter interface { + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) +} + +// A Controller represents the top-level control of a mock ecosystem. +// It defines the scope and lifetime of mock objects, as well as their expectations. +// It is safe to call Controller's methods from multiple goroutines. +type Controller struct { + mu sync.Mutex + t TestReporter + expectedCalls callSet +} + +func NewController(t TestReporter) *Controller { + return &Controller{ + t: t, + expectedCalls: make(callSet), + } +} + +func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call { + recv := reflect.ValueOf(receiver) + for i := 0; i < recv.Type().NumMethod(); i++ { + if recv.Type().Method(i).Name == method { + return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...) + } + } + ctrl.t.Fatalf("gomock: failed finding method %s on %T", method, receiver) + // In case t.Fatalf does not panic. 
+ panic(fmt.Sprintf("gomock: failed finding method %s on %T", method, receiver)) +} + +func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { + // TODO: check arity, types. + margs := make([]Matcher, len(args)) + for i, arg := range args { + if m, ok := arg.(Matcher); ok { + margs[i] = m + } else if arg == nil { + // Handle nil specially so that passing a nil interface value + // will match the typed nils of concrete args. + margs[i] = Nil() + } else { + margs[i] = Eq(arg) + } + } + + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + + call := &Call{t: ctrl.t, receiver: receiver, method: method, methodType: methodType, args: margs, minCalls: 1, maxCalls: 1} + + ctrl.expectedCalls.Add(call) + return call +} + +func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} { + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + + expected := ctrl.expectedCalls.FindMatch(receiver, method, args) + if expected == nil { + ctrl.t.Fatalf("no matching expected call: %T.%v(%v)", receiver, method, args) + } + + // Two things happen here: + // * the matching call no longer needs to check prerequite calls, + // * and the prerequite calls are no longer expected, so remove them. + preReqCalls := expected.dropPrereqs() + for _, preReqCall := range preReqCalls { + ctrl.expectedCalls.Remove(preReqCall) + } + + rets, action := expected.call(args) + if expected.exhausted() { + ctrl.expectedCalls.Remove(expected) + } + + // Don't hold the lock while doing the call's action (if any) + // so that actions may execute concurrently. + // We use the deferred Unlock to capture any panics that happen above; + // here we add a deferred Lock to balance it. 
+ ctrl.mu.Unlock() + defer ctrl.mu.Lock() + if action != nil { + action() + } + + return rets +} + +func (ctrl *Controller) Finish() { + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + + // If we're currently panicking, probably because this is a deferred call, + // pass through the panic. + if err := recover(); err != nil { + panic(err) + } + + // Check that all remaining expected calls are satisfied. + failures := false + for _, methodMap := range ctrl.expectedCalls { + for _, calls := range methodMap { + for _, call := range calls { + if !call.satisfied() { + ctrl.t.Errorf("missing call(s) to %v", call) + failures = true + } + } + } + } + if failures { + ctrl.t.Fatalf("aborting test due to missing call(s)") + } +} diff --git a/vendor/github.com/golang/mock/gomock/controller_test.go b/vendor/github.com/golang/mock/gomock/controller_test.go new file mode 100644 index 000000000..57f79572c --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/controller_test.go @@ -0,0 +1,475 @@ +// Copyright 2011 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gomock_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/golang/mock/gomock" +) + +type ErrorReporter struct { + t *testing.T + log []string + failed bool + fatalToken struct{} +} + +func NewErrorReporter(t *testing.T) *ErrorReporter { + return &ErrorReporter{t: t} +} + +func (e *ErrorReporter) reportLog() { + for _, entry := range e.log { + e.t.Log(entry) + } +} + +func (e *ErrorReporter) assertPass(msg string) { + if e.failed { + e.t.Errorf("Expected pass, but got failure(s): %s", msg) + e.reportLog() + } +} + +func (e *ErrorReporter) assertFail(msg string) { + if !e.failed { + e.t.Errorf("Expected failure, but got pass: %s", msg) + } +} + +// Use to check that code triggers a fatal test failure. +func (e *ErrorReporter) assertFatal(fn func()) { + defer func() { + err := recover() + if err == nil { + var actual string + if e.failed { + actual = "non-fatal failure" + } else { + actual = "pass" + } + e.t.Error("Expected fatal failure, but got a", actual) + } else if token, ok := err.(*struct{}); ok && token == &e.fatalToken { + // This is okay - the panic is from Fatalf(). + return + } else { + // Some other panic. + panic(err) + } + }() + + fn() +} + +// recoverUnexpectedFatal can be used as a deferred call in test cases to +// recover from and display a call to ErrorReporter.Fatalf(). +func (e *ErrorReporter) recoverUnexpectedFatal() { + err := recover() + if err == nil { + // No panic. + } else if token, ok := err.(*struct{}); ok && token == &e.fatalToken { + // Unexpected fatal error happened. + e.t.Error("Got unexpected fatal error(s). All errors up to this point:") + e.reportLog() + return + } else { + // Some other panic. + panic(err) + } +} + +func (e *ErrorReporter) Logf(format string, args ...interface{}) { + e.log = append(e.log, fmt.Sprintf(format, args...)) +} + +func (e *ErrorReporter) Errorf(format string, args ...interface{}) { + e.Logf(format, args...) 
+ e.failed = true +} + +func (e *ErrorReporter) Fatalf(format string, args ...interface{}) { + e.Logf(format, args...) + e.failed = true + panic(&e.fatalToken) +} + +// A type purely for use as a receiver in testing the Controller. +type Subject struct{} + +func (s *Subject) FooMethod(arg string) int { + return 0 +} + +func (s *Subject) BarMethod(arg string) int { + return 0 +} + +func assertEqual(t *testing.T, expected interface{}, actual interface{}) { + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %+v, but got %+v", expected, actual) + } +} + +func createFixtures(t *testing.T) (reporter *ErrorReporter, ctrl *gomock.Controller) { + // reporter acts as a testing.T-like object that we pass to the + // Controller. We use it to test that the mock considered tests + // successful or failed. + reporter = NewErrorReporter(t) + ctrl = gomock.NewController(reporter) + return +} + +func TestNoCalls(t *testing.T) { + reporter, ctrl := createFixtures(t) + ctrl.Finish() + reporter.assertPass("No calls expected or made.") +} + +func TestExpectedMethodCall(t *testing.T) { + reporter, ctrl := createFixtures(t) + subject := new(Subject) + + ctrl.RecordCall(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Finish() + + reporter.assertPass("Expected method call made.") +} + +func TestUnexpectedMethodCall(t *testing.T) { + reporter, ctrl := createFixtures(t) + subject := new(Subject) + + reporter.assertFatal(func() { + ctrl.Call(subject, "FooMethod", "argument") + }) + + ctrl.Finish() +} + +func TestRepeatedCall(t *testing.T) { + reporter, ctrl := createFixtures(t) + subject := new(Subject) + + ctrl.RecordCall(subject, "FooMethod", "argument").Times(3) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + reporter.assertPass("After expected repeated method calls.") + reporter.assertFatal(func() { + ctrl.Call(subject, "FooMethod", "argument") + 
}) + ctrl.Finish() + reporter.assertFail("After calling one too many times.") +} + +func TestUnexpectedArgCount(t *testing.T) { + reporter, ctrl := createFixtures(t) + defer reporter.recoverUnexpectedFatal() + subject := new(Subject) + + ctrl.RecordCall(subject, "FooMethod", "argument") + reporter.assertFatal(func() { + // This call is made with the wrong number of arguments... + ctrl.Call(subject, "FooMethod", "argument", "extra_argument") + }) + reporter.assertFatal(func() { + // ... so is this. + ctrl.Call(subject, "FooMethod") + }) + reporter.assertFatal(func() { + // The expected call wasn't made. + ctrl.Finish() + }) +} + +func TestAnyTimes(t *testing.T) { + reporter, ctrl := createFixtures(t) + subject := new(Subject) + + ctrl.RecordCall(subject, "FooMethod", "argument").AnyTimes() + for i := 0; i < 100; i++ { + ctrl.Call(subject, "FooMethod", "argument") + } + reporter.assertPass("After 100 method calls.") + ctrl.Finish() +} + +func TestMinTimes1(t *testing.T) { + // It fails if there are no calls + reporter, ctrl := createFixtures(t) + subject := new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(1) + reporter.assertFatal(func() { + ctrl.Finish() + }) + + // It succeeds if there is one call + reporter, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(1) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Finish() + + // It succeeds if there are many calls + reporter, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(1) + for i := 0; i < 100; i++ { + ctrl.Call(subject, "FooMethod", "argument") + } + ctrl.Finish() +} + +func TestMaxTimes1(t *testing.T) { + // It succeeds if there are no calls + _, ctrl := createFixtures(t) + subject := new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MaxTimes(1) + ctrl.Finish() + + // It succeeds if there is one call + _, ctrl = createFixtures(t) + subject = 
new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MaxTimes(1) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Finish() + + //It fails if there are more + reporter, ctrl := createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MaxTimes(1) + ctrl.Call(subject, "FooMethod", "argument") + reporter.assertFatal(func() { + ctrl.Call(subject, "FooMethod", "argument") + }) + ctrl.Finish() +} + +func TestMinMaxTimes(t *testing.T) { + // It fails if there are less calls than specified + reporter, ctrl := createFixtures(t) + subject := new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(2).MaxTimes(2) + ctrl.Call(subject, "FooMethod", "argument") + reporter.assertFatal(func() { + ctrl.Finish() + }) + + // It fails if there are more calls than specified + reporter, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MinTimes(2).MaxTimes(2) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + reporter.assertFatal(func() { + ctrl.Call(subject, "FooMethod", "argument") + }) + + // It succeeds if there is just the right number of calls + reporter, ctrl = createFixtures(t) + subject = new(Subject) + ctrl.RecordCall(subject, "FooMethod", "argument").MaxTimes(2).MinTimes(2) + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Call(subject, "FooMethod", "argument") + ctrl.Finish() +} + +func TestDo(t *testing.T) { + _, ctrl := createFixtures(t) + subject := new(Subject) + + doCalled := false + var argument string + ctrl.RecordCall(subject, "FooMethod", "argument").Do( + func(arg string) { + doCalled = true + argument = arg + }) + if doCalled { + t.Error("Do() callback called too early.") + } + + ctrl.Call(subject, "FooMethod", "argument") + + if !doCalled { + t.Error("Do() callback not called.") + } + if "argument" != argument { + t.Error("Do callback received wrong argument.") + } + + ctrl.Finish() +} + +func 
TestReturn(t *testing.T) { + _, ctrl := createFixtures(t) + subject := new(Subject) + + // Unspecified return should produce "zero" result. + ctrl.RecordCall(subject, "FooMethod", "zero") + ctrl.RecordCall(subject, "FooMethod", "five").Return(5) + + assertEqual( + t, + []interface{}{0}, + ctrl.Call(subject, "FooMethod", "zero")) + + assertEqual( + t, + []interface{}{5}, + ctrl.Call(subject, "FooMethod", "five")) + ctrl.Finish() +} + +func TestUnorderedCalls(t *testing.T) { + reporter, ctrl := createFixtures(t) + defer reporter.recoverUnexpectedFatal() + subjectTwo := new(Subject) + subjectOne := new(Subject) + + ctrl.RecordCall(subjectOne, "FooMethod", "1") + ctrl.RecordCall(subjectOne, "BarMethod", "2") + ctrl.RecordCall(subjectTwo, "FooMethod", "3") + ctrl.RecordCall(subjectTwo, "BarMethod", "4") + + // Make the calls in a different order, which should be fine. + ctrl.Call(subjectOne, "BarMethod", "2") + ctrl.Call(subjectTwo, "FooMethod", "3") + ctrl.Call(subjectTwo, "BarMethod", "4") + ctrl.Call(subjectOne, "FooMethod", "1") + + reporter.assertPass("After making all calls in different order") + + ctrl.Finish() + + reporter.assertPass("After finish") +} + +func commonTestOrderedCalls(t *testing.T) (reporter *ErrorReporter, ctrl *gomock.Controller, subjectOne, subjectTwo *Subject) { + reporter, ctrl = createFixtures(t) + + subjectOne = new(Subject) + subjectTwo = new(Subject) + + gomock.InOrder( + ctrl.RecordCall(subjectOne, "FooMethod", "1").AnyTimes(), + ctrl.RecordCall(subjectTwo, "FooMethod", "2"), + ctrl.RecordCall(subjectTwo, "BarMethod", "3"), + ) + + return +} + +func TestOrderedCallsCorrect(t *testing.T) { + reporter, ctrl, subjectOne, subjectTwo := commonTestOrderedCalls(t) + + ctrl.Call(subjectOne, "FooMethod", "1") + ctrl.Call(subjectTwo, "FooMethod", "2") + ctrl.Call(subjectTwo, "BarMethod", "3") + + ctrl.Finish() + + reporter.assertPass("After finish") +} + +func TestOrderedCallsInCorrect(t *testing.T) { + reporter, ctrl, subjectOne, subjectTwo := 
commonTestOrderedCalls(t) + + ctrl.Call(subjectOne, "FooMethod", "1") + reporter.assertFatal(func() { + ctrl.Call(subjectTwo, "BarMethod", "3") + }) +} + +// Test that calls that are prerequites to other calls but have maxCalls > +// minCalls are removed from the expected call set. +func TestOrderedCallsWithPreReqMaxUnbounded(t *testing.T) { + reporter, ctrl, subjectOne, subjectTwo := commonTestOrderedCalls(t) + + // Initially we should be able to call FooMethod("1") as many times as we + // want. + ctrl.Call(subjectOne, "FooMethod", "1") + ctrl.Call(subjectOne, "FooMethod", "1") + + // But calling something that has it as a prerequite should remove it from + // the expected call set. This allows tests to ensure that FooMethod("1") is + // *not* called after FooMethod("2"). + ctrl.Call(subjectTwo, "FooMethod", "2") + + // Therefore this call should fail: + reporter.assertFatal(func() { + ctrl.Call(subjectOne, "FooMethod", "1") + }) +} + +func TestCallAfterLoopPanic(t *testing.T) { + _, ctrl := createFixtures(t) + + subject := new(Subject) + + firstCall := ctrl.RecordCall(subject, "FooMethod", "1") + secondCall := ctrl.RecordCall(subject, "FooMethod", "2") + thirdCall := ctrl.RecordCall(subject, "FooMethod", "3") + + gomock.InOrder(firstCall, secondCall, thirdCall) + + defer func() { + err := recover() + if err == nil { + t.Error("Call.After creation of dependency loop did not panic.") + } + }() + + // This should panic due to dependency loop. 
+ firstCall.After(thirdCall) +} + +func TestPanicOverridesExpectationChecks(t *testing.T) { + ctrl := gomock.NewController(t) + reporter := NewErrorReporter(t) + + reporter.assertFatal(func() { + ctrl.RecordCall(new(Subject), "FooMethod", "1") + defer ctrl.Finish() + reporter.Fatalf("Intentional panic") + }) +} + +func TestSetArgWithBadType(t *testing.T) { + rep, ctrl := createFixtures(t) + defer ctrl.Finish() + + s := new(Subject) + // This should catch a type error: + rep.assertFatal(func() { + ctrl.RecordCall(s, "FooMethod", "1").SetArg(0, "blah") + }) + ctrl.Call(s, "FooMethod", "1") +} + +func TestTimes0(t *testing.T) { + rep, ctrl := createFixtures(t) + defer ctrl.Finish() + + s := new(Subject) + ctrl.RecordCall(s, "FooMethod", "arg").Times(0) + rep.assertFatal(func() { + ctrl.Call(s, "FooMethod", "arg") + }) +} diff --git a/vendor/github.com/golang/mock/gomock/matchers.go b/vendor/github.com/golang/mock/gomock/matchers.go new file mode 100644 index 000000000..e8b1ddccf --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/matchers.go @@ -0,0 +1,99 @@ +//go:generate mockgen -destination mock_matcher/mock_matcher.go github.com/golang/mock/gomock Matcher + +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "fmt" + "reflect" +) + +// A Matcher is a representation of a class of values. +// It is used to represent the valid or expected arguments to a mocked method. 
+type Matcher interface { + // Matches returns whether x is a match. + Matches(x interface{}) bool + + // String describes what the matcher matches. + String() string +} + +type anyMatcher struct{} + +func (anyMatcher) Matches(x interface{}) bool { + return true +} + +func (anyMatcher) String() string { + return "is anything" +} + +type eqMatcher struct { + x interface{} +} + +func (e eqMatcher) Matches(x interface{}) bool { + return reflect.DeepEqual(e.x, x) +} + +func (e eqMatcher) String() string { + return fmt.Sprintf("is equal to %v", e.x) +} + +type nilMatcher struct{} + +func (nilMatcher) Matches(x interface{}) bool { + if x == nil { + return true + } + + v := reflect.ValueOf(x) + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice: + return v.IsNil() + } + + return false +} + +func (nilMatcher) String() string { + return "is nil" +} + +type notMatcher struct { + m Matcher +} + +func (n notMatcher) Matches(x interface{}) bool { + return !n.m.Matches(x) +} + +func (n notMatcher) String() string { + // TODO: Improve this if we add a NotString method to the Matcher interface. + return "not(" + n.m.String() + ")" +} + +// Constructors +func Any() Matcher { return anyMatcher{} } +func Eq(x interface{}) Matcher { return eqMatcher{x} } +func Nil() Matcher { return nilMatcher{} } +func Not(x interface{}) Matcher { + if m, ok := x.(Matcher); ok { + return notMatcher{m} + } + return notMatcher{Eq(x)} +} diff --git a/vendor/github.com/golang/mock/gomock/matchers_test.go b/vendor/github.com/golang/mock/gomock/matchers_test.go new file mode 100644 index 000000000..29b97fb9c --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/matchers_test.go @@ -0,0 +1,70 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock_test + +import ( + "errors" + "testing" + + "github.com/golang/mock/gomock" + mock_matcher "github.com/golang/mock/gomock/mock_matcher" +) + +func TestMatchers(t *testing.T) { + type e interface{} + type testCase struct { + matcher gomock.Matcher + yes, no []e + } + tests := []testCase{ + testCase{gomock.Any(), []e{3, nil, "foo"}, nil}, + testCase{gomock.Eq(4), []e{4}, []e{3, "blah", nil, int64(4)}}, + testCase{gomock.Nil(), + []e{nil, (error)(nil), (chan bool)(nil), (*int)(nil)}, + []e{"", 0, make(chan bool), errors.New("err"), new(int)}}, + testCase{gomock.Not(gomock.Eq(4)), []e{3, "blah", nil, int64(4)}, []e{4}}, + } + for i, test := range tests { + for _, x := range test.yes { + if !test.matcher.Matches(x) { + t.Errorf(`test %d: "%v %s" should be true.`, i, x, test.matcher) + } + } + for _, x := range test.no { + if test.matcher.Matches(x) { + t.Errorf(`test %d: "%v %s" should be false.`, i, x, test.matcher) + } + } + } +} + +// A more thorough test of notMatcher +func TestNotMatcher(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockMatcher := mock_matcher.NewMockMatcher(ctrl) + notMatcher := gomock.Not(mockMatcher) + + mockMatcher.EXPECT().Matches(4).Return(true) + if match := notMatcher.Matches(4); match { + t.Errorf("notMatcher should not match 4") + } + + mockMatcher.EXPECT().Matches(5).Return(false) + if match := notMatcher.Matches(5); !match { + t.Errorf("notMatcher should match 5") + } +} diff --git a/vendor/github.com/golang/mock/gomock/mock_matcher/mock_matcher.go 
b/vendor/github.com/golang/mock/gomock/mock_matcher/mock_matcher.go new file mode 100644 index 000000000..d0bf885fb --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/mock_matcher/mock_matcher.go @@ -0,0 +1,56 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/golang/mock/gomock (interfaces: Matcher) + +package mock_gomock + +import ( + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockMatcher is a mock of Matcher interface +type MockMatcher struct { + ctrl *gomock.Controller + recorder *MockMatcherMockRecorder +} + +// MockMatcherMockRecorder is the mock recorder for MockMatcher +type MockMatcherMockRecorder struct { + mock *MockMatcher +} + +// NewMockMatcher creates a new mock instance +func NewMockMatcher(ctrl *gomock.Controller) *MockMatcher { + mock := &MockMatcher{ctrl: ctrl} + mock.recorder = &MockMatcherMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (_m *MockMatcher) EXPECT() *MockMatcherMockRecorder { + return _m.recorder +} + +// Matches mocks base method +func (_m *MockMatcher) Matches(_param0 interface{}) bool { + ret := _m.ctrl.Call(_m, "Matches", _param0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Matches indicates an expected call of Matches +func (_mr *MockMatcherMockRecorder) Matches(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Matches", reflect.TypeOf((*MockMatcher)(nil).Matches), arg0) +} + +// String mocks base method +func (_m *MockMatcher) String() string { + ret := _m.ctrl.Call(_m, "String") + ret0, _ := ret[0].(string) + return ret0 +} + +// String indicates an expected call of String +func (_mr *MockMatcherMockRecorder) String() *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "String", reflect.TypeOf((*MockMatcher)(nil).String)) +} diff --git a/vendor/github.com/golang/mock/mockgen/mockgen.go b/vendor/github.com/golang/mock/mockgen/mockgen.go 
new file mode 100644 index 000000000..3b08c1411 --- /dev/null +++ b/vendor/github.com/golang/mock/mockgen/mockgen.go @@ -0,0 +1,442 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// MockGen generates mock implementations of Go interfaces. +package main + +// TODO: This does not support recursive embedded interfaces. +// TODO: This does not support embedding package-local interfaces in a separate file. + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "go/token" + "io" + "log" + "os" + "path" + "sort" + "strconv" + "strings" + "unicode" + + "github.com/golang/mock/mockgen/model" +) + +const ( + gomockImportPath = "github.com/golang/mock/gomock" +) + +var ( + source = flag.String("source", "", "(source mode) Input Go source file; enables source mode.") + destination = flag.String("destination", "", "Output file; defaults to stdout.") + packageOut = flag.String("package", "", "Package of the generated code; defaults to the package of the input with a 'mock_' prefix.") + selfPackage = flag.String("self_package", "", "If set, the package this mock will be part of.") + + debugParser = flag.Bool("debug_parser", false, "Print out parser results only.") +) + +func main() { + flag.Usage = usage + flag.Parse() + + var pkg *model.Package + var err error + if *source != "" { + pkg, err = ParseFile(*source) + } else { + if flag.NArg() != 2 { + usage() + log.Fatal("Expected exactly two arguments") + } + pkg, err = 
Reflect(flag.Arg(0), strings.Split(flag.Arg(1), ",")) + } + if err != nil { + log.Fatalf("Loading input failed: %v", err) + } + + if *debugParser { + pkg.Print(os.Stdout) + return + } + + dst := os.Stdout + if len(*destination) > 0 { + f, err := os.Create(*destination) + if err != nil { + log.Fatalf("Failed opening destination file: %v", err) + } + defer f.Close() + dst = f + } + + packageName := *packageOut + if packageName == "" { + // pkg.Name in reflect mode is the base name of the import path, + // which might have characters that are illegal to have in package names. + packageName = "mock_" + sanitize(pkg.Name) + } + + g := new(generator) + if *source != "" { + g.filename = *source + } else { + g.srcPackage = flag.Arg(0) + g.srcInterfaces = flag.Arg(1) + } + if err := g.Generate(pkg, packageName); err != nil { + log.Fatalf("Failed generating mock: %v", err) + } + if _, err := dst.Write(g.Output()); err != nil { + log.Fatalf("Failed writing to destination: %v", err) + } +} + +func usage() { + io.WriteString(os.Stderr, usageText) + flag.PrintDefaults() +} + +const usageText = `mockgen has two modes of operation: source and reflect. + +Source mode generates mock interfaces from a source file. +It is enabled by using the -source flag. Other flags that +may be useful in this mode are -imports and -aux_files. +Example: + mockgen -source=foo.go [other options] + +Reflect mode generates mock interfaces by building a program +that uses reflection to understand interfaces. It is enabled +by passing two non-flag arguments: an import path, and a +comma-separated list of symbols. +Example: + mockgen database/sql/driver Conn,Driver + +` + +type generator struct { + buf bytes.Buffer + indent string + + filename string // may be empty + srcPackage, srcInterfaces string // may be empty + + packageMap map[string]string // map from import path to package name +} + +func (g *generator) p(format string, args ...interface{}) { + fmt.Fprintf(&g.buf, g.indent+format+"\n", args...) 
+} + +func (g *generator) in() { + g.indent += "\t" +} + +func (g *generator) out() { + if len(g.indent) > 0 { + g.indent = g.indent[0 : len(g.indent)-1] + } +} + +func removeDot(s string) string { + if len(s) > 0 && s[len(s)-1] == '.' { + return s[0 : len(s)-1] + } + return s +} + +// sanitize cleans up a string to make a suitable package name. +func sanitize(s string) string { + t := "" + for _, r := range s { + if t == "" { + if unicode.IsLetter(r) || r == '_' { + t += string(r) + continue + } + } else { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + t += string(r) + continue + } + } + t += "_" + } + if t == "_" { + t = "x" + } + return t +} + +func (g *generator) Generate(pkg *model.Package, pkgName string) error { + g.p("// Code generated by MockGen. DO NOT EDIT.") + if g.filename != "" { + g.p("// Source: %v", g.filename) + } else { + g.p("// Source: %v (interfaces: %v)", g.srcPackage, g.srcInterfaces) + } + g.p("") + + // Get all required imports, and generate unique names for them all. + im := pkg.Imports() + im[gomockImportPath] = true + im["reflect"] = true + + // Sort keys to make import alias generation predictable + sorted_paths := make([]string, len(im), len(im)) + x := 0 + for pth := range im { + sorted_paths[x] = pth + x++ + } + sort.Strings(sorted_paths) + + g.packageMap = make(map[string]string, len(im)) + localNames := make(map[string]bool, len(im)) + for _, pth := range sorted_paths { + base := sanitize(path.Base(pth)) + + // Local names for an imported package can usually be the basename of the import path. + // A couple of situations don't permit that, such as duplicate local names + // (e.g. importing "html/template" and "text/template"), or where the basename is + // a keyword (e.g. "foo/case"). + // try base0, base1, ... 
+ pkgName := base + i := 0 + for localNames[pkgName] || token.Lookup(pkgName).IsKeyword() { + pkgName = base + strconv.Itoa(i) + i++ + } + + g.packageMap[pth] = pkgName + localNames[pkgName] = true + } + + g.p("package %v", pkgName) + g.p("") + g.p("import (") + g.in() + for path, pkg := range g.packageMap { + if path == *selfPackage { + continue + } + g.p("%v %q", pkg, path) + } + for _, path := range pkg.DotImports { + g.p(". %q", path) + } + g.out() + g.p(")") + + for _, intf := range pkg.Interfaces { + if err := g.GenerateMockInterface(intf); err != nil { + return err + } + } + + return nil +} + +// The name of the mock type to use for the given interface identifier. +func mockName(typeName string) string { + return "Mock" + typeName +} + +func (g *generator) GenerateMockInterface(intf *model.Interface) error { + mockType := mockName(intf.Name) + + g.p("") + g.p("// %v is a mock of %v interface", mockType, intf.Name) + g.p("type %v struct {", mockType) + g.in() + g.p("ctrl *gomock.Controller") + g.p("recorder *%vMockRecorder", mockType) + g.out() + g.p("}") + g.p("") + + g.p("// %vMockRecorder is the mock recorder for %v", mockType, mockType) + g.p("type %vMockRecorder struct {", mockType) + g.in() + g.p("mock *%v", mockType) + g.out() + g.p("}") + g.p("") + + // TODO: Re-enable this if we can import the interface reliably. + //g.p("// Verify that the mock satisfies the interface at compile time.") + //g.p("var _ %v = (*%v)(nil)", typeName, mockType) + //g.p("") + + g.p("// New%v creates a new mock instance", mockType) + g.p("func New%v(ctrl *gomock.Controller) *%v {", mockType, mockType) + g.in() + g.p("mock := &%v{ctrl: ctrl}", mockType) + g.p("mock.recorder = &%vMockRecorder{mock}", mockType) + g.p("return mock") + g.out() + g.p("}") + g.p("") + + // XXX: possible name collision here if someone has EXPECT in their interface. 
+ g.p("// EXPECT returns an object that allows the caller to indicate expected use") + g.p("func (_m *%v) EXPECT() *%vMockRecorder {", mockType, mockType) + g.in() + g.p("return _m.recorder") + g.out() + g.p("}") + + g.GenerateMockMethods(mockType, intf, *selfPackage) + + return nil +} + +func (g *generator) GenerateMockMethods(mockType string, intf *model.Interface, pkgOverride string) { + for _, m := range intf.Methods { + g.p("") + g.GenerateMockMethod(mockType, m, pkgOverride) + g.p("") + g.GenerateMockRecorderMethod(mockType, m) + } +} + +// GenerateMockMethod generates a mock method implementation. +// If non-empty, pkgOverride is the package in which unqualified types reside. +func (g *generator) GenerateMockMethod(mockType string, m *model.Method, pkgOverride string) error { + args := make([]string, len(m.In)) + argNames := make([]string, len(m.In)) + for i, p := range m.In { + name := p.Name + if name == "" { + name = fmt.Sprintf("_param%d", i) + } + ts := p.Type.String(g.packageMap, pkgOverride) + args[i] = name + " " + ts + argNames[i] = name + } + if m.Variadic != nil { + name := m.Variadic.Name + if name == "" { + name = fmt.Sprintf("_param%d", len(m.In)) + } + ts := m.Variadic.Type.String(g.packageMap, pkgOverride) + args = append(args, name+" ..."+ts) + argNames = append(argNames, name) + } + argString := strings.Join(args, ", ") + + rets := make([]string, len(m.Out)) + for i, p := range m.Out { + rets[i] = p.Type.String(g.packageMap, pkgOverride) + } + retString := strings.Join(rets, ", ") + if len(rets) > 1 { + retString = "(" + retString + ")" + } + if retString != "" { + retString = " " + retString + } + + g.p("// %v mocks base method", m.Name) + g.p("func (_m *%v) %v(%v)%v {", mockType, m.Name, argString, retString) + g.in() + + callArgs := strings.Join(argNames, ", ") + if callArgs != "" { + callArgs = ", " + callArgs + } + if m.Variadic != nil { + // Non-trivial. 
The generated code must build a []interface{}, + // but the variadic argument may be any type. + g.p("_s := []interface{}{%s}", strings.Join(argNames[:len(argNames)-1], ", ")) + g.p("for _, _x := range %s {", argNames[len(argNames)-1]) + g.in() + g.p("_s = append(_s, _x)") + g.out() + g.p("}") + callArgs = ", _s..." + } + if len(m.Out) == 0 { + g.p(`_m.ctrl.Call(_m, "%v"%v)`, m.Name, callArgs) + } else { + g.p(`ret := _m.ctrl.Call(_m, "%v"%v)`, m.Name, callArgs) + + // Go does not allow "naked" type assertions on nil values, so we use the two-value form here. + // The value of that is either (x.(T), true) or (Z, false), where Z is the zero value for T. + // Happily, this coincides with the semantics we want here. + retNames := make([]string, len(rets)) + for i, t := range rets { + retNames[i] = fmt.Sprintf("ret%d", i) + g.p("%s, _ := ret[%d].(%s)", retNames[i], i, t) + } + g.p("return " + strings.Join(retNames, ", ")) + } + + g.out() + g.p("}") + return nil +} + +func (g *generator) GenerateMockRecorderMethod(mockType string, m *model.Method) error { + nargs := len(m.In) + args := make([]string, nargs) + for i := 0; i < nargs; i++ { + args[i] = "arg" + strconv.Itoa(i) + } + argString := strings.Join(args, ", ") + if nargs > 0 { + argString += " interface{}" + } + if m.Variadic != nil { + if nargs > 0 { + argString += ", " + } + argString += fmt.Sprintf("arg%d ...interface{}", nargs) + } + + g.p("// %v indicates an expected call of %v", m.Name, m.Name) + g.p("func (_mr *%vMockRecorder) %v(%v) *gomock.Call {", mockType, m.Name, argString) + g.in() + + callArgs := strings.Join(args, ", ") + if nargs > 0 { + callArgs = ", " + callArgs + } + if m.Variadic != nil { + if nargs == 0 { + // Easy: just use ... to push the arguments through. + callArgs = ", arg0..." + } else { + // Hard: create a temporary slice. + g.p("_s := append([]interface{}{%s}, arg%d...)", strings.Join(args, ", "), nargs) + callArgs = ", _s..." 
+ } + } + g.p(`return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "%s", reflect.TypeOf((*%s)(nil).%s)%s)`, m.Name, mockType, m.Name, callArgs) + + g.out() + g.p("}") + return nil +} + +// Output returns the generator's output, formatted in the standard Go style. +func (g *generator) Output() []byte { + src, err := format.Source(g.buf.Bytes()) + if err != nil { + log.Fatalf("Failed to format generated source code: %s\n%s", err, g.buf.String()) + } + return src +} diff --git a/vendor/github.com/golang/mock/mockgen/model/model.go b/vendor/github.com/golang/mock/mockgen/model/model.go new file mode 100644 index 000000000..0e69d66e6 --- /dev/null +++ b/vendor/github.com/golang/mock/mockgen/model/model.go @@ -0,0 +1,431 @@ +// Copyright 2012 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains the data model necessary for generating mock implementations. +package model + +import ( + "encoding/gob" + "fmt" + "io" + "reflect" + "strings" +) + +// Package is a Go package. It may be a subset. +type Package struct { + Name string + Interfaces []*Interface + DotImports []string +} + +func (pkg *Package) Print(w io.Writer) { + fmt.Fprintf(w, "package %s\n", pkg.Name) + for _, intf := range pkg.Interfaces { + intf.Print(w) + } +} + +// Imports returns the imports needed by the Package as a set of import paths. 
+func (pkg *Package) Imports() map[string]bool { + im := make(map[string]bool) + for _, intf := range pkg.Interfaces { + intf.addImports(im) + } + return im +} + +// Interface is a Go interface. +type Interface struct { + Name string + Methods []*Method +} + +func (intf *Interface) Print(w io.Writer) { + fmt.Fprintf(w, "interface %s\n", intf.Name) + for _, m := range intf.Methods { + m.Print(w) + } +} + +func (intf *Interface) addImports(im map[string]bool) { + for _, m := range intf.Methods { + m.addImports(im) + } +} + +// Method is a single method of an interface. +type Method struct { + Name string + In, Out []*Parameter + Variadic *Parameter // may be nil +} + +func (m *Method) Print(w io.Writer) { + fmt.Fprintf(w, " - method %s\n", m.Name) + if len(m.In) > 0 { + fmt.Fprintf(w, " in:\n") + for _, p := range m.In { + p.Print(w) + } + } + if m.Variadic != nil { + fmt.Fprintf(w, " ...:\n") + m.Variadic.Print(w) + } + if len(m.Out) > 0 { + fmt.Fprintf(w, " out:\n") + for _, p := range m.Out { + p.Print(w) + } + } +} + +func (m *Method) addImports(im map[string]bool) { + for _, p := range m.In { + p.Type.addImports(im) + } + if m.Variadic != nil { + m.Variadic.Type.addImports(im) + } + for _, p := range m.Out { + p.Type.addImports(im) + } +} + +// Parameter is an argument or return parameter of a method. +type Parameter struct { + Name string // may be empty + Type Type +} + +func (p *Parameter) Print(w io.Writer) { + n := p.Name + if n == "" { + n = `""` + } + fmt.Fprintf(w, " - %v: %v\n", n, p.Type.String(nil, "")) +} + +// Type is a Go type. +type Type interface { + String(pm map[string]string, pkgOverride string) string + addImports(im map[string]bool) +} + +func init() { + gob.Register(&ArrayType{}) + gob.Register(&ChanType{}) + gob.Register(&FuncType{}) + gob.Register(&MapType{}) + gob.Register(&NamedType{}) + gob.Register(&PointerType{}) + gob.Register(PredeclaredType("")) +} + +// ArrayType is an array or slice type. 
+type ArrayType struct { + Len int // -1 for slices, >= 0 for arrays + Type Type +} + +func (at *ArrayType) String(pm map[string]string, pkgOverride string) string { + s := "[]" + if at.Len > -1 { + s = fmt.Sprintf("[%d]", at.Len) + } + return s + at.Type.String(pm, pkgOverride) +} + +func (at *ArrayType) addImports(im map[string]bool) { at.Type.addImports(im) } + +// ChanType is a channel type. +type ChanType struct { + Dir ChanDir // 0, 1 or 2 + Type Type +} + +func (ct *ChanType) String(pm map[string]string, pkgOverride string) string { + s := ct.Type.String(pm, pkgOverride) + if ct.Dir == RecvDir { + return "<-chan " + s + } + if ct.Dir == SendDir { + return "chan<- " + s + } + return "chan " + s +} + +func (ct *ChanType) addImports(im map[string]bool) { ct.Type.addImports(im) } + +// ChanDir is a channel direction. +type ChanDir int + +const ( + RecvDir ChanDir = 1 + SendDir ChanDir = 2 +) + +// FuncType is a function type. +type FuncType struct { + In, Out []*Parameter + Variadic *Parameter // may be nil +} + +func (ft *FuncType) String(pm map[string]string, pkgOverride string) string { + args := make([]string, len(ft.In)) + for i, p := range ft.In { + args[i] = p.Type.String(pm, pkgOverride) + } + if ft.Variadic != nil { + args = append(args, "..."+ft.Variadic.Type.String(pm, pkgOverride)) + } + rets := make([]string, len(ft.Out)) + for i, p := range ft.Out { + rets[i] = p.Type.String(pm, pkgOverride) + } + retString := strings.Join(rets, ", ") + if nOut := len(ft.Out); nOut == 1 { + retString = " " + retString + } else if nOut > 1 { + retString = " (" + retString + ")" + } + return "func(" + strings.Join(args, ", ") + ")" + retString +} + +func (ft *FuncType) addImports(im map[string]bool) { + for _, p := range ft.In { + p.Type.addImports(im) + } + if ft.Variadic != nil { + ft.Variadic.Type.addImports(im) + } + for _, p := range ft.Out { + p.Type.addImports(im) + } +} + +// MapType is a map type. 
+type MapType struct { + Key, Value Type +} + +func (mt *MapType) String(pm map[string]string, pkgOverride string) string { + return "map[" + mt.Key.String(pm, pkgOverride) + "]" + mt.Value.String(pm, pkgOverride) +} + +func (mt *MapType) addImports(im map[string]bool) { + mt.Key.addImports(im) + mt.Value.addImports(im) +} + +// NamedType is an exported type in a package. +type NamedType struct { + Package string // may be empty + Type string // TODO: should this be typed Type? +} + +func (nt *NamedType) String(pm map[string]string, pkgOverride string) string { + // TODO: is this right? + if pkgOverride == nt.Package { + return nt.Type + } + return pm[nt.Package] + "." + nt.Type +} +func (nt *NamedType) addImports(im map[string]bool) { + if nt.Package != "" { + im[nt.Package] = true + } +} + +// PointerType is a pointer to another type. +type PointerType struct { + Type Type +} + +func (pt *PointerType) String(pm map[string]string, pkgOverride string) string { + return "*" + pt.Type.String(pm, pkgOverride) +} +func (pt *PointerType) addImports(im map[string]bool) { pt.Type.addImports(im) } + +// PredeclaredType is a predeclared type such as "int". +type PredeclaredType string + +func (pt PredeclaredType) String(pm map[string]string, pkgOverride string) string { return string(pt) } +func (pt PredeclaredType) addImports(im map[string]bool) {} + +// The following code is intended to be called by the program generated by ../reflect.go. + +func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) { + if it.Kind() != reflect.Interface { + return nil, fmt.Errorf("%v is not an interface", it) + } + intf := &Interface{} + + for i := 0; i < it.NumMethod(); i++ { + mt := it.Method(i) + // TODO: need to skip unexported methods? or just raise an error? 
+ m := &Method{ + Name: mt.Name, + } + + var err error + m.In, m.Variadic, m.Out, err = funcArgsFromType(mt.Type) + if err != nil { + return nil, err + } + + intf.Methods = append(intf.Methods, m) + } + + return intf, nil +} + +// t's Kind must be a reflect.Func. +func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) { + nin := t.NumIn() + if t.IsVariadic() { + nin-- + } + var p *Parameter + for i := 0; i < nin; i++ { + p, err = parameterFromType(t.In(i)) + if err != nil { + return + } + in = append(in, p) + } + if t.IsVariadic() { + p, err = parameterFromType(t.In(nin).Elem()) + if err != nil { + return + } + variadic = p + } + for i := 0; i < t.NumOut(); i++ { + p, err = parameterFromType(t.Out(i)) + if err != nil { + return + } + out = append(out, p) + } + return +} + +func parameterFromType(t reflect.Type) (*Parameter, error) { + tt, err := typeFromType(t) + if err != nil { + return nil, err + } + return &Parameter{Type: tt}, nil +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var byteType = reflect.TypeOf(byte(0)) + +func typeFromType(t reflect.Type) (Type, error) { + // Hack workaround for https://golang.org/issue/3853. + // This explicit check should not be necessary. + if t == byteType { + return PredeclaredType("byte"), nil + } + + if imp := t.PkgPath(); imp != "" { + return &NamedType{ + Package: imp, + Type: t.Name(), + }, nil + } + + // only unnamed or predeclared types after here + + // Lots of types have element types. Let's do the parsing and error checking for all of them. 
+ var elemType Type + switch t.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice: + var err error + elemType, err = typeFromType(t.Elem()) + if err != nil { + return nil, err + } + } + + switch t.Kind() { + case reflect.Array: + return &ArrayType{ + Len: t.Len(), + Type: elemType, + }, nil + case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String: + return PredeclaredType(t.Kind().String()), nil + case reflect.Chan: + var dir ChanDir + switch t.ChanDir() { + case reflect.RecvDir: + dir = RecvDir + case reflect.SendDir: + dir = SendDir + } + return &ChanType{ + Dir: dir, + Type: elemType, + }, nil + case reflect.Func: + in, variadic, out, err := funcArgsFromType(t) + if err != nil { + return nil, err + } + return &FuncType{ + In: in, + Out: out, + Variadic: variadic, + }, nil + case reflect.Interface: + // Two special interfaces. 
+ if t.NumMethod() == 0 { + return PredeclaredType("interface{}"), nil + } + if t == errorType { + return PredeclaredType("error"), nil + } + case reflect.Map: + kt, err := typeFromType(t.Key()) + if err != nil { + return nil, err + } + return &MapType{ + Key: kt, + Value: elemType, + }, nil + case reflect.Ptr: + return &PointerType{ + Type: elemType, + }, nil + case reflect.Slice: + return &ArrayType{ + Len: -1, + Type: elemType, + }, nil + case reflect.Struct: + if t.NumField() == 0 { + return PredeclaredType("struct{}"), nil + } + } + + // TODO: Struct, UnsafePointer + return nil, fmt.Errorf("can't yet turn %v (%v) into a model.Type", t, t.Kind()) +} diff --git a/vendor/github.com/golang/mock/mockgen/parse.go b/vendor/github.com/golang/mock/mockgen/parse.go new file mode 100644 index 000000000..5615d4b33 --- /dev/null +++ b/vendor/github.com/golang/mock/mockgen/parse.go @@ -0,0 +1,447 @@ +// Copyright 2012 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +// This file contains the model construction by parsing source files. 
+ +import ( + "flag" + "fmt" + "go/ast" + "go/parser" + "go/token" + "log" + "path" + "strconv" + "strings" + + "github.com/golang/mock/mockgen/model" +) + +var ( + imports = flag.String("imports", "", "(source mode) Comma-separated name=path pairs of explicit imports to use.") + auxFiles = flag.String("aux_files", "", "(source mode) Comma-separated pkg=path pairs of auxiliary Go source files.") +) + +// TODO: simplify error reporting + +func ParseFile(source string) (*model.Package, error) { + fs := token.NewFileSet() + file, err := parser.ParseFile(fs, source, nil, 0) + if err != nil { + return nil, fmt.Errorf("failed parsing source file %v: %v", source, err) + } + + p := &fileParser{ + fileSet: fs, + imports: make(map[string]string), + auxInterfaces: make(map[string]map[string]*ast.InterfaceType), + } + + // Handle -imports. + dotImports := make(map[string]bool) + if *imports != "" { + for _, kv := range strings.Split(*imports, ",") { + eq := strings.Index(kv, "=") + k, v := kv[:eq], kv[eq+1:] + if k == "." { + // TODO: Catch dupes? + dotImports[v] = true + } else { + // TODO: Catch dupes? + p.imports[k] = v + } + } + } + + // Handle -aux_files. + if err := p.parseAuxFiles(*auxFiles); err != nil { + return nil, err + } + p.addAuxInterfacesFromFile("", file) // this file + + pkg, err := p.parseFile(file) + if err != nil { + return nil, err + } + pkg.DotImports = make([]string, 0, len(dotImports)) + for path := range dotImports { + pkg.DotImports = append(pkg.DotImports, path) + } + return pkg, nil +} + +type fileParser struct { + fileSet *token.FileSet + imports map[string]string // package name => import path + + auxFiles []*ast.File + auxInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface +} + +func (p *fileParser) errorf(pos token.Pos, format string, args ...interface{}) error { + ps := p.fileSet.Position(pos) + format = "%s:%d:%d: " + format + args = append([]interface{}{ps.Filename, ps.Line, ps.Column}, args...) 
+ return fmt.Errorf(format, args...) +} + +func (p *fileParser) parseAuxFiles(auxFiles string) error { + auxFiles = strings.TrimSpace(auxFiles) + if auxFiles == "" { + return nil + } + for _, kv := range strings.Split(auxFiles, ",") { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("bad aux file spec: %v", kv) + } + file, err := parser.ParseFile(p.fileSet, parts[1], nil, 0) + if err != nil { + return err + } + p.auxFiles = append(p.auxFiles, file) + p.addAuxInterfacesFromFile(parts[0], file) + } + return nil +} + +func (p *fileParser) addAuxInterfacesFromFile(pkg string, file *ast.File) { + if _, ok := p.auxInterfaces[pkg]; !ok { + p.auxInterfaces[pkg] = make(map[string]*ast.InterfaceType) + } + for ni := range iterInterfaces(file) { + p.auxInterfaces[pkg][ni.name.Name] = ni.it + } +} + +func (p *fileParser) parseFile(file *ast.File) (*model.Package, error) { + allImports := importsOfFile(file) + // Don't stomp imports provided by -imports. Those should take precedence. + for pkg, path := range allImports { + if _, ok := p.imports[pkg]; !ok { + p.imports[pkg] = path + } + } + // Add imports from auxiliary files, which might be needed for embedded interfaces. + // Don't stomp any other imports. 
+ for _, f := range p.auxFiles { + for pkg, path := range importsOfFile(f) { + if _, ok := p.imports[pkg]; !ok { + p.imports[pkg] = path + } + } + } + + var is []*model.Interface + for ni := range iterInterfaces(file) { + i, err := p.parseInterface(ni.name.String(), "", ni.it) + if err != nil { + return nil, err + } + is = append(is, i) + } + return &model.Package{ + Name: file.Name.String(), + Interfaces: is, + }, nil +} + +func (p *fileParser) parseInterface(name, pkg string, it *ast.InterfaceType) (*model.Interface, error) { + intf := &model.Interface{Name: name} + for _, field := range it.Methods.List { + switch v := field.Type.(type) { + case *ast.FuncType: + if nn := len(field.Names); nn != 1 { + return nil, fmt.Errorf("expected one name for interface %v, got %d", intf.Name, nn) + } + m := &model.Method{ + Name: field.Names[0].String(), + } + var err error + m.In, m.Variadic, m.Out, err = p.parseFunc(pkg, v) + if err != nil { + return nil, err + } + intf.Methods = append(intf.Methods, m) + case *ast.Ident: + // Embedded interface in this package. + ei := p.auxInterfaces[""][v.String()] + if ei == nil { + return nil, p.errorf(v.Pos(), "unknown embedded interface %s", v.String()) + } + eintf, err := p.parseInterface(v.String(), pkg, ei) + if err != nil { + return nil, err + } + // Copy the methods. + // TODO: apply shadowing rules. + for _, m := range eintf.Methods { + intf.Methods = append(intf.Methods, m) + } + case *ast.SelectorExpr: + // Embedded interface in another package. + fpkg, sel := v.X.(*ast.Ident).String(), v.Sel.String() + ei := p.auxInterfaces[fpkg][sel] + if ei == nil { + return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", fpkg, sel) + } + epkg, ok := p.imports[fpkg] + if !ok { + return nil, p.errorf(v.X.Pos(), "unknown package %s", fpkg) + } + eintf, err := p.parseInterface(sel, epkg, ei) + if err != nil { + return nil, err + } + // Copy the methods. + // TODO: apply shadowing rules. 
+ for _, m := range eintf.Methods { + intf.Methods = append(intf.Methods, m) + } + default: + return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type) + } + } + return intf, nil +} + +func (p *fileParser) parseFunc(pkg string, f *ast.FuncType) (in []*model.Parameter, variadic *model.Parameter, out []*model.Parameter, err error) { + if f.Params != nil { + regParams := f.Params.List + if isVariadic(f) { + n := len(regParams) + varParams := regParams[n-1:] + regParams = regParams[:n-1] + vp, err := p.parseFieldList(pkg, varParams) + if err != nil { + return nil, nil, nil, p.errorf(varParams[0].Pos(), "failed parsing variadic argument: %v", err) + } + variadic = vp[0] + } + in, err = p.parseFieldList(pkg, regParams) + if err != nil { + return nil, nil, nil, p.errorf(f.Pos(), "failed parsing arguments: %v", err) + } + } + if f.Results != nil { + out, err = p.parseFieldList(pkg, f.Results.List) + if err != nil { + return nil, nil, nil, p.errorf(f.Pos(), "failed parsing returns: %v", err) + } + } + return +} + +func (p *fileParser) parseFieldList(pkg string, fields []*ast.Field) ([]*model.Parameter, error) { + nf := 0 + for _, f := range fields { + nn := len(f.Names) + if nn == 0 { + nn = 1 // anonymous parameter + } + nf += nn + } + if nf == 0 { + return nil, nil + } + ps := make([]*model.Parameter, nf) + i := 0 // destination index + for _, f := range fields { + t, err := p.parseType(pkg, f.Type) + if err != nil { + return nil, err + } + + if len(f.Names) == 0 { + // anonymous arg + ps[i] = &model.Parameter{Type: t} + i++ + continue + } + for _, name := range f.Names { + ps[i] = &model.Parameter{Name: name.Name, Type: t} + i++ + } + } + return ps, nil +} + +func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) { + switch v := typ.(type) { + case *ast.ArrayType: + ln := -1 + if v.Len != nil { + x, err := strconv.Atoi(v.Len.(*ast.BasicLit).Value) + if err != nil { + return nil, p.errorf(v.Len.Pos(), "bad array size: %v", err) + 
} + ln = x + } + t, err := p.parseType(pkg, v.Elt) + if err != nil { + return nil, err + } + return &model.ArrayType{Len: ln, Type: t}, nil + case *ast.ChanType: + t, err := p.parseType(pkg, v.Value) + if err != nil { + return nil, err + } + var dir model.ChanDir + if v.Dir == ast.SEND { + dir = model.SendDir + } + if v.Dir == ast.RECV { + dir = model.RecvDir + } + return &model.ChanType{Dir: dir, Type: t}, nil + case *ast.Ellipsis: + // assume we're parsing a variadic argument + return p.parseType(pkg, v.Elt) + case *ast.FuncType: + in, variadic, out, err := p.parseFunc(pkg, v) + if err != nil { + return nil, err + } + return &model.FuncType{In: in, Out: out, Variadic: variadic}, nil + case *ast.Ident: + if v.IsExported() { + // assume type in this package + return &model.NamedType{Package: pkg, Type: v.Name}, nil + } else { + // assume predeclared type + return model.PredeclaredType(v.Name), nil + } + case *ast.InterfaceType: + if v.Methods != nil && len(v.Methods.List) > 0 { + return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed interface types") + } + return model.PredeclaredType("interface{}"), nil + case *ast.MapType: + key, err := p.parseType(pkg, v.Key) + if err != nil { + return nil, err + } + value, err := p.parseType(pkg, v.Value) + if err != nil { + return nil, err + } + return &model.MapType{Key: key, Value: value}, nil + case *ast.SelectorExpr: + pkgName := v.X.(*ast.Ident).String() + pkg, ok := p.imports[pkgName] + if !ok { + return nil, p.errorf(v.Pos(), "unknown package %q", pkgName) + } + return &model.NamedType{Package: pkg, Type: v.Sel.String()}, nil + case *ast.StarExpr: + t, err := p.parseType(pkg, v.X) + if err != nil { + return nil, err + } + return &model.PointerType{Type: t}, nil + case *ast.StructType: + if v.Fields != nil && len(v.Fields.List) > 0 { + return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed struct types") + } + return model.PredeclaredType("struct{}"), nil + } + + return nil, fmt.Errorf("don't know how to 
parse type %T", typ) +} + +// importsOfFile returns a map of package name to import path +// of the imports in file. +func importsOfFile(file *ast.File) map[string]string { + /* We have to make guesses about some imports, because imports are not required + * to have names. Named imports are always certain. Unnamed imports are guessed + * to have a name of the last path component; if the last path component has dots, + * the first dot-delimited field is used as the name. + */ + + m := make(map[string]string) + for _, decl := range file.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.IMPORT { + continue + } + for _, spec := range gd.Specs { + is, ok := spec.(*ast.ImportSpec) + if !ok { + continue + } + pkg, importPath := "", string(is.Path.Value) + importPath = importPath[1 : len(importPath)-1] // remove quotes + + if is.Name != nil { + if is.Name.Name == "_" { + continue + } + pkg = removeDot(is.Name.Name) + } else { + _, last := path.Split(importPath) + pkg = strings.SplitN(last, ".", 2)[0] + } + if _, ok := m[pkg]; ok { + log.Fatalf("imported package collision: %q imported twice", pkg) + } + m[pkg] = importPath + } + } + return m +} + +type namedInterface struct { + name *ast.Ident + it *ast.InterfaceType +} + +// Create an iterator over all interfaces in file. +func iterInterfaces(file *ast.File) <-chan namedInterface { + ch := make(chan namedInterface) + go func() { + for _, decl := range file.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.TYPE { + continue + } + for _, spec := range gd.Specs { + ts, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + it, ok := ts.Type.(*ast.InterfaceType) + if !ok { + continue + } + + ch <- namedInterface{ts.Name, it} + } + } + close(ch) + }() + return ch +} + +// isVariadic returns whether the function is variadic. 
+func isVariadic(f *ast.FuncType) bool { + nargs := len(f.Params.List) + if nargs == 0 { + return false + } + _, ok := f.Params.List[nargs-1].Type.(*ast.Ellipsis) + return ok +} diff --git a/vendor/github.com/golang/mock/mockgen/reflect.go b/vendor/github.com/golang/mock/mockgen/reflect.go new file mode 100644 index 000000000..4df9689fe --- /dev/null +++ b/vendor/github.com/golang/mock/mockgen/reflect.go @@ -0,0 +1,163 @@ +// Copyright 2012 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +// This file contains the model construction by reflection. + +import ( + "bytes" + "encoding/gob" + "flag" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "text/template" + + "github.com/golang/mock/mockgen/model" +) + +var ( + progOnly = flag.Bool("prog_only", false, "(reflect mode) Only generate the reflection program; write it to stdout.") + execOnly = flag.String("exec_only", "", "(reflect mode) If set, execute this reflection program.") + buildFlags = flag.String("build_flags", "", "(reflect mode) Additional flags for go build.") +) + +func Reflect(importPath string, symbols []string) (*model.Package, error) { + // TODO: sanity check arguments + + progPath := *execOnly + if *execOnly == "" { + // We use TempDir instead of TempFile so we can control the filename. 
+ tmpDir, err := ioutil.TempDir("", "gomock_reflect_") + if err != nil { + return nil, err + } + defer func() { os.RemoveAll(tmpDir) }() + const progSource = "prog.go" + var progBinary = "prog.bin" + if runtime.GOOS == "windows" { + // Windows won't execute a program unless it has a ".exe" suffix. + progBinary += ".exe" + } + + // Generate program. + var program bytes.Buffer + data := reflectData{ + ImportPath: importPath, + Symbols: symbols, + } + if err := reflectProgram.Execute(&program, &data); err != nil { + return nil, err + } + if *progOnly { + io.Copy(os.Stdout, &program) + os.Exit(0) + } + if err := ioutil.WriteFile(filepath.Join(tmpDir, progSource), program.Bytes(), 0600); err != nil { + return nil, err + } + + cmdArgs := []string{} + cmdArgs = append(cmdArgs, "build") + if *buildFlags != "" { + cmdArgs = append(cmdArgs, *buildFlags) + } + cmdArgs = append(cmdArgs, "-o", progBinary, progSource) + + // Build the program. + cmd := exec.Command("go", cmdArgs...) + cmd.Dir = tmpDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return nil, err + } + progPath = filepath.Join(tmpDir, progBinary) + } + + // Run it. + cmd := exec.Command(progPath) + var stdout bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return nil, err + } + + // Process output. + var pkg model.Package + if err := gob.NewDecoder(&stdout).Decode(&pkg); err != nil { + return nil, err + } + return &pkg, nil +} + +type reflectData struct { + ImportPath string + Symbols []string +} + +// This program reflects on an interface value, and prints the +// gob encoding of a model.Package to standard output. +// JSON doesn't work because of the model.Type interface. 
+var reflectProgram = template.Must(template.New("program").Parse(` +package main + +import ( + "encoding/gob" + "fmt" + "os" + "path" + "reflect" + + "github.com/golang/mock/mockgen/model" + + pkg_ {{printf "%q" .ImportPath}} +) + +func main() { + its := []struct{ + sym string + typ reflect.Type + }{ + {{range .Symbols}} + { {{printf "%q" .}}, reflect.TypeOf((*pkg_.{{.}})(nil)).Elem()}, + {{end}} + } + pkg := &model.Package{ + // NOTE: This behaves contrary to documented behaviour if the + // package name is not the final component of the import path. + // The reflect package doesn't expose the package name, though. + Name: path.Base({{printf "%q" .ImportPath}}), + } + + for _, it := range its { + intf, err := model.InterfaceFromInterfaceType(it.typ) + if err != nil { + fmt.Fprintf(os.Stderr, "Reflection: %v\n", err) + os.Exit(1) + } + intf.Name = it.sym + pkg.Interfaces = append(pkg.Interfaces, intf) + } + if err := gob.NewEncoder(os.Stdout).Encode(pkg); err != nil { + fmt.Fprintf(os.Stderr, "gob encode: %v\n", err) + os.Exit(1) + } +} +`)) diff --git a/vendor/github.com/golang/mock/mockgen/tests/unexported_method/README.md b/vendor/github.com/golang/mock/mockgen/tests/unexported_method/README.md new file mode 100644 index 000000000..87f91d464 --- /dev/null +++ b/vendor/github.com/golang/mock/mockgen/tests/unexported_method/README.md @@ -0,0 +1 @@ +From #52, this tests an unexported method in the mocked interface. 
diff --git a/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport.go b/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport.go new file mode 100644 index 000000000..91d5baf7b --- /dev/null +++ b/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport.go @@ -0,0 +1,15 @@ +//go:generate mockgen -destination bugreport_mock.go -package bugreport -source=bugreport.go Example + +package bugreport + +import "fmt" + +// Example is an interface with a non exported method +type Example interface { + someMethod(string) string +} + +// CallExample is a simple function that uses the interface +func CallExample(e Example) { + fmt.Println(e.someMethod("test")) +} diff --git a/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport_mock.go b/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport_mock.go new file mode 100644 index 000000000..f1a16d813 --- /dev/null +++ b/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport_mock.go @@ -0,0 +1,44 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: bugreport.go + +package bugreport + +import ( + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockExample is a mock of Example interface +type MockExample struct { + ctrl *gomock.Controller + recorder *MockExampleMockRecorder +} + +// MockExampleMockRecorder is the mock recorder for MockExample +type MockExampleMockRecorder struct { + mock *MockExample +} + +// NewMockExample creates a new mock instance +func NewMockExample(ctrl *gomock.Controller) *MockExample { + mock := &MockExample{ctrl: ctrl} + mock.recorder = &MockExampleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (_m *MockExample) EXPECT() *MockExampleMockRecorder { + return _m.recorder +} + +// someMethod mocks base method +func (_m *MockExample) someMethod(_param0 string) string { + ret := _m.ctrl.Call(_m, "someMethod", _param0) + ret0, _ := ret[0].(string) + return ret0 +} + +// someMethod indicates an expected call of someMethod +func (_mr *MockExampleMockRecorder) someMethod(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "someMethod", reflect.TypeOf((*MockExample)(nil).someMethod), arg0) +} diff --git a/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport_test.go b/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport_test.go new file mode 100644 index 000000000..d428fb4c4 --- /dev/null +++ b/vendor/github.com/golang/mock/mockgen/tests/unexported_method/bugreport_test.go @@ -0,0 +1,17 @@ +package bugreport + +import ( + "testing" + + "github.com/golang/mock/gomock" +) + +func TestCallExample(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + e := NewMockExample(ctrl) + e.EXPECT().someMethod(gomock.Any()).Return("it works!") + + CallExample(e) +} diff --git a/vendor/github.com/golang/mock/sample/README.md b/vendor/github.com/golang/mock/sample/README.md new file mode 100644 index 
000000000..7180204f5 --- /dev/null +++ b/vendor/github.com/golang/mock/sample/README.md @@ -0,0 +1,16 @@ +This directory contains an example of a package containing a non-trivial +interface that can be mocked with GoMock. The interesting files are: + + * `user.go`: Source code for the sample package, containing interfaces to be + mocked. This file depends on the packages named imp[1-4] for various things. + + * `user_test.go`: A test for the sample package, in which mocks of the + interfaces from `user.go` are used. This demonstrates how to create mock + objects, set up expectations, and so on. + + * `mock_user/mock_user.go`: The generated mock code. See ../update_mocks.sh + for the command used to generate it. + +To run the test, + + go test github.com/golang/mock/sample diff --git a/vendor/github.com/golang/mock/sample/imp1/imp1.go b/vendor/github.com/golang/mock/sample/imp1/imp1.go new file mode 100644 index 000000000..ef9d01449 --- /dev/null +++ b/vendor/github.com/golang/mock/sample/imp1/imp1.go @@ -0,0 +1,17 @@ +package imp1 + +import "bufio" + +type Imp1 struct{} + +type ImpT int + +type ForeignEmbedded interface { + // The return value here also makes sure that + // the generated mock picks up the "bufio" import. + ForeignEmbeddedMethod() *bufio.Reader + + // This method uses a type in this package, + // which should be qualified when this interface is embedded. 
+ ImplicitPackage(s string, t ImpT, st []ImpT, pt *ImpT, ct chan ImpT) +} diff --git a/vendor/github.com/golang/mock/sample/imp2/imp2.go b/vendor/github.com/golang/mock/sample/imp2/imp2.go new file mode 100644 index 000000000..53bee9ade --- /dev/null +++ b/vendor/github.com/golang/mock/sample/imp2/imp2.go @@ -0,0 +1,3 @@ +package imp2 + +type Imp2 struct{} diff --git a/vendor/github.com/golang/mock/sample/imp3/imp3.go b/vendor/github.com/golang/mock/sample/imp3/imp3.go new file mode 100644 index 000000000..70f17c00f --- /dev/null +++ b/vendor/github.com/golang/mock/sample/imp3/imp3.go @@ -0,0 +1,3 @@ +package imp3 + +type Imp3 struct{} diff --git a/vendor/github.com/golang/mock/sample/imp4/imp4.go b/vendor/github.com/golang/mock/sample/imp4/imp4.go new file mode 100644 index 000000000..30a70767e --- /dev/null +++ b/vendor/github.com/golang/mock/sample/imp4/imp4.go @@ -0,0 +1,3 @@ +package imp_four + +type Imp4 struct{} diff --git a/vendor/github.com/golang/mock/sample/mock_user/mock_user.go b/vendor/github.com/golang/mock/sample/mock_user/mock_user.go new file mode 100644 index 000000000..59602ef62 --- /dev/null +++ b/vendor/github.com/golang/mock/sample/mock_user/mock_user.go @@ -0,0 +1,383 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/golang/mock/sample (interfaces: Index,Embed,Embedded) + +package mock_sample + +import ( + bufio "bufio" + bytes "bytes" + gomock "github.com/golang/mock/gomock" + imp1 "github.com/golang/mock/sample/imp1" + imp2 "github.com/golang/mock/sample/imp2" + imp3 "github.com/golang/mock/sample/imp3" + imp4 "github.com/golang/mock/sample/imp4" + hash "hash" + template "html/template" + io "io" + http "net/http" + reflect "reflect" + template0 "text/template" +) + +// MockIndex is a mock of Index interface +type MockIndex struct { + ctrl *gomock.Controller + recorder *MockIndexMockRecorder +} + +// MockIndexMockRecorder is the mock recorder for MockIndex +type MockIndexMockRecorder struct { + mock *MockIndex +} + +// NewMockIndex creates a new mock instance +func NewMockIndex(ctrl *gomock.Controller) *MockIndex { + mock := &MockIndex{ctrl: ctrl} + mock.recorder = &MockIndexMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (_m *MockIndex) EXPECT() *MockIndexMockRecorder { + return _m.recorder +} + +// Anon mocks base method +func (_m *MockIndex) Anon(_param0 string) { + _m.ctrl.Call(_m, "Anon", _param0) +} + +// Anon indicates an expected call of Anon +func (_mr *MockIndexMockRecorder) Anon(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Anon", reflect.TypeOf((*MockIndex)(nil).Anon), arg0) +} + +// Chan mocks base method +func (_m *MockIndex) Chan(_param0 chan int, _param1 chan<- hash.Hash) { + _m.ctrl.Call(_m, "Chan", _param0, _param1) +} + +// Chan indicates an expected call of Chan +func (_mr *MockIndexMockRecorder) Chan(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Chan", reflect.TypeOf((*MockIndex)(nil).Chan), arg0, arg1) +} + +// ConcreteRet mocks base method +func (_m *MockIndex) ConcreteRet() chan<- bool { + ret := _m.ctrl.Call(_m, "ConcreteRet") + ret0, _ := ret[0].(chan<- bool) 
+ return ret0 +} + +// ConcreteRet indicates an expected call of ConcreteRet +func (_mr *MockIndexMockRecorder) ConcreteRet() *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "ConcreteRet", reflect.TypeOf((*MockIndex)(nil).ConcreteRet)) +} + +// Ellip mocks base method +func (_m *MockIndex) Ellip(_param0 string, _param1 ...interface{}) { + _s := []interface{}{_param0} + for _, _x := range _param1 { + _s = append(_s, _x) + } + _m.ctrl.Call(_m, "Ellip", _s...) +} + +// Ellip indicates an expected call of Ellip +func (_mr *MockIndexMockRecorder) Ellip(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0}, arg1...) + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Ellip", reflect.TypeOf((*MockIndex)(nil).Ellip), _s...) +} + +// EllipOnly mocks base method +func (_m *MockIndex) EllipOnly(_param0 ...string) { + _s := []interface{}{} + for _, _x := range _param0 { + _s = append(_s, _x) + } + _m.ctrl.Call(_m, "EllipOnly", _s...) +} + +// EllipOnly indicates an expected call of EllipOnly +func (_mr *MockIndexMockRecorder) EllipOnly(arg0 ...interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "EllipOnly", reflect.TypeOf((*MockIndex)(nil).EllipOnly), arg0...) 
+} + +// ForeignFour mocks base method +func (_m *MockIndex) ForeignFour(_param0 imp4.Imp4) { + _m.ctrl.Call(_m, "ForeignFour", _param0) +} + +// ForeignFour indicates an expected call of ForeignFour +func (_mr *MockIndexMockRecorder) ForeignFour(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "ForeignFour", reflect.TypeOf((*MockIndex)(nil).ForeignFour), arg0) +} + +// ForeignOne mocks base method +func (_m *MockIndex) ForeignOne(_param0 imp1.Imp1) { + _m.ctrl.Call(_m, "ForeignOne", _param0) +} + +// ForeignOne indicates an expected call of ForeignOne +func (_mr *MockIndexMockRecorder) ForeignOne(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "ForeignOne", reflect.TypeOf((*MockIndex)(nil).ForeignOne), arg0) +} + +// ForeignThree mocks base method +func (_m *MockIndex) ForeignThree(_param0 imp3.Imp3) { + _m.ctrl.Call(_m, "ForeignThree", _param0) +} + +// ForeignThree indicates an expected call of ForeignThree +func (_mr *MockIndexMockRecorder) ForeignThree(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "ForeignThree", reflect.TypeOf((*MockIndex)(nil).ForeignThree), arg0) +} + +// ForeignTwo mocks base method +func (_m *MockIndex) ForeignTwo(_param0 imp2.Imp2) { + _m.ctrl.Call(_m, "ForeignTwo", _param0) +} + +// ForeignTwo indicates an expected call of ForeignTwo +func (_mr *MockIndexMockRecorder) ForeignTwo(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "ForeignTwo", reflect.TypeOf((*MockIndex)(nil).ForeignTwo), arg0) +} + +// Func mocks base method +func (_m *MockIndex) Func(_param0 func(http.Request) (int, bool)) { + _m.ctrl.Call(_m, "Func", _param0) +} + +// Func indicates an expected call of Func +func (_mr *MockIndexMockRecorder) Func(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Func", reflect.TypeOf((*MockIndex)(nil).Func), arg0) +} + +// Get 
mocks base method +func (_m *MockIndex) Get(_param0 string) interface{} { + ret := _m.ctrl.Call(_m, "Get", _param0) + ret0, _ := ret[0].(interface{}) + return ret0 +} + +// Get indicates an expected call of Get +func (_mr *MockIndexMockRecorder) Get(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Get", reflect.TypeOf((*MockIndex)(nil).Get), arg0) +} + +// GetTwo mocks base method +func (_m *MockIndex) GetTwo(_param0 string, _param1 string) (interface{}, interface{}) { + ret := _m.ctrl.Call(_m, "GetTwo", _param0, _param1) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(interface{}) + return ret0, ret1 +} + +// GetTwo indicates an expected call of GetTwo +func (_mr *MockIndexMockRecorder) GetTwo(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "GetTwo", reflect.TypeOf((*MockIndex)(nil).GetTwo), arg0, arg1) +} + +// Map mocks base method +func (_m *MockIndex) Map(_param0 map[int]hash.Hash) { + _m.ctrl.Call(_m, "Map", _param0) +} + +// Map indicates an expected call of Map +func (_mr *MockIndexMockRecorder) Map(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Map", reflect.TypeOf((*MockIndex)(nil).Map), arg0) +} + +// NillableRet mocks base method +func (_m *MockIndex) NillableRet() error { + ret := _m.ctrl.Call(_m, "NillableRet") + ret0, _ := ret[0].(error) + return ret0 +} + +// NillableRet indicates an expected call of NillableRet +func (_mr *MockIndexMockRecorder) NillableRet() *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "NillableRet", reflect.TypeOf((*MockIndex)(nil).NillableRet)) +} + +// Other mocks base method +func (_m *MockIndex) Other() hash.Hash { + ret := _m.ctrl.Call(_m, "Other") + ret0, _ := ret[0].(hash.Hash) + return ret0 +} + +// Other indicates an expected call of Other +func (_mr *MockIndexMockRecorder) Other() *gomock.Call { + return 
_mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Other", reflect.TypeOf((*MockIndex)(nil).Other)) +} + +// Ptr mocks base method +func (_m *MockIndex) Ptr(_param0 *int) { + _m.ctrl.Call(_m, "Ptr", _param0) +} + +// Ptr indicates an expected call of Ptr +func (_mr *MockIndexMockRecorder) Ptr(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Ptr", reflect.TypeOf((*MockIndex)(nil).Ptr), arg0) +} + +// Put mocks base method +func (_m *MockIndex) Put(_param0 string, _param1 interface{}) { + _m.ctrl.Call(_m, "Put", _param0, _param1) +} + +// Put indicates an expected call of Put +func (_mr *MockIndexMockRecorder) Put(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Put", reflect.TypeOf((*MockIndex)(nil).Put), arg0, arg1) +} + +// Slice mocks base method +func (_m *MockIndex) Slice(_param0 []int, _param1 []byte) [3]int { + ret := _m.ctrl.Call(_m, "Slice", _param0, _param1) + ret0, _ := ret[0].([3]int) + return ret0 +} + +// Slice indicates an expected call of Slice +func (_mr *MockIndexMockRecorder) Slice(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Slice", reflect.TypeOf((*MockIndex)(nil).Slice), arg0, arg1) +} + +// Struct mocks base method +func (_m *MockIndex) Struct(_param0 struct{}) { + _m.ctrl.Call(_m, "Struct", _param0) +} + +// Struct indicates an expected call of Struct +func (_mr *MockIndexMockRecorder) Struct(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Struct", reflect.TypeOf((*MockIndex)(nil).Struct), arg0) +} + +// StructChan mocks base method +func (_m *MockIndex) StructChan(_param0 chan struct{}) { + _m.ctrl.Call(_m, "StructChan", _param0) +} + +// StructChan indicates an expected call of StructChan +func (_mr *MockIndexMockRecorder) StructChan(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "StructChan", 
reflect.TypeOf((*MockIndex)(nil).StructChan), arg0) +} + +// Summary mocks base method +func (_m *MockIndex) Summary(_param0 *bytes.Buffer, _param1 io.Writer) { + _m.ctrl.Call(_m, "Summary", _param0, _param1) +} + +// Summary indicates an expected call of Summary +func (_mr *MockIndexMockRecorder) Summary(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Summary", reflect.TypeOf((*MockIndex)(nil).Summary), arg0, arg1) +} + +// Templates mocks base method +func (_m *MockIndex) Templates(_param0 template.CSS, _param1 template0.FuncMap) { + _m.ctrl.Call(_m, "Templates", _param0, _param1) +} + +// Templates indicates an expected call of Templates +func (_mr *MockIndexMockRecorder) Templates(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "Templates", reflect.TypeOf((*MockIndex)(nil).Templates), arg0, arg1) +} + +// MockEmbed is a mock of Embed interface +type MockEmbed struct { + ctrl *gomock.Controller + recorder *MockEmbedMockRecorder +} + +// MockEmbedMockRecorder is the mock recorder for MockEmbed +type MockEmbedMockRecorder struct { + mock *MockEmbed +} + +// NewMockEmbed creates a new mock instance +func NewMockEmbed(ctrl *gomock.Controller) *MockEmbed { + mock := &MockEmbed{ctrl: ctrl} + mock.recorder = &MockEmbedMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (_m *MockEmbed) EXPECT() *MockEmbedMockRecorder { + return _m.recorder +} + +// EmbeddedMethod mocks base method +func (_m *MockEmbed) EmbeddedMethod() { + _m.ctrl.Call(_m, "EmbeddedMethod") +} + +// EmbeddedMethod indicates an expected call of EmbeddedMethod +func (_mr *MockEmbedMockRecorder) EmbeddedMethod() *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "EmbeddedMethod", reflect.TypeOf((*MockEmbed)(nil).EmbeddedMethod)) +} + +// ForeignEmbeddedMethod mocks base method +func (_m *MockEmbed) 
ForeignEmbeddedMethod() *bufio.Reader { + ret := _m.ctrl.Call(_m, "ForeignEmbeddedMethod") + ret0, _ := ret[0].(*bufio.Reader) + return ret0 +} + +// ForeignEmbeddedMethod indicates an expected call of ForeignEmbeddedMethod +func (_mr *MockEmbedMockRecorder) ForeignEmbeddedMethod() *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "ForeignEmbeddedMethod", reflect.TypeOf((*MockEmbed)(nil).ForeignEmbeddedMethod)) +} + +// ImplicitPackage mocks base method +func (_m *MockEmbed) ImplicitPackage(_param0 string, _param1 imp1.ImpT, _param2 []imp1.ImpT, _param3 *imp1.ImpT, _param4 chan imp1.ImpT) { + _m.ctrl.Call(_m, "ImplicitPackage", _param0, _param1, _param2, _param3, _param4) +} + +// ImplicitPackage indicates an expected call of ImplicitPackage +func (_mr *MockEmbedMockRecorder) ImplicitPackage(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "ImplicitPackage", reflect.TypeOf((*MockEmbed)(nil).ImplicitPackage), arg0, arg1, arg2, arg3, arg4) +} + +// RegularMethod mocks base method +func (_m *MockEmbed) RegularMethod() { + _m.ctrl.Call(_m, "RegularMethod") +} + +// RegularMethod indicates an expected call of RegularMethod +func (_mr *MockEmbedMockRecorder) RegularMethod() *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "RegularMethod", reflect.TypeOf((*MockEmbed)(nil).RegularMethod)) +} + +// MockEmbedded is a mock of Embedded interface +type MockEmbedded struct { + ctrl *gomock.Controller + recorder *MockEmbeddedMockRecorder +} + +// MockEmbeddedMockRecorder is the mock recorder for MockEmbedded +type MockEmbeddedMockRecorder struct { + mock *MockEmbedded +} + +// NewMockEmbedded creates a new mock instance +func NewMockEmbedded(ctrl *gomock.Controller) *MockEmbedded { + mock := &MockEmbedded{ctrl: ctrl} + mock.recorder = &MockEmbeddedMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func 
(_m *MockEmbedded) EXPECT() *MockEmbeddedMockRecorder { + return _m.recorder +} + +// EmbeddedMethod mocks base method +func (_m *MockEmbedded) EmbeddedMethod() { + _m.ctrl.Call(_m, "EmbeddedMethod") +} + +// EmbeddedMethod indicates an expected call of EmbeddedMethod +func (_mr *MockEmbeddedMockRecorder) EmbeddedMethod() *gomock.Call { + return _mr.mock.ctrl.RecordCallWithMethodType(_mr.mock, "EmbeddedMethod", reflect.TypeOf((*MockEmbedded)(nil).EmbeddedMethod)) +} diff --git a/vendor/github.com/golang/mock/sample/user.go b/vendor/github.com/golang/mock/sample/user.go new file mode 100644 index 000000000..0e4a8142d --- /dev/null +++ b/vendor/github.com/golang/mock/sample/user.go @@ -0,0 +1,114 @@ +//go:generate mockgen -destination mock_user/mock_user.go github.com/golang/mock/sample Index,Embed,Embedded + +// An example package with an interface. +package user + +// Random bunch of imports to test mockgen. +import "io" +import ( + btz "bytes" + "hash" + "log" + "net" + "net/http" + + // Two imports with the same base name. + t1 "html/template" + t2 "text/template" +) + +// Dependencies outside the standard library. +import ( + "github.com/golang/mock/sample/imp1" + renamed2 "github.com/golang/mock/sample/imp2" + . "github.com/golang/mock/sample/imp3" + "github.com/golang/mock/sample/imp4" // calls itself "imp_four" +) + +// A bizarre interface to test corner cases in mockgen. +// This would normally be in its own file or package, +// separate from the user of it (e.g. io.Reader). +type Index interface { + Get(key string) interface{} + GetTwo(key1, key2 string) (v1, v2 interface{}) + Put(key string, value interface{}) + + // Check that imports are handled correctly. + Summary(buf *btz.Buffer, w io.Writer) + Other() hash.Hash + Templates(a t1.CSS, b t2.FuncMap) + + // A method with an anonymous argument. + Anon(string) + + // Methods using foreign types outside the standard library. 
+ ForeignOne(imp1.Imp1) + ForeignTwo(renamed2.Imp2) + ForeignThree(Imp3) + ForeignFour(imp_four.Imp4) + + // A method that returns a nillable type. + NillableRet() error + // A method that returns a non-interface type. + ConcreteRet() chan<- bool + + // Methods with an ellipsis argument. + Ellip(fmt string, args ...interface{}) + EllipOnly(...string) + + // A method with a pointer argument that we will set. + Ptr(arg *int) + + // A method with a slice argument and an array return. + Slice(a []int, b []byte) [3]int + + // A method with channel arguments. + Chan(a chan int, b chan<- hash.Hash) + + // A method with a function argument. + Func(f func(http.Request) (int, bool)) + + // A method with a map argument. + Map(a map[int]hash.Hash) + + // Methods with an unnamed empty struct argument. + Struct(a struct{}) // not so likely + StructChan(a chan struct{}) // a bit more common +} + +// An interface with an embedded interface. +type Embed interface { + RegularMethod() + Embedded + imp1.ForeignEmbedded +} + +type Embedded interface { + EmbeddedMethod() +} + +// some random use of another package that isn't needed by the interface. +var _ net.Addr + +// A function that we will test that uses the above interface. +// It takes a list of keys and values, and puts them in the index. +func Remember(index Index, keys []string, values []interface{}) { + for i, k := range keys { + index.Put(k, values[i]) + } + err := index.NillableRet() + if err != nil { + log.Fatalf("Woah! 
%v", err) + } + if len(keys) > 0 && keys[0] == "a" { + index.Ellip("%d", 0, 1, 1, 2, 3) + index.Ellip("%d", 1, 3, 6, 10, 15) + index.EllipOnly("arg") + } +} + +func GrabPointer(index Index) int { + var a int + index.Ptr(&a) + return a +} diff --git a/vendor/github.com/golang/mock/sample/user_test.go b/vendor/github.com/golang/mock/sample/user_test.go new file mode 100644 index 000000000..c8c3864a3 --- /dev/null +++ b/vendor/github.com/golang/mock/sample/user_test.go @@ -0,0 +1,122 @@ +// A test that uses a mock. +package user_test + +import ( + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/mock/sample" + "github.com/golang/mock/sample/imp1" + mock_user "github.com/golang/mock/sample/mock_user" +) + +func TestRemember(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockIndex := mock_user.NewMockIndex(ctrl) + mockIndex.EXPECT().Put("a", 1) // literals work + mockIndex.EXPECT().Put("b", gomock.Eq(2)) // matchers work too + + // NillableRet returns error. Not declaring it should result in a nil return. + mockIndex.EXPECT().NillableRet() + // Calls that returns something assignable to the return type. + boolc := make(chan bool) + // In this case, "chan bool" is assignable to "chan<- bool". + mockIndex.EXPECT().ConcreteRet().Return(boolc) + // In this case, nil is assignable to "chan<- bool". + mockIndex.EXPECT().ConcreteRet().Return(nil) + + // Should be able to place expectations on variadic methods. + mockIndex.EXPECT().Ellip("%d", 0, 1, 1, 2, 3) // direct args + tri := []interface{}{1, 3, 6, 10, 15} + mockIndex.EXPECT().Ellip("%d", tri...) // args from slice + mockIndex.EXPECT().EllipOnly(gomock.Eq("arg")) + + user.Remember(mockIndex, []string{"a", "b"}, []interface{}{1, 2}) + // Check the ConcreteRet calls. 
+ if c := mockIndex.ConcreteRet(); c != boolc { + t.Errorf("ConcreteRet: got %v, want %v", c, boolc) + } + if c := mockIndex.ConcreteRet(); c != nil { + t.Errorf("ConcreteRet: got %v, want nil", c) + } + + // Try one with an action. + calledString := "" + mockIndex.EXPECT().Put(gomock.Any(), gomock.Any()).Do(func(key string, _ interface{}) { + calledString = key + }) + mockIndex.EXPECT().NillableRet() + user.Remember(mockIndex, []string{"blah"}, []interface{}{7}) + if calledString != "blah" { + t.Fatalf(`Uh oh. %q != "blah"`, calledString) + } + + // Use Do with a nil arg. + mockIndex.EXPECT().Put("nil-key", gomock.Any()).Do(func(key string, value interface{}) { + if value != nil { + t.Errorf("Put did not pass through nil; got %v", value) + } + }) + mockIndex.EXPECT().NillableRet() + user.Remember(mockIndex, []string{"nil-key"}, []interface{}{nil}) +} + +func TestVariadicFunction(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockIndex := mock_user.NewMockIndex(ctrl) + m := mockIndex.EXPECT().Ellip("%d", 0, 1, 1, 2, 3) + m.Do(func(format string, nums ...int) { + sum := 0 + for _, value := range nums { + sum += value + } + if sum != 7 { + t.Errorf("Expected 7, got %d", sum) + } + }) + + mockIndex.Ellip("%d", 0, 1, 1, 2, 3) +} + +func TestGrabPointer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockIndex := mock_user.NewMockIndex(ctrl) + mockIndex.EXPECT().Ptr(gomock.Any()).SetArg(0, 7) // set first argument to 7 + + i := user.GrabPointer(mockIndex) + if i != 7 { + t.Errorf("Expected 7, got %d", i) + } +} + +func TestEmbeddedInterface(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockEmbed := mock_user.NewMockEmbed(ctrl) + mockEmbed.EXPECT().RegularMethod() + mockEmbed.EXPECT().EmbeddedMethod() + mockEmbed.EXPECT().ForeignEmbeddedMethod() + + mockEmbed.RegularMethod() + mockEmbed.EmbeddedMethod() + var emb imp1.ForeignEmbedded = mockEmbed // also does interface check + 
emb.ForeignEmbeddedMethod() +} + +func TestExpectTrueNil(t *testing.T) { + // Make sure that passing "nil" to EXPECT (thus as a nil interface value), + // will correctly match a nil concrete type. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockIndex := mock_user.NewMockIndex(ctrl) + mockIndex.EXPECT().Ptr(nil) // this nil is a nil interface{} + mockIndex.Ptr(nil) // this nil is a nil *int +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/.gitignore b/vendor/github.com/kubernetes-csi/csi-test/.gitignore new file mode 100644 index 000000000..a1338d685 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml new file mode 100644 index 000000000..14e71bf2e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml @@ -0,0 +1,12 @@ +language: go +install: + - curl https://glide.sh/get | sh + - glide install -v +matrix: + include: + - go: 1.8.4 + - go: 1.9.1 +script: +- go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0 +- go vet $(go list ./... | grep -v vendor) +- go test $(go list ./... | grep -v vendor) diff --git a/vendor/github.com/kubernetes-csi/csi-test/LICENSE b/vendor/github.com/kubernetes-csi/csi-test/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kubernetes-csi/csi-test/README.md b/vendor/github.com/kubernetes-csi/csi-test/README.md new file mode 100644 index 000000000..fa1d625f0 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/README.md @@ -0,0 +1,17 @@ + +# csi-test +csi-test is a golang unit testing framework for container orchestration (CO) +system and CSI driver developers. + +### For Container Orchestration Unit Tests +CO developers can use this framework to create drivers based on the +[Golang mock](https://github.com/golang/mock) framework. Please see +[co_test.go](test/co_test.go) for an example. + +### For CSI Driver Unit Tests +Driver developers do not need to leverage the mocking framework, and +instead just use the CSI protocol buffers golang output library. 
This +framework may provide little value currently, but if the need arises, +it may provide future libraries to make development and testing of +drivers easier. Please see the example [driver_test.go](test/driver_test.go) +for more information. diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go new file mode 100644 index 000000000..784b93cbd --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package driver + +//go:generate mockgen -source=$GOPATH/src/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go -imports .=github.com/container-storage-interface/spec/lib/go/csi -package=driver -destination=driver.mock.go +import ( + "net" + "sync" + + "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +type MockCSIDriverServers struct { + Controller *MockControllerServer + Identity *MockIdentityServer + Node *MockNodeServer +} + +type MockCSIDriver struct { + listener net.Listener + server *grpc.Server + conn *grpc.ClientConn + servers *MockCSIDriverServers + wg sync.WaitGroup +} + +func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { + return &MockCSIDriver{ + servers: servers, + } +} + +func (m *MockCSIDriver) goServe() { + m.wg.Add(1) + go func() { + defer m.wg.Done() + m.server.Serve(m.listener) + }() +} + +func (m *MockCSIDriver) Address() string { + return m.listener.Addr().String() +} +func (m *MockCSIDriver) Start() error { + + // Listen on a port assigned by the net package + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return err + } + m.listener = l + + // Create a new grpc server + m.server = grpc.NewServer() + + // Register Mock servers + if m.servers.Controller != nil { + csi.RegisterControllerServer(m.server, m.servers.Controller) + } + if m.servers.Identity != nil { + csi.RegisterIdentityServer(m.server, m.servers.Identity) + } + if m.servers.Node != nil { + csi.RegisterNodeServer(m.server, m.servers.Node) + } + reflection.Register(m.server) + + // Start listening for requests + m.goServe() + return nil +} + +func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { + // Start server + err := m.Start() + if err != nil { + return nil, err + } + + // Create a client connection + m.conn, err = grpc.Dial(m.Address(), grpc.WithInsecure()) + if err != nil { + return nil, err + } + + return m.conn, nil +} + +func (m 
*MockCSIDriver) Stop() { + m.server.Stop() + m.wg.Wait() +} + +func (m *MockCSIDriver) Close() { + m.conn.Close() + m.server.Stop() +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go new file mode 100644 index 000000000..2f4bdba51 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go @@ -0,0 +1,1243 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: /home/lpabon/git/golang/osd/src/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go + +// Package driver is a generated GoMock package. +package driver + +import ( + . "github.com/container-storage-interface/spec/lib/go/csi" + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + reflect "reflect" +) + +// MockisGetSupportedVersionsResponse_Reply is a mock of isGetSupportedVersionsResponse_Reply interface +type MockisGetSupportedVersionsResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisGetSupportedVersionsResponse_ReplyMockRecorder +} + +// MockisGetSupportedVersionsResponse_ReplyMockRecorder is the mock recorder for MockisGetSupportedVersionsResponse_Reply +type MockisGetSupportedVersionsResponse_ReplyMockRecorder struct { + mock *MockisGetSupportedVersionsResponse_Reply +} + +// NewMockisGetSupportedVersionsResponse_Reply creates a new mock instance +func NewMockisGetSupportedVersionsResponse_Reply(ctrl *gomock.Controller) *MockisGetSupportedVersionsResponse_Reply { + mock := &MockisGetSupportedVersionsResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisGetSupportedVersionsResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisGetSupportedVersionsResponse_Reply) EXPECT() *MockisGetSupportedVersionsResponse_ReplyMockRecorder { + return m.recorder +} + +// isGetSupportedVersionsResponse_Reply mocks base method +func 
(m *MockisGetSupportedVersionsResponse_Reply) isGetSupportedVersionsResponse_Reply() { + m.ctrl.Call(m, "isGetSupportedVersionsResponse_Reply") +} + +// isGetSupportedVersionsResponse_Reply indicates an expected call of isGetSupportedVersionsResponse_Reply +func (mr *MockisGetSupportedVersionsResponse_ReplyMockRecorder) isGetSupportedVersionsResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isGetSupportedVersionsResponse_Reply", reflect.TypeOf((*MockisGetSupportedVersionsResponse_Reply)(nil).isGetSupportedVersionsResponse_Reply)) +} + +// MockisGetPluginInfoResponse_Reply is a mock of isGetPluginInfoResponse_Reply interface +type MockisGetPluginInfoResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisGetPluginInfoResponse_ReplyMockRecorder +} + +// MockisGetPluginInfoResponse_ReplyMockRecorder is the mock recorder for MockisGetPluginInfoResponse_Reply +type MockisGetPluginInfoResponse_ReplyMockRecorder struct { + mock *MockisGetPluginInfoResponse_Reply +} + +// NewMockisGetPluginInfoResponse_Reply creates a new mock instance +func NewMockisGetPluginInfoResponse_Reply(ctrl *gomock.Controller) *MockisGetPluginInfoResponse_Reply { + mock := &MockisGetPluginInfoResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisGetPluginInfoResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisGetPluginInfoResponse_Reply) EXPECT() *MockisGetPluginInfoResponse_ReplyMockRecorder { + return m.recorder +} + +// isGetPluginInfoResponse_Reply mocks base method +func (m *MockisGetPluginInfoResponse_Reply) isGetPluginInfoResponse_Reply() { + m.ctrl.Call(m, "isGetPluginInfoResponse_Reply") +} + +// isGetPluginInfoResponse_Reply indicates an expected call of isGetPluginInfoResponse_Reply +func (mr *MockisGetPluginInfoResponse_ReplyMockRecorder) isGetPluginInfoResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"isGetPluginInfoResponse_Reply", reflect.TypeOf((*MockisGetPluginInfoResponse_Reply)(nil).isGetPluginInfoResponse_Reply)) +} + +// MockisCreateVolumeResponse_Reply is a mock of isCreateVolumeResponse_Reply interface +type MockisCreateVolumeResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisCreateVolumeResponse_ReplyMockRecorder +} + +// MockisCreateVolumeResponse_ReplyMockRecorder is the mock recorder for MockisCreateVolumeResponse_Reply +type MockisCreateVolumeResponse_ReplyMockRecorder struct { + mock *MockisCreateVolumeResponse_Reply +} + +// NewMockisCreateVolumeResponse_Reply creates a new mock instance +func NewMockisCreateVolumeResponse_Reply(ctrl *gomock.Controller) *MockisCreateVolumeResponse_Reply { + mock := &MockisCreateVolumeResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisCreateVolumeResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisCreateVolumeResponse_Reply) EXPECT() *MockisCreateVolumeResponse_ReplyMockRecorder { + return m.recorder +} + +// isCreateVolumeResponse_Reply mocks base method +func (m *MockisCreateVolumeResponse_Reply) isCreateVolumeResponse_Reply() { + m.ctrl.Call(m, "isCreateVolumeResponse_Reply") +} + +// isCreateVolumeResponse_Reply indicates an expected call of isCreateVolumeResponse_Reply +func (mr *MockisCreateVolumeResponse_ReplyMockRecorder) isCreateVolumeResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isCreateVolumeResponse_Reply", reflect.TypeOf((*MockisCreateVolumeResponse_Reply)(nil).isCreateVolumeResponse_Reply)) +} + +// MockisVolumeCapability_AccessType is a mock of isVolumeCapability_AccessType interface +type MockisVolumeCapability_AccessType struct { + ctrl *gomock.Controller + recorder *MockisVolumeCapability_AccessTypeMockRecorder +} + +// MockisVolumeCapability_AccessTypeMockRecorder is the mock recorder for MockisVolumeCapability_AccessType +type 
MockisVolumeCapability_AccessTypeMockRecorder struct { + mock *MockisVolumeCapability_AccessType +} + +// NewMockisVolumeCapability_AccessType creates a new mock instance +func NewMockisVolumeCapability_AccessType(ctrl *gomock.Controller) *MockisVolumeCapability_AccessType { + mock := &MockisVolumeCapability_AccessType{ctrl: ctrl} + mock.recorder = &MockisVolumeCapability_AccessTypeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisVolumeCapability_AccessType) EXPECT() *MockisVolumeCapability_AccessTypeMockRecorder { + return m.recorder +} + +// isVolumeCapability_AccessType mocks base method +func (m *MockisVolumeCapability_AccessType) isVolumeCapability_AccessType() { + m.ctrl.Call(m, "isVolumeCapability_AccessType") +} + +// isVolumeCapability_AccessType indicates an expected call of isVolumeCapability_AccessType +func (mr *MockisVolumeCapability_AccessTypeMockRecorder) isVolumeCapability_AccessType() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isVolumeCapability_AccessType", reflect.TypeOf((*MockisVolumeCapability_AccessType)(nil).isVolumeCapability_AccessType)) +} + +// MockisDeleteVolumeResponse_Reply is a mock of isDeleteVolumeResponse_Reply interface +type MockisDeleteVolumeResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisDeleteVolumeResponse_ReplyMockRecorder +} + +// MockisDeleteVolumeResponse_ReplyMockRecorder is the mock recorder for MockisDeleteVolumeResponse_Reply +type MockisDeleteVolumeResponse_ReplyMockRecorder struct { + mock *MockisDeleteVolumeResponse_Reply +} + +// NewMockisDeleteVolumeResponse_Reply creates a new mock instance +func NewMockisDeleteVolumeResponse_Reply(ctrl *gomock.Controller) *MockisDeleteVolumeResponse_Reply { + mock := &MockisDeleteVolumeResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisDeleteVolumeResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the 
caller to indicate expected use +func (m *MockisDeleteVolumeResponse_Reply) EXPECT() *MockisDeleteVolumeResponse_ReplyMockRecorder { + return m.recorder +} + +// isDeleteVolumeResponse_Reply mocks base method +func (m *MockisDeleteVolumeResponse_Reply) isDeleteVolumeResponse_Reply() { + m.ctrl.Call(m, "isDeleteVolumeResponse_Reply") +} + +// isDeleteVolumeResponse_Reply indicates an expected call of isDeleteVolumeResponse_Reply +func (mr *MockisDeleteVolumeResponse_ReplyMockRecorder) isDeleteVolumeResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isDeleteVolumeResponse_Reply", reflect.TypeOf((*MockisDeleteVolumeResponse_Reply)(nil).isDeleteVolumeResponse_Reply)) +} + +// MockisControllerPublishVolumeResponse_Reply is a mock of isControllerPublishVolumeResponse_Reply interface +type MockisControllerPublishVolumeResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisControllerPublishVolumeResponse_ReplyMockRecorder +} + +// MockisControllerPublishVolumeResponse_ReplyMockRecorder is the mock recorder for MockisControllerPublishVolumeResponse_Reply +type MockisControllerPublishVolumeResponse_ReplyMockRecorder struct { + mock *MockisControllerPublishVolumeResponse_Reply +} + +// NewMockisControllerPublishVolumeResponse_Reply creates a new mock instance +func NewMockisControllerPublishVolumeResponse_Reply(ctrl *gomock.Controller) *MockisControllerPublishVolumeResponse_Reply { + mock := &MockisControllerPublishVolumeResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisControllerPublishVolumeResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisControllerPublishVolumeResponse_Reply) EXPECT() *MockisControllerPublishVolumeResponse_ReplyMockRecorder { + return m.recorder +} + +// isControllerPublishVolumeResponse_Reply mocks base method +func (m *MockisControllerPublishVolumeResponse_Reply) isControllerPublishVolumeResponse_Reply() { + 
m.ctrl.Call(m, "isControllerPublishVolumeResponse_Reply") +} + +// isControllerPublishVolumeResponse_Reply indicates an expected call of isControllerPublishVolumeResponse_Reply +func (mr *MockisControllerPublishVolumeResponse_ReplyMockRecorder) isControllerPublishVolumeResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isControllerPublishVolumeResponse_Reply", reflect.TypeOf((*MockisControllerPublishVolumeResponse_Reply)(nil).isControllerPublishVolumeResponse_Reply)) +} + +// MockisControllerUnpublishVolumeResponse_Reply is a mock of isControllerUnpublishVolumeResponse_Reply interface +type MockisControllerUnpublishVolumeResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisControllerUnpublishVolumeResponse_ReplyMockRecorder +} + +// MockisControllerUnpublishVolumeResponse_ReplyMockRecorder is the mock recorder for MockisControllerUnpublishVolumeResponse_Reply +type MockisControllerUnpublishVolumeResponse_ReplyMockRecorder struct { + mock *MockisControllerUnpublishVolumeResponse_Reply +} + +// NewMockisControllerUnpublishVolumeResponse_Reply creates a new mock instance +func NewMockisControllerUnpublishVolumeResponse_Reply(ctrl *gomock.Controller) *MockisControllerUnpublishVolumeResponse_Reply { + mock := &MockisControllerUnpublishVolumeResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisControllerUnpublishVolumeResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisControllerUnpublishVolumeResponse_Reply) EXPECT() *MockisControllerUnpublishVolumeResponse_ReplyMockRecorder { + return m.recorder +} + +// isControllerUnpublishVolumeResponse_Reply mocks base method +func (m *MockisControllerUnpublishVolumeResponse_Reply) isControllerUnpublishVolumeResponse_Reply() { + m.ctrl.Call(m, "isControllerUnpublishVolumeResponse_Reply") +} + +// isControllerUnpublishVolumeResponse_Reply indicates an expected call of 
isControllerUnpublishVolumeResponse_Reply +func (mr *MockisControllerUnpublishVolumeResponse_ReplyMockRecorder) isControllerUnpublishVolumeResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isControllerUnpublishVolumeResponse_Reply", reflect.TypeOf((*MockisControllerUnpublishVolumeResponse_Reply)(nil).isControllerUnpublishVolumeResponse_Reply)) +} + +// MockisValidateVolumeCapabilitiesResponse_Reply is a mock of isValidateVolumeCapabilitiesResponse_Reply interface +type MockisValidateVolumeCapabilitiesResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisValidateVolumeCapabilitiesResponse_ReplyMockRecorder +} + +// MockisValidateVolumeCapabilitiesResponse_ReplyMockRecorder is the mock recorder for MockisValidateVolumeCapabilitiesResponse_Reply +type MockisValidateVolumeCapabilitiesResponse_ReplyMockRecorder struct { + mock *MockisValidateVolumeCapabilitiesResponse_Reply +} + +// NewMockisValidateVolumeCapabilitiesResponse_Reply creates a new mock instance +func NewMockisValidateVolumeCapabilitiesResponse_Reply(ctrl *gomock.Controller) *MockisValidateVolumeCapabilitiesResponse_Reply { + mock := &MockisValidateVolumeCapabilitiesResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisValidateVolumeCapabilitiesResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisValidateVolumeCapabilitiesResponse_Reply) EXPECT() *MockisValidateVolumeCapabilitiesResponse_ReplyMockRecorder { + return m.recorder +} + +// isValidateVolumeCapabilitiesResponse_Reply mocks base method +func (m *MockisValidateVolumeCapabilitiesResponse_Reply) isValidateVolumeCapabilitiesResponse_Reply() { + m.ctrl.Call(m, "isValidateVolumeCapabilitiesResponse_Reply") +} + +// isValidateVolumeCapabilitiesResponse_Reply indicates an expected call of isValidateVolumeCapabilitiesResponse_Reply +func (mr *MockisValidateVolumeCapabilitiesResponse_ReplyMockRecorder) 
isValidateVolumeCapabilitiesResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isValidateVolumeCapabilitiesResponse_Reply", reflect.TypeOf((*MockisValidateVolumeCapabilitiesResponse_Reply)(nil).isValidateVolumeCapabilitiesResponse_Reply)) +} + +// MockisListVolumesResponse_Reply is a mock of isListVolumesResponse_Reply interface +type MockisListVolumesResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisListVolumesResponse_ReplyMockRecorder +} + +// MockisListVolumesResponse_ReplyMockRecorder is the mock recorder for MockisListVolumesResponse_Reply +type MockisListVolumesResponse_ReplyMockRecorder struct { + mock *MockisListVolumesResponse_Reply +} + +// NewMockisListVolumesResponse_Reply creates a new mock instance +func NewMockisListVolumesResponse_Reply(ctrl *gomock.Controller) *MockisListVolumesResponse_Reply { + mock := &MockisListVolumesResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisListVolumesResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisListVolumesResponse_Reply) EXPECT() *MockisListVolumesResponse_ReplyMockRecorder { + return m.recorder +} + +// isListVolumesResponse_Reply mocks base method +func (m *MockisListVolumesResponse_Reply) isListVolumesResponse_Reply() { + m.ctrl.Call(m, "isListVolumesResponse_Reply") +} + +// isListVolumesResponse_Reply indicates an expected call of isListVolumesResponse_Reply +func (mr *MockisListVolumesResponse_ReplyMockRecorder) isListVolumesResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isListVolumesResponse_Reply", reflect.TypeOf((*MockisListVolumesResponse_Reply)(nil).isListVolumesResponse_Reply)) +} + +// MockisGetCapacityResponse_Reply is a mock of isGetCapacityResponse_Reply interface +type MockisGetCapacityResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisGetCapacityResponse_ReplyMockRecorder +} + +// 
MockisGetCapacityResponse_ReplyMockRecorder is the mock recorder for MockisGetCapacityResponse_Reply +type MockisGetCapacityResponse_ReplyMockRecorder struct { + mock *MockisGetCapacityResponse_Reply +} + +// NewMockisGetCapacityResponse_Reply creates a new mock instance +func NewMockisGetCapacityResponse_Reply(ctrl *gomock.Controller) *MockisGetCapacityResponse_Reply { + mock := &MockisGetCapacityResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisGetCapacityResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisGetCapacityResponse_Reply) EXPECT() *MockisGetCapacityResponse_ReplyMockRecorder { + return m.recorder +} + +// isGetCapacityResponse_Reply mocks base method +func (m *MockisGetCapacityResponse_Reply) isGetCapacityResponse_Reply() { + m.ctrl.Call(m, "isGetCapacityResponse_Reply") +} + +// isGetCapacityResponse_Reply indicates an expected call of isGetCapacityResponse_Reply +func (mr *MockisGetCapacityResponse_ReplyMockRecorder) isGetCapacityResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isGetCapacityResponse_Reply", reflect.TypeOf((*MockisGetCapacityResponse_Reply)(nil).isGetCapacityResponse_Reply)) +} + +// MockisControllerGetCapabilitiesResponse_Reply is a mock of isControllerGetCapabilitiesResponse_Reply interface +type MockisControllerGetCapabilitiesResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisControllerGetCapabilitiesResponse_ReplyMockRecorder +} + +// MockisControllerGetCapabilitiesResponse_ReplyMockRecorder is the mock recorder for MockisControllerGetCapabilitiesResponse_Reply +type MockisControllerGetCapabilitiesResponse_ReplyMockRecorder struct { + mock *MockisControllerGetCapabilitiesResponse_Reply +} + +// NewMockisControllerGetCapabilitiesResponse_Reply creates a new mock instance +func NewMockisControllerGetCapabilitiesResponse_Reply(ctrl *gomock.Controller) 
*MockisControllerGetCapabilitiesResponse_Reply { + mock := &MockisControllerGetCapabilitiesResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisControllerGetCapabilitiesResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisControllerGetCapabilitiesResponse_Reply) EXPECT() *MockisControllerGetCapabilitiesResponse_ReplyMockRecorder { + return m.recorder +} + +// isControllerGetCapabilitiesResponse_Reply mocks base method +func (m *MockisControllerGetCapabilitiesResponse_Reply) isControllerGetCapabilitiesResponse_Reply() { + m.ctrl.Call(m, "isControllerGetCapabilitiesResponse_Reply") +} + +// isControllerGetCapabilitiesResponse_Reply indicates an expected call of isControllerGetCapabilitiesResponse_Reply +func (mr *MockisControllerGetCapabilitiesResponse_ReplyMockRecorder) isControllerGetCapabilitiesResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isControllerGetCapabilitiesResponse_Reply", reflect.TypeOf((*MockisControllerGetCapabilitiesResponse_Reply)(nil).isControllerGetCapabilitiesResponse_Reply)) +} + +// MockisControllerServiceCapability_Type is a mock of isControllerServiceCapability_Type interface +type MockisControllerServiceCapability_Type struct { + ctrl *gomock.Controller + recorder *MockisControllerServiceCapability_TypeMockRecorder +} + +// MockisControllerServiceCapability_TypeMockRecorder is the mock recorder for MockisControllerServiceCapability_Type +type MockisControllerServiceCapability_TypeMockRecorder struct { + mock *MockisControllerServiceCapability_Type +} + +// NewMockisControllerServiceCapability_Type creates a new mock instance +func NewMockisControllerServiceCapability_Type(ctrl *gomock.Controller) *MockisControllerServiceCapability_Type { + mock := &MockisControllerServiceCapability_Type{ctrl: ctrl} + mock.recorder = &MockisControllerServiceCapability_TypeMockRecorder{mock} + return mock +} + +// EXPECT returns 
an object that allows the caller to indicate expected use +func (m *MockisControllerServiceCapability_Type) EXPECT() *MockisControllerServiceCapability_TypeMockRecorder { + return m.recorder +} + +// isControllerServiceCapability_Type mocks base method +func (m *MockisControllerServiceCapability_Type) isControllerServiceCapability_Type() { + m.ctrl.Call(m, "isControllerServiceCapability_Type") +} + +// isControllerServiceCapability_Type indicates an expected call of isControllerServiceCapability_Type +func (mr *MockisControllerServiceCapability_TypeMockRecorder) isControllerServiceCapability_Type() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isControllerServiceCapability_Type", reflect.TypeOf((*MockisControllerServiceCapability_Type)(nil).isControllerServiceCapability_Type)) +} + +// MockisNodePublishVolumeResponse_Reply is a mock of isNodePublishVolumeResponse_Reply interface +type MockisNodePublishVolumeResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisNodePublishVolumeResponse_ReplyMockRecorder +} + +// MockisNodePublishVolumeResponse_ReplyMockRecorder is the mock recorder for MockisNodePublishVolumeResponse_Reply +type MockisNodePublishVolumeResponse_ReplyMockRecorder struct { + mock *MockisNodePublishVolumeResponse_Reply +} + +// NewMockisNodePublishVolumeResponse_Reply creates a new mock instance +func NewMockisNodePublishVolumeResponse_Reply(ctrl *gomock.Controller) *MockisNodePublishVolumeResponse_Reply { + mock := &MockisNodePublishVolumeResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisNodePublishVolumeResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisNodePublishVolumeResponse_Reply) EXPECT() *MockisNodePublishVolumeResponse_ReplyMockRecorder { + return m.recorder +} + +// isNodePublishVolumeResponse_Reply mocks base method +func (m *MockisNodePublishVolumeResponse_Reply) isNodePublishVolumeResponse_Reply() { + 
m.ctrl.Call(m, "isNodePublishVolumeResponse_Reply") +} + +// isNodePublishVolumeResponse_Reply indicates an expected call of isNodePublishVolumeResponse_Reply +func (mr *MockisNodePublishVolumeResponse_ReplyMockRecorder) isNodePublishVolumeResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isNodePublishVolumeResponse_Reply", reflect.TypeOf((*MockisNodePublishVolumeResponse_Reply)(nil).isNodePublishVolumeResponse_Reply)) +} + +// MockisNodeUnpublishVolumeResponse_Reply is a mock of isNodeUnpublishVolumeResponse_Reply interface +type MockisNodeUnpublishVolumeResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisNodeUnpublishVolumeResponse_ReplyMockRecorder +} + +// MockisNodeUnpublishVolumeResponse_ReplyMockRecorder is the mock recorder for MockisNodeUnpublishVolumeResponse_Reply +type MockisNodeUnpublishVolumeResponse_ReplyMockRecorder struct { + mock *MockisNodeUnpublishVolumeResponse_Reply +} + +// NewMockisNodeUnpublishVolumeResponse_Reply creates a new mock instance +func NewMockisNodeUnpublishVolumeResponse_Reply(ctrl *gomock.Controller) *MockisNodeUnpublishVolumeResponse_Reply { + mock := &MockisNodeUnpublishVolumeResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisNodeUnpublishVolumeResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisNodeUnpublishVolumeResponse_Reply) EXPECT() *MockisNodeUnpublishVolumeResponse_ReplyMockRecorder { + return m.recorder +} + +// isNodeUnpublishVolumeResponse_Reply mocks base method +func (m *MockisNodeUnpublishVolumeResponse_Reply) isNodeUnpublishVolumeResponse_Reply() { + m.ctrl.Call(m, "isNodeUnpublishVolumeResponse_Reply") +} + +// isNodeUnpublishVolumeResponse_Reply indicates an expected call of isNodeUnpublishVolumeResponse_Reply +func (mr *MockisNodeUnpublishVolumeResponse_ReplyMockRecorder) isNodeUnpublishVolumeResponse_Reply() *gomock.Call { + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isNodeUnpublishVolumeResponse_Reply", reflect.TypeOf((*MockisNodeUnpublishVolumeResponse_Reply)(nil).isNodeUnpublishVolumeResponse_Reply)) +} + +// MockisGetNodeIDResponse_Reply is a mock of isGetNodeIDResponse_Reply interface +type MockisGetNodeIDResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisGetNodeIDResponse_ReplyMockRecorder +} + +// MockisGetNodeIDResponse_ReplyMockRecorder is the mock recorder for MockisGetNodeIDResponse_Reply +type MockisGetNodeIDResponse_ReplyMockRecorder struct { + mock *MockisGetNodeIDResponse_Reply +} + +// NewMockisGetNodeIDResponse_Reply creates a new mock instance +func NewMockisGetNodeIDResponse_Reply(ctrl *gomock.Controller) *MockisGetNodeIDResponse_Reply { + mock := &MockisGetNodeIDResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisGetNodeIDResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisGetNodeIDResponse_Reply) EXPECT() *MockisGetNodeIDResponse_ReplyMockRecorder { + return m.recorder +} + +// isGetNodeIDResponse_Reply mocks base method +func (m *MockisGetNodeIDResponse_Reply) isGetNodeIDResponse_Reply() { + m.ctrl.Call(m, "isGetNodeIDResponse_Reply") +} + +// isGetNodeIDResponse_Reply indicates an expected call of isGetNodeIDResponse_Reply +func (mr *MockisGetNodeIDResponse_ReplyMockRecorder) isGetNodeIDResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isGetNodeIDResponse_Reply", reflect.TypeOf((*MockisGetNodeIDResponse_Reply)(nil).isGetNodeIDResponse_Reply)) +} + +// MockisProbeNodeResponse_Reply is a mock of isProbeNodeResponse_Reply interface +type MockisProbeNodeResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisProbeNodeResponse_ReplyMockRecorder +} + +// MockisProbeNodeResponse_ReplyMockRecorder is the mock recorder for MockisProbeNodeResponse_Reply +type MockisProbeNodeResponse_ReplyMockRecorder struct { 
+ mock *MockisProbeNodeResponse_Reply +} + +// NewMockisProbeNodeResponse_Reply creates a new mock instance +func NewMockisProbeNodeResponse_Reply(ctrl *gomock.Controller) *MockisProbeNodeResponse_Reply { + mock := &MockisProbeNodeResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisProbeNodeResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisProbeNodeResponse_Reply) EXPECT() *MockisProbeNodeResponse_ReplyMockRecorder { + return m.recorder +} + +// isProbeNodeResponse_Reply mocks base method +func (m *MockisProbeNodeResponse_Reply) isProbeNodeResponse_Reply() { + m.ctrl.Call(m, "isProbeNodeResponse_Reply") +} + +// isProbeNodeResponse_Reply indicates an expected call of isProbeNodeResponse_Reply +func (mr *MockisProbeNodeResponse_ReplyMockRecorder) isProbeNodeResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isProbeNodeResponse_Reply", reflect.TypeOf((*MockisProbeNodeResponse_Reply)(nil).isProbeNodeResponse_Reply)) +} + +// MockisNodeGetCapabilitiesResponse_Reply is a mock of isNodeGetCapabilitiesResponse_Reply interface +type MockisNodeGetCapabilitiesResponse_Reply struct { + ctrl *gomock.Controller + recorder *MockisNodeGetCapabilitiesResponse_ReplyMockRecorder +} + +// MockisNodeGetCapabilitiesResponse_ReplyMockRecorder is the mock recorder for MockisNodeGetCapabilitiesResponse_Reply +type MockisNodeGetCapabilitiesResponse_ReplyMockRecorder struct { + mock *MockisNodeGetCapabilitiesResponse_Reply +} + +// NewMockisNodeGetCapabilitiesResponse_Reply creates a new mock instance +func NewMockisNodeGetCapabilitiesResponse_Reply(ctrl *gomock.Controller) *MockisNodeGetCapabilitiesResponse_Reply { + mock := &MockisNodeGetCapabilitiesResponse_Reply{ctrl: ctrl} + mock.recorder = &MockisNodeGetCapabilitiesResponse_ReplyMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m 
*MockisNodeGetCapabilitiesResponse_Reply) EXPECT() *MockisNodeGetCapabilitiesResponse_ReplyMockRecorder { + return m.recorder +} + +// isNodeGetCapabilitiesResponse_Reply mocks base method +func (m *MockisNodeGetCapabilitiesResponse_Reply) isNodeGetCapabilitiesResponse_Reply() { + m.ctrl.Call(m, "isNodeGetCapabilitiesResponse_Reply") +} + +// isNodeGetCapabilitiesResponse_Reply indicates an expected call of isNodeGetCapabilitiesResponse_Reply +func (mr *MockisNodeGetCapabilitiesResponse_ReplyMockRecorder) isNodeGetCapabilitiesResponse_Reply() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isNodeGetCapabilitiesResponse_Reply", reflect.TypeOf((*MockisNodeGetCapabilitiesResponse_Reply)(nil).isNodeGetCapabilitiesResponse_Reply)) +} + +// MockisNodeServiceCapability_Type is a mock of isNodeServiceCapability_Type interface +type MockisNodeServiceCapability_Type struct { + ctrl *gomock.Controller + recorder *MockisNodeServiceCapability_TypeMockRecorder +} + +// MockisNodeServiceCapability_TypeMockRecorder is the mock recorder for MockisNodeServiceCapability_Type +type MockisNodeServiceCapability_TypeMockRecorder struct { + mock *MockisNodeServiceCapability_Type +} + +// NewMockisNodeServiceCapability_Type creates a new mock instance +func NewMockisNodeServiceCapability_Type(ctrl *gomock.Controller) *MockisNodeServiceCapability_Type { + mock := &MockisNodeServiceCapability_Type{ctrl: ctrl} + mock.recorder = &MockisNodeServiceCapability_TypeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisNodeServiceCapability_Type) EXPECT() *MockisNodeServiceCapability_TypeMockRecorder { + return m.recorder +} + +// isNodeServiceCapability_Type mocks base method +func (m *MockisNodeServiceCapability_Type) isNodeServiceCapability_Type() { + m.ctrl.Call(m, "isNodeServiceCapability_Type") +} + +// isNodeServiceCapability_Type indicates an expected call of isNodeServiceCapability_Type 
+func (mr *MockisNodeServiceCapability_TypeMockRecorder) isNodeServiceCapability_Type() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isNodeServiceCapability_Type", reflect.TypeOf((*MockisNodeServiceCapability_Type)(nil).isNodeServiceCapability_Type)) +} + +// MockisError_Value is a mock of isError_Value interface +type MockisError_Value struct { + ctrl *gomock.Controller + recorder *MockisError_ValueMockRecorder +} + +// MockisError_ValueMockRecorder is the mock recorder for MockisError_Value +type MockisError_ValueMockRecorder struct { + mock *MockisError_Value +} + +// NewMockisError_Value creates a new mock instance +func NewMockisError_Value(ctrl *gomock.Controller) *MockisError_Value { + mock := &MockisError_Value{ctrl: ctrl} + mock.recorder = &MockisError_ValueMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockisError_Value) EXPECT() *MockisError_ValueMockRecorder { + return m.recorder +} + +// isError_Value mocks base method +func (m *MockisError_Value) isError_Value() { + m.ctrl.Call(m, "isError_Value") +} + +// isError_Value indicates an expected call of isError_Value +func (mr *MockisError_ValueMockRecorder) isError_Value() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isError_Value", reflect.TypeOf((*MockisError_Value)(nil).isError_Value)) +} + +// MockIdentityClient is a mock of IdentityClient interface +type MockIdentityClient struct { + ctrl *gomock.Controller + recorder *MockIdentityClientMockRecorder +} + +// MockIdentityClientMockRecorder is the mock recorder for MockIdentityClient +type MockIdentityClientMockRecorder struct { + mock *MockIdentityClient +} + +// NewMockIdentityClient creates a new mock instance +func NewMockIdentityClient(ctrl *gomock.Controller) *MockIdentityClient { + mock := &MockIdentityClient{ctrl: ctrl} + mock.recorder = &MockIdentityClientMockRecorder{mock} + return mock +} + +// EXPECT returns 
an object that allows the caller to indicate expected use +func (m *MockIdentityClient) EXPECT() *MockIdentityClientMockRecorder { + return m.recorder +} + +// GetSupportedVersions mocks base method +func (m *MockIdentityClient) GetSupportedVersions(ctx context.Context, in *GetSupportedVersionsRequest, opts ...grpc.CallOption) (*GetSupportedVersionsResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSupportedVersions", varargs...) + ret0, _ := ret[0].(*GetSupportedVersionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSupportedVersions indicates an expected call of GetSupportedVersions +func (mr *MockIdentityClientMockRecorder) GetSupportedVersions(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedVersions", reflect.TypeOf((*MockIdentityClient)(nil).GetSupportedVersions), varargs...) +} + +// GetPluginInfo mocks base method +func (m *MockIdentityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetPluginInfo", varargs...) + ret0, _ := ret[0].(*GetPluginInfoResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPluginInfo indicates an expected call of GetPluginInfo +func (mr *MockIdentityClientMockRecorder) GetPluginInfo(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", reflect.TypeOf((*MockIdentityClient)(nil).GetPluginInfo), varargs...) 
+} + +// MockIdentityServer is a mock of IdentityServer interface +type MockIdentityServer struct { + ctrl *gomock.Controller + recorder *MockIdentityServerMockRecorder +} + +// MockIdentityServerMockRecorder is the mock recorder for MockIdentityServer +type MockIdentityServerMockRecorder struct { + mock *MockIdentityServer +} + +// NewMockIdentityServer creates a new mock instance +func NewMockIdentityServer(ctrl *gomock.Controller) *MockIdentityServer { + mock := &MockIdentityServer{ctrl: ctrl} + mock.recorder = &MockIdentityServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockIdentityServer) EXPECT() *MockIdentityServerMockRecorder { + return m.recorder +} + +// GetSupportedVersions mocks base method +func (m *MockIdentityServer) GetSupportedVersions(arg0 context.Context, arg1 *GetSupportedVersionsRequest) (*GetSupportedVersionsResponse, error) { + ret := m.ctrl.Call(m, "GetSupportedVersions", arg0, arg1) + ret0, _ := ret[0].(*GetSupportedVersionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSupportedVersions indicates an expected call of GetSupportedVersions +func (mr *MockIdentityServerMockRecorder) GetSupportedVersions(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedVersions", reflect.TypeOf((*MockIdentityServer)(nil).GetSupportedVersions), arg0, arg1) +} + +// GetPluginInfo mocks base method +func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *GetPluginInfoRequest) (*GetPluginInfoResponse, error) { + ret := m.ctrl.Call(m, "GetPluginInfo", arg0, arg1) + ret0, _ := ret[0].(*GetPluginInfoResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPluginInfo indicates an expected call of GetPluginInfo +func (mr *MockIdentityServerMockRecorder) GetPluginInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", 
reflect.TypeOf((*MockIdentityServer)(nil).GetPluginInfo), arg0, arg1) +} + +// MockControllerClient is a mock of ControllerClient interface +type MockControllerClient struct { + ctrl *gomock.Controller + recorder *MockControllerClientMockRecorder +} + +// MockControllerClientMockRecorder is the mock recorder for MockControllerClient +type MockControllerClientMockRecorder struct { + mock *MockControllerClient +} + +// NewMockControllerClient creates a new mock instance +func NewMockControllerClient(ctrl *gomock.Controller) *MockControllerClient { + mock := &MockControllerClient{ctrl: ctrl} + mock.recorder = &MockControllerClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockControllerClient) EXPECT() *MockControllerClientMockRecorder { + return m.recorder +} + +// CreateVolume mocks base method +func (m *MockControllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateVolume", varargs...) + ret0, _ := ret[0].(*CreateVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateVolume indicates an expected call of CreateVolume +func (mr *MockControllerClientMockRecorder) CreateVolume(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerClient)(nil).CreateVolume), varargs...) +} + +// DeleteVolume mocks base method +func (m *MockControllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteVolume", varargs...) 
+ ret0, _ := ret[0].(*DeleteVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteVolume indicates an expected call of DeleteVolume +func (mr *MockControllerClientMockRecorder) DeleteVolume(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockControllerClient)(nil).DeleteVolume), varargs...) +} + +// ControllerPublishVolume mocks base method +func (m *MockControllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ControllerPublishVolume", varargs...) + ret0, _ := ret[0].(*ControllerPublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerPublishVolume indicates an expected call of ControllerPublishVolume +func (mr *MockControllerClientMockRecorder) ControllerPublishVolume(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerPublishVolume", reflect.TypeOf((*MockControllerClient)(nil).ControllerPublishVolume), varargs...) +} + +// ControllerUnpublishVolume mocks base method +func (m *MockControllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ControllerUnpublishVolume", varargs...) 
+ ret0, _ := ret[0].(*ControllerUnpublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerUnpublishVolume indicates an expected call of ControllerUnpublishVolume +func (mr *MockControllerClientMockRecorder) ControllerUnpublishVolume(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", reflect.TypeOf((*MockControllerClient)(nil).ControllerUnpublishVolume), varargs...) +} + +// ValidateVolumeCapabilities mocks base method +func (m *MockControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateVolumeCapabilities", varargs...) + ret0, _ := ret[0].(*ValidateVolumeCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateVolumeCapabilities indicates an expected call of ValidateVolumeCapabilities +func (mr *MockControllerClientMockRecorder) ValidateVolumeCapabilities(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeCapabilities", reflect.TypeOf((*MockControllerClient)(nil).ValidateVolumeCapabilities), varargs...) +} + +// ListVolumes mocks base method +func (m *MockControllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListVolumes", varargs...) 
+ ret0, _ := ret[0].(*ListVolumesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListVolumes indicates an expected call of ListVolumes +func (mr *MockControllerClientMockRecorder) ListVolumes(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVolumes", reflect.TypeOf((*MockControllerClient)(nil).ListVolumes), varargs...) +} + +// GetCapacity mocks base method +func (m *MockControllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetCapacity", varargs...) + ret0, _ := ret[0].(*GetCapacityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCapacity indicates an expected call of GetCapacity +func (mr *MockControllerClientMockRecorder) GetCapacity(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", reflect.TypeOf((*MockControllerClient)(nil).GetCapacity), varargs...) +} + +// ControllerGetCapabilities mocks base method +func (m *MockControllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ControllerGetCapabilities", varargs...) 
+ ret0, _ := ret[0].(*ControllerGetCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerGetCapabilities indicates an expected call of ControllerGetCapabilities +func (mr *MockControllerClientMockRecorder) ControllerGetCapabilities(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerClient)(nil).ControllerGetCapabilities), varargs...) +} + +// MockControllerServer is a mock of ControllerServer interface +type MockControllerServer struct { + ctrl *gomock.Controller + recorder *MockControllerServerMockRecorder +} + +// MockControllerServerMockRecorder is the mock recorder for MockControllerServer +type MockControllerServerMockRecorder struct { + mock *MockControllerServer +} + +// NewMockControllerServer creates a new mock instance +func NewMockControllerServer(ctrl *gomock.Controller) *MockControllerServer { + mock := &MockControllerServer{ctrl: ctrl} + mock.recorder = &MockControllerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockControllerServer) EXPECT() *MockControllerServerMockRecorder { + return m.recorder +} + +// CreateVolume mocks base method +func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *CreateVolumeRequest) (*CreateVolumeResponse, error) { + ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1) + ret0, _ := ret[0].(*CreateVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateVolume indicates an expected call of CreateVolume +func (mr *MockControllerServerMockRecorder) CreateVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerServer)(nil).CreateVolume), arg0, arg1) +} + +// DeleteVolume mocks base method +func (m 
*MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *DeleteVolumeRequest) (*DeleteVolumeResponse, error) { + ret := m.ctrl.Call(m, "DeleteVolume", arg0, arg1) + ret0, _ := ret[0].(*DeleteVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteVolume indicates an expected call of DeleteVolume +func (mr *MockControllerServerMockRecorder) DeleteVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockControllerServer)(nil).DeleteVolume), arg0, arg1) +} + +// ControllerPublishVolume mocks base method +func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerPublishVolume", arg0, arg1) + ret0, _ := ret[0].(*ControllerPublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerPublishVolume indicates an expected call of ControllerPublishVolume +func (mr *MockControllerServerMockRecorder) ControllerPublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerPublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerPublishVolume), arg0, arg1) +} + +// ControllerUnpublishVolume mocks base method +func (m *MockControllerServer) ControllerUnpublishVolume(arg0 context.Context, arg1 *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*ControllerUnpublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerUnpublishVolume indicates an expected call of ControllerUnpublishVolume +func (mr *MockControllerServerMockRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", 
reflect.TypeOf((*MockControllerServer)(nil).ControllerUnpublishVolume), arg0, arg1) +} + +// ValidateVolumeCapabilities mocks base method +func (m *MockControllerServer) ValidateVolumeCapabilities(arg0 context.Context, arg1 *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ValidateVolumeCapabilities", arg0, arg1) + ret0, _ := ret[0].(*ValidateVolumeCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateVolumeCapabilities indicates an expected call of ValidateVolumeCapabilities +func (mr *MockControllerServerMockRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ValidateVolumeCapabilities), arg0, arg1) +} + +// ListVolumes mocks base method +func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *ListVolumesRequest) (*ListVolumesResponse, error) { + ret := m.ctrl.Call(m, "ListVolumes", arg0, arg1) + ret0, _ := ret[0].(*ListVolumesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListVolumes indicates an expected call of ListVolumes +func (mr *MockControllerServerMockRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVolumes", reflect.TypeOf((*MockControllerServer)(nil).ListVolumes), arg0, arg1) +} + +// GetCapacity mocks base method +func (m *MockControllerServer) GetCapacity(arg0 context.Context, arg1 *GetCapacityRequest) (*GetCapacityResponse, error) { + ret := m.ctrl.Call(m, "GetCapacity", arg0, arg1) + ret0, _ := ret[0].(*GetCapacityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCapacity indicates an expected call of GetCapacity +func (mr *MockControllerServerMockRecorder) GetCapacity(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", 
reflect.TypeOf((*MockControllerServer)(nil).GetCapacity), arg0, arg1) +} + +// ControllerGetCapabilities mocks base method +func (m *MockControllerServer) ControllerGetCapabilities(arg0 context.Context, arg1 *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ControllerGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*ControllerGetCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerGetCapabilities indicates an expected call of ControllerGetCapabilities +func (mr *MockControllerServerMockRecorder) ControllerGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetCapabilities), arg0, arg1) +} + +// MockNodeClient is a mock of NodeClient interface +type MockNodeClient struct { + ctrl *gomock.Controller + recorder *MockNodeClientMockRecorder +} + +// MockNodeClientMockRecorder is the mock recorder for MockNodeClient +type MockNodeClientMockRecorder struct { + mock *MockNodeClient +} + +// NewMockNodeClient creates a new mock instance +func NewMockNodeClient(ctrl *gomock.Controller) *MockNodeClient { + mock := &MockNodeClient{ctrl: ctrl} + mock.recorder = &MockNodeClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockNodeClient) EXPECT() *MockNodeClientMockRecorder { + return m.recorder +} + +// NodePublishVolume mocks base method +func (m *MockNodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "NodePublishVolume", varargs...) 
+ ret0, _ := ret[0].(*NodePublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodePublishVolume indicates an expected call of NodePublishVolume +func (mr *MockNodeClientMockRecorder) NodePublishVolume(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeClient)(nil).NodePublishVolume), varargs...) +} + +// NodeUnpublishVolume mocks base method +func (m *MockNodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "NodeUnpublishVolume", varargs...) + ret0, _ := ret[0].(*NodeUnpublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeUnpublishVolume indicates an expected call of NodeUnpublishVolume +func (mr *MockNodeClientMockRecorder) NodeUnpublishVolume(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", reflect.TypeOf((*MockNodeClient)(nil).NodeUnpublishVolume), varargs...) +} + +// GetNodeID mocks base method +func (m *MockNodeClient) GetNodeID(ctx context.Context, in *GetNodeIDRequest, opts ...grpc.CallOption) (*GetNodeIDResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetNodeID", varargs...) + ret0, _ := ret[0].(*GetNodeIDResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNodeID indicates an expected call of GetNodeID +func (mr *MockNodeClientMockRecorder) GetNodeID(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeID", reflect.TypeOf((*MockNodeClient)(nil).GetNodeID), varargs...) +} + +// ProbeNode mocks base method +func (m *MockNodeClient) ProbeNode(ctx context.Context, in *ProbeNodeRequest, opts ...grpc.CallOption) (*ProbeNodeResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ProbeNode", varargs...) + ret0, _ := ret[0].(*ProbeNodeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProbeNode indicates an expected call of ProbeNode +func (mr *MockNodeClientMockRecorder) ProbeNode(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProbeNode", reflect.TypeOf((*MockNodeClient)(nil).ProbeNode), varargs...) +} + +// NodeGetCapabilities mocks base method +func (m *MockNodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "NodeGetCapabilities", varargs...) + ret0, _ := ret[0].(*NodeGetCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetCapabilities indicates an expected call of NodeGetCapabilities +func (mr *MockNodeClientMockRecorder) NodeGetCapabilities(ctx, in interface{}, opts ...interface{}) *gomock.Call { + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeClient)(nil).NodeGetCapabilities), varargs...) 
+} + +// MockNodeServer is a mock of NodeServer interface +type MockNodeServer struct { + ctrl *gomock.Controller + recorder *MockNodeServerMockRecorder +} + +// MockNodeServerMockRecorder is the mock recorder for MockNodeServer +type MockNodeServerMockRecorder struct { + mock *MockNodeServer +} + +// NewMockNodeServer creates a new mock instance +func NewMockNodeServer(ctrl *gomock.Controller) *MockNodeServer { + mock := &MockNodeServer{ctrl: ctrl} + mock.recorder = &MockNodeServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder { + return m.recorder +} + +// NodePublishVolume mocks base method +func (m *MockNodeServer) NodePublishVolume(arg0 context.Context, arg1 *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodePublishVolume", arg0, arg1) + ret0, _ := ret[0].(*NodePublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodePublishVolume indicates an expected call of NodePublishVolume +func (mr *MockNodeServerMockRecorder) NodePublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodePublishVolume), arg0, arg1) +} + +// NodeUnpublishVolume mocks base method +func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*NodeUnpublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeUnpublishVolume indicates an expected call of NodeUnpublishVolume +func (mr *MockNodeServerMockRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", 
reflect.TypeOf((*MockNodeServer)(nil).NodeUnpublishVolume), arg0, arg1) +} + +// GetNodeID mocks base method +func (m *MockNodeServer) GetNodeID(arg0 context.Context, arg1 *GetNodeIDRequest) (*GetNodeIDResponse, error) { + ret := m.ctrl.Call(m, "GetNodeID", arg0, arg1) + ret0, _ := ret[0].(*GetNodeIDResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNodeID indicates an expected call of GetNodeID +func (mr *MockNodeServerMockRecorder) GetNodeID(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeID", reflect.TypeOf((*MockNodeServer)(nil).GetNodeID), arg0, arg1) +} + +// ProbeNode mocks base method +func (m *MockNodeServer) ProbeNode(arg0 context.Context, arg1 *ProbeNodeRequest) (*ProbeNodeResponse, error) { + ret := m.ctrl.Call(m, "ProbeNode", arg0, arg1) + ret0, _ := ret[0].(*ProbeNodeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProbeNode indicates an expected call of ProbeNode +func (mr *MockNodeServerMockRecorder) ProbeNode(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProbeNode", reflect.TypeOf((*MockNodeServer)(nil).ProbeNode), arg0, arg1) +} + +// NodeGetCapabilities mocks base method +func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*NodeGetCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetCapabilities indicates an expected call of NodeGetCapabilities +func (mr *MockNodeServerMockRecorder) NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/glide.lock 
b/vendor/github.com/kubernetes-csi/csi-test/glide.lock new file mode 100644 index 000000000..d42e8fb4f --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/glide.lock @@ -0,0 +1,67 @@ +hash: da66d2470299478281bcfc7dc7a92894b909cc6969c9fcc187e5f6f8f98cc1a6 +updated: 2017-10-23T15:41:39.748284252-04:00 +imports: +- name: github.com/container-storage-interface/spec + version: d68da7a7c330a88253e26c06b7c157ec8119d5d6 + subpackages: + - lib/go/csi +- name: github.com/golang/protobuf + version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 + subpackages: + - proto + - protoc-gen-go/descriptor + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/timestamp +- name: golang.org/x/net + version: 5561cd9b4330353950f399814f427425c0a26fd2 + subpackages: + - context + - http2 + - http2/hpack + - idna + - internal/timeseries + - lex/httplex + - trace +- name: golang.org/x/text + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 + subpackages: + - secure/bidirule + - transform + - unicode/bidi + - unicode/norm +- name: google.golang.org/genproto + version: f676e0f3ac6395ff1a529ae59a6670878a8371a6 + subpackages: + - googleapis/rpc/status +- name: google.golang.org/grpc + version: 1687ce5770e998bcac6a136af6b52f079b9d902b + subpackages: + - balancer + - balancer/roundrobin + - codes + - connectivity + - credentials + - grpclb/grpc_lb_v1/messages + - grpclog + - internal + - keepalive + - metadata + - naming + - peer + - reflection + - reflection/grpc_reflection_v1alpha + - resolver + - resolver/dns + - resolver/passthrough + - stats + - status + - tap + - transport +testImports: +- name: github.com/golang/mock + version: 61503c535dc549c7ffc1ff07902345658a0f20a2 + subpackages: + - gomock + - mockgen diff --git a/vendor/github.com/kubernetes-csi/csi-test/glide.yaml b/vendor/github.com/kubernetes-csi/csi-test/glide.yaml new file mode 100644 index 000000000..b04e40ed7 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/glide.yaml @@ -0,0 +1,16 @@ +package: 
github.com/kubernetes-csi/csi-test +import: +- package: github.com/container-storage-interface/spec + subpackages: + - lib/go/csi +- package: google.golang.org/grpc + subpackages: + - reflection +testImport: +- package: github.com/golang/mock + subpackages: + - gomock + - mockgen +- package: golang.org/x/net + subpackages: + - context diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go new file mode 100644 index 000000000..3f5bd44ca --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go @@ -0,0 +1,125 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package test + +import ( + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + gomock "github.com/golang/mock/gomock" + mock_driver "github.com/kubernetes-csi/csi-test/driver" + mock_utils "github.com/kubernetes-csi/csi-test/utils" + "golang.org/x/net/context" +) + +func TestPluginInfoResponse(t *testing.T) { + + // Setup mock + m := gomock.NewController(t) + defer m.Finish() + driver := mock_driver.NewMockIdentityServer(m) + + // Setup input + in := &csi.GetPluginInfoRequest{ + Version: &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + } + + // Setup mock outout + out := &csi.GetPluginInfoResponse{ + Reply: &csi.GetPluginInfoResponse_Result_{ + Result: &csi.GetPluginInfoResponse_Result{ + Name: "mock", + VendorVersion: "0.1.1", + Manifest: map[string]string{ + "hello": "world", + }, + }, + }, + } + + // Setup expectation + driver.EXPECT().GetPluginInfo(nil, in).Return(out, nil).Times(1) + + // Actual call + r, err := driver.GetPluginInfo(nil, in) + name := r.GetResult().GetName() + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + if name != "mock" { + t.Errorf("Unknown name: %s\n", name) + } +} + +func TestGRPCGetPluginInfoReponse(t *testing.T) { + + // Setup mock + m := gomock.NewController(&mock_utils.SafeGoroutineTester{}) + defer m.Finish() + driver := mock_driver.NewMockIdentityServer(m) + + // Setup input + in := &csi.GetPluginInfoRequest{ + Version: &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + } + + // Setup mock outout + out := &csi.GetPluginInfoResponse{ + Reply: &csi.GetPluginInfoResponse_Result_{ + Result: &csi.GetPluginInfoResponse_Result{ + Name: "mock", + VendorVersion: "0.1.1", + Manifest: map[string]string{ + "hello": "world", + }, + }, + }, + } + + // Setup expectation + // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value + driver.EXPECT().GetPluginInfo(gomock.Any(), in).Return(out, nil).Times(1) + + // Create a new RPC + server := 
mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ + Identity: driver, + }) + conn, err := server.Nexus() + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + defer server.Close() + + // Make call + c := csi.NewIdentityClient(conn) + r, err := c.GetPluginInfo(context.Background(), in) + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + + name := r.GetResult().GetName() + if name != "mock" { + t.Errorf("Unknown name: %s\n", name) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go new file mode 100644 index 000000000..989ee0b9b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go @@ -0,0 +1,138 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package test + +import ( + "net" + "sync" + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +// Example simple driver +// This example assumes that your driver will create the server and listen on +// some unix domain socket or port for tests. 
+type simpleDriver struct { + listener net.Listener + server *grpc.Server + wg sync.WaitGroup +} + +func (s *simpleDriver) GetSupportedVersions( + context.Context, *csi.GetSupportedVersionsRequest) (*csi.GetSupportedVersionsResponse, error) { + return &csi.GetSupportedVersionsResponse{ + Reply: &csi.GetSupportedVersionsResponse_Result_{ + Result: &csi.GetSupportedVersionsResponse_Result{ + SupportedVersions: []*csi.Version{ + &csi.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + }, + }, + }, + }, nil +} + +func (s *simpleDriver) GetPluginInfo( + context.Context, *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { + return &csi.GetPluginInfoResponse{ + Reply: &csi.GetPluginInfoResponse_Result_{ + Result: &csi.GetPluginInfoResponse_Result{ + Name: "simpleDriver", + VendorVersion: "0.1.1", + Manifest: map[string]string{ + "hello": "world", + }, + }, + }, + }, nil +} + +func (s *simpleDriver) goServe() { + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.server.Serve(s.listener) + }() +} + +func (s *simpleDriver) Address() string { + return s.listener.Addr().String() +} + +func (s *simpleDriver) Start() error { + // Listen on a port assigned by the net package + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return err + } + s.listener = l + + // Create a new grpc server + s.server = grpc.NewServer() + + csi.RegisterIdentityServer(s.server, s) + reflection.Register(s.server) + + // Start listening for requests + s.goServe() + return nil +} + +func (s *simpleDriver) Stop() { + s.server.Stop() + s.wg.Wait() +} + +// +// Tests +// +func TestSimpleDriver(t *testing.T) { + + // Setup simple driver + s := &simpleDriver{} + err := s.Start() + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + defer s.Stop() + + // Setup a connection to the driver + conn, err := grpc.Dial(s.Address(), grpc.WithInsecure()) + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + defer conn.Close() + + // Make a call + c := csi.NewIdentityClient(conn) 
+ r, err := c.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{}) + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + + // Verify + name := r.GetResult().GetName() + if name != "simpleDriver" { + t.Errorf("Unknown name: %s\n", name) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go new file mode 100644 index 000000000..c89a5cf1d --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go @@ -0,0 +1,40 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import "fmt" + +// SafeGoroutineTester is an implementation of the mock ... interface +// which can be used to use the mock functions in another go routine. +// +// The major issue is that the golang mock framework uses t.Fatalf() +// which causes a deadlock when called in another goroutine. To avoid +// this issue, this simple implementation prints the error then panics, +// which avoids the deadlock. +type SafeGoroutineTester struct{} + +// Errorf prints the error to the screen then panics +func (s *SafeGoroutineTester) Errorf(format string, args ...interface{}) { + fmt.Printf(format, args) + panic("MOCK TEST ERROR") +} + +// Fatalf prints the error to the screen then panics +func (s *SafeGoroutineTester) Fatalf(format string, args ...interface{}) { + fmt.Printf(format+"\n", args...) 
+ panic("MOCK TEST FATAL FAILURE") +} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go index 19b260dea..c7244b782 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/message.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/message.go @@ -789,6 +789,12 @@ func (m *Message) Unpack(msg []byte) error { // Pack packs a full Message. func (m *Message) Pack() ([]byte, error) { + return m.AppendPack(make([]byte, 0, packStartingCap)) +} + +// AppendPack is like Pack but appends the full Message to b and returns the +// extended buffer. +func (m *Message) AppendPack(b []byte) ([]byte, error) { // Validate the lengths. It is very unlikely that anyone will try to // pack more than 65535 of any particular type, but it is possible and // we should fail gracefully. @@ -813,9 +819,7 @@ func (m *Message) Pack() ([]byte, error) { h.authorities = uint16(len(m.Authorities)) h.additionals = uint16(len(m.Additionals)) - msg := make([]byte, 0, packStartingCap) - - msg = h.pack(msg) + msg := h.pack(b) // RFC 1035 allows (but does not require) compression for packing. 
RFC // 1035 requires unpacking implementations to support compression, so diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go index 9295d36ce..2bb763420 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go @@ -852,6 +852,31 @@ func smallTestMsg() Message { } } +func BenchmarkPack(b *testing.B) { + msg := largeTestMsg() + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + if _, err := msg.Pack(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkAppendPack(b *testing.B) { + msg := largeTestMsg() + buf := make([]byte, 0, packStartingCap) + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + if _, err := msg.AppendPack(buf[:0]); err != nil { + b.Fatal(err) + } + } +} + func largeTestMsg() Message { name := mustNewName("foo.bar.example.com.") return Message{ diff --git a/vendor/golang.org/x/sys/unix/linux/Dockerfile b/vendor/golang.org/x/sys/unix/linux/Dockerfile index c448b9dde..7fe5fc3c4 100644 --- a/vendor/golang.org/x/sys/unix/linux/Dockerfile +++ b/vendor/golang.org/x/sys/unix/linux/Dockerfile @@ -17,10 +17,10 @@ RUN git clone --branch v4.13 --depth 1 https://kernel.googlesource.com/pub/scm/l # GNU C library: Released 02 Aug 2017 (we should try to get a secure way to clone this) RUN git clone --branch glibc-2.26 --depth 1 git://sourceware.org/git/glibc.git -# Get Go 1.8 (https://github.com/docker-library/golang/blob/master/1.8/Dockerfile) -ENV GOLANG_VERSION 1.8 +# Get Go 1.9.2 +ENV GOLANG_VERSION 1.9.2 ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz -ENV GOLANG_DOWNLOAD_SHA256 53ab94104ee3923e228a2cb2116e5e462ad3ebaeea06ff04463479d7f12d27ca +ENV GOLANG_DOWNLOAD_SHA256 de874549d9a8d8d8062be05808509c09a88a248e77ec14eb77453530829ac02b RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz \ && echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \ @@ -33,7 +33,7 @@ ENV 
PATH /usr/local/go/bin:$PATH RUN apt-get update && apt-get install -y \ gawk make python \ gcc gcc-multilib \ - gettext texinfo \ + gettext texinfo \ && rm -rf /var/lib/apt/lists/* # Emulator and cross compilers RUN apt-get update && apt-get install -y \ diff --git a/vendor/golang.org/x/sys/unix/linux/mkall.go b/vendor/golang.org/x/sys/unix/linux/mkall.go index 184dad735..89b2fe886 100644 --- a/vendor/golang.org/x/sys/unix/linux/mkall.go +++ b/vendor/golang.org/x/sys/unix/linux/mkall.go @@ -178,6 +178,7 @@ func main() { for _, p := range ptracePairs { if err := generatePtracePair(p.a1, p.a2); err != nil { fmt.Printf("%v\n***** FAILURE: %s/%s *****\n\n", err, p.a1, p.a2) + ok = false } } if ok { @@ -423,7 +424,7 @@ func generatePtracePair(arch1, arch2 string) error { return err } buf := bufio.NewWriter(f) - fmt.Fprintf(buf, "// Code generated by linux/mkall.go generatePtracePair(%s, %s). DO NOT EDIT.\n", arch1, arch2) + fmt.Fprintf(buf, "// Code generated by linux/mkall.go generatePtracePair(%s, %s). DO NOT EDIT.\n", arch1, arch2) fmt.Fprintf(buf, "\n") fmt.Fprintf(buf, "// +build linux\n") fmt.Fprintf(buf, "// +build %s %s\n", arch1, arch2) diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index d048da8ef..857d2a42d 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -49,19 +49,3 @@ func BytePtrFromString(s string) (*byte, error) { // Single-word zero for use when we need a valid pointer to 0 bytes. // See mkunix.pl. 
var _zero uintptr - -func (ts *Timespec) Unix() (sec int64, nsec int64) { - return int64(ts.Sec), int64(ts.Nsec) -} - -func (tv *Timeval) Unix() (sec int64, nsec int64) { - return int64(tv.Sec), int64(tv.Usec) * 1000 -} - -func (ts *Timespec) Nano() int64 { - return int64(ts.Sec)*1e9 + int64(ts.Nsec) -} - -func (tv *Timeval) Nano() int64 { - return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 -} diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 408e63081..a159fc4b6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -102,6 +102,15 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + /* * Exposed directly */ @@ -243,7 +252,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // nfssvc // nnpfspioctl // openat -// poll // preadv // profil // pwritev diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 80be7902b..3ab9e07c8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -578,6 +578,15 @@ func IoctlGetTermio(fd int, req uint) (*Termio, error) { return &value, err } +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index e46382467..139fbbebb 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ 
-38,3 +38,25 @@ func NsecToTimeval(nsec int64) Timeval { } return setTimeval(sec, usec) } + +// Unix returns ts as the number of seconds and nanoseconds elapsed since the +// Unix epoch. +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +// Unix returns tv as the number of seconds and nanoseconds elapsed since the +// Unix epoch. +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +// Nano returns ts as the number of nanoseconds elapsed since the Unix epoch. +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +// Nano returns tv as the number of nanoseconds elapsed since the Unix epoch. +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go index 392da69bf..44ea7c028 100644 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -17,6 +17,7 @@ package unix #define KERNEL #include #include +#include #include #include #include @@ -249,3 +250,20 @@ const ( AT_FDCWD = C.AT_FDCWD AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go index 6d7461401..f77715544 100644 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -24,6 +24,7 @@ package unix #include #include #include +#include #include #include #include @@ -263,3 +264,20 @@ type Termios C.struct_termios type Termio C.struct_termio type Winsize 
C.struct_winsize + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index b40d9d855..bb8a7724b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1277,7 +1277,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dea0767c3..cf0b2249f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1187,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1278,7 +1278,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 41ecde00d..57cfcf3fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1282,7 +1282,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 0b4dbcaf1..b6e5b090e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1188,7 +1188,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1268,7 +1268,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index e7ccdd964..0113e1f6a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1279,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c7150a90c..6857657a5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1187,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1279,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 0cd94a1b7..14f7e0e05 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1187,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1279,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index dc7e101d9..f795862d8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1279,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76876ccce..2544c4b63 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1189,7 +1189,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1335,7 +1335,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9d68b5bd2..133bdf584 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1189,7 +1189,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1335,7 +1335,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index e9419e944..b921fb17a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1186,7 +1186,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1339,7 +1339,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zptrace386_linux.go b/vendor/golang.org/x/sys/unix/zptrace386_linux.go index a55035927..2d21c49e1 100644 --- a/vendor/golang.org/x/sys/unix/zptrace386_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace386_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT. 
// +build linux // +build 386 amd64 diff --git a/vendor/golang.org/x/sys/unix/zptracearm_linux.go b/vendor/golang.org/x/sys/unix/zptracearm_linux.go index 630dc0c0d..faf23bbed 100644 --- a/vendor/golang.org/x/sys/unix/zptracearm_linux.go +++ b/vendor/golang.org/x/sys/unix/zptracearm_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT. // +build linux // +build arm arm64 diff --git a/vendor/golang.org/x/sys/unix/zptracemips_linux.go b/vendor/golang.org/x/sys/unix/zptracemips_linux.go index 23f662acd..c431131e6 100644 --- a/vendor/golang.org/x/sys/unix/zptracemips_linux.go +++ b/vendor/golang.org/x/sys/unix/zptracemips_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT. // +build linux // +build mips mips64 diff --git a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go index 057caecf2..dc3d6d373 100644 --- a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT. 
// +build linux // +build mipsle mips64le diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 7baea87c7..f135b65fe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -393,6 +393,17 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d69ce6b5..d2bd72ddc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -393,6 +393,17 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 41572c26e..317f8c24c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -393,6 +393,17 @@ func getdents(fd int, buf 
[]byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 98b266550..1d4527649 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -29,6 +29,7 @@ import ( //go:cgo_import_dynamic libc___major __major "libc.so" //go:cgo_import_dynamic libc___minor __minor "libc.so" //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" +//go:cgo_import_dynamic libc_poll poll "libc.so" //go:cgo_import_dynamic libc_access access "libc.so" //go:cgo_import_dynamic libc_adjtime adjtime "libc.so" //go:cgo_import_dynamic libc_chdir chdir "libc.so" @@ -153,6 +154,7 @@ import ( //go:linkname proc__major libc___major //go:linkname proc__minor libc___minor //go:linkname procioctl libc_ioctl +//go:linkname procpoll libc_poll //go:linkname procAccess libc_access //go:linkname procAdjtime libc_adjtime //go:linkname procChdir libc_chdir @@ -278,6 +280,7 @@ var ( proc__major, proc__minor, procioctl, + procpoll, procAccess, procAdjtime, procChdir, @@ -557,6 +560,15 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 22bdab961..207c408e7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -652,8 +652,6 @@ type Sigset_t struct { X__val [16]uint64 } -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 20fc9f450..af295c3d0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build 386,openbsd @@ -444,3 +444,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 46fe9490c..ae153e70c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
// +build amd64,openbsd @@ -451,3 +451,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 62e1f7c04..35bb6195b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,openbsd @@ -437,3 +437,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index a979a33d5..c2280a579 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -438,3 +438,22 @@ type Winsize struct { Xpixel uint16 Ypixel uint16 } + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/google.golang.org/genproto/.travis.yml b/vendor/google.golang.org/genproto/.travis.yml new file mode 100644 index 000000000..e673e05e6 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: + - 1.6 + - 1.7 + - 1.8 +go_import_path: google.golang.org/genproto + +script: +- go test -v ./... +- if [[ "$TRAVIS_BRANCH" == "master" && "$TRAVIS_PULL_REQUEST" == "false" ]]; then + go get -u -v cloud.google.com/go/...; + fi diff --git a/vendor/google.golang.org/genproto/CONTRIBUTING.md b/vendor/google.golang.org/genproto/CONTRIBUTING.md new file mode 100644 index 000000000..2827b7d3f --- /dev/null +++ b/vendor/google.golang.org/genproto/CONTRIBUTING.md @@ -0,0 +1,27 @@ +Want to contribute? Great! First, read this page (including the small print at the end). + +### Before you contribute +Before we can use your code, you must sign the +[Google Individual Contributor License Agreement] +(https://cla.developers.google.com/about/google-individual) +(CLA), which you can do online. The CLA is necessary mainly because you own the +copyright to your changes, even after your contribution becomes part of our +codebase, so we need your permission to use and distribute your code. We also +need to be sure of various other things—for instance that you'll tell us if you +know that your code infringes on other people's patents. You don't have to sign +the CLA until after you've submitted your code for review and a member has +approved it, but you must do it before we can put your code into our codebase. +Before you start working on a larger contribution, you should get in touch with +us first through the issue tracker with your idea so that we can help out and +possibly guide you. Coordinating up front makes it much easier to avoid +frustration later on. + +### Code reviews +All submissions, including submissions by project members, require review. We +use Github pull requests for this purpose. 
+ +### The small print +Contributions made by corporations are covered by a different agreement than +the one above, the +[Software Grant and Corporate Contributor License Agreement] +(https://cla.developers.google.com/about/google-corporate). diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/README.md b/vendor/google.golang.org/genproto/README.md new file mode 100644 index 000000000..51d0bae5f --- /dev/null +++ b/vendor/google.golang.org/genproto/README.md @@ -0,0 +1,28 @@ +Go generated proto packages +=========================== + +[![Build Status](https://travis-ci.org/google/go-genproto.svg?branch=master)](https://travis-ci.org/google/go-genproto) +[![GoDoc](https://godoc.org/google.golang.org/genproto?status.svg)](https://godoc.org/google.golang.org/genproto) + +> **IMPORTANT** This repository is currently experimental. The structure +> of the contained packages is subject to change. Please see the original +> source repositories (listed below) to find out the status of the each +> protocol buffer's associated service. + +This repository contains the generated Go packages for common protocol buffer +types, and the generated [gRPC][1] code necessary for interacting with Google's gRPC +APIs. + +There are two sources for the proto files used in this repository: + +1. [google/protobuf][2]: the code in the `protobuf` and `ptypes` subdirectories + is derived from this repo. The messages in `protobuf` are used to describe + protocol buffer messages themselves. The messages under `ptypes` define the + common well-known types. +2. [googleapis/googleapis][3]: the code in the `googleapis` is derived from this + repo. 
The packages here contain types specifically for interacting with Google + APIs. + +[1]: http://grpc.io +[2]: https://github.com/google/protobuf/ +[3]: https://github.com/googleapis/googleapis/ diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 000000000..53d57f67a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/annotations.proto + +/* +Package annotations is a generated protocol buffer package. + +It is generated from these files: + google/api/annotations.proto + google/api/http.proto + +It has these top-level messages: + Http + HttpRule + CustomHttpPattern +*/ +package annotations + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +var E_Http = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Http) +} + +func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, + 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, + 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, + 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, + 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, + 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, + 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, + 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, + 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new 
file mode 100644 index 000000000..f91c60462 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,566 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/http.proto + +package annotations + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Defines the HTTP configuration for a service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *Http) Reset() { *m = Http{} } +func (m *Http) String() string { return proto.CompactTextString(m) } +func (*Http) ProtoMessage() {} +func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Http) GetRules() []*HttpRule { + if m != nil { + return m.Rules + } + return nil +} + +// `HttpRule` defines the mapping of an RPC method to one or more HTTP +// REST APIs. The mapping determines what portions of the request +// message are populated from the path, query parameters, or body of +// the HTTP request. The mapping is typically specified as an +// `google.api.http` annotation, see "google/api/annotations.proto" +// for details. +// +// The mapping consists of a field specifying the path template and +// method kind. 
The path template can refer to fields in the request +// message, as in the example below which describes a REST GET +// operation on a resource collection of messages: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// SubMessage sub = 2; // `sub.subfield` is url-mapped +// } +// message Message { +// string text = 1; // content of the resource +// } +// +// The same http annotation can alternatively be expressed inside the +// `GRPC API Configuration` YAML file. +// +// http: +// rules: +// - selector: .Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// This definition enables an automatic, bidrectional mapping of HTTP +// JSON to RPC. Example: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` +// +// In general, not only fields but also field paths can be referenced +// from a path pattern. Fields mapped to the path pattern cannot be +// repeated and must have a primitive (non-message) type. +// +// Any fields in the request message which are not bound by the path +// pattern automatically become (optional) HTTP query +// parameters. 
Assume the following definition of the request message: +// +// +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// int64 revision = 2; // becomes a parameter +// SubMessage sub = 3; // `sub.subfield` becomes a parameter +// } +// +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to HTTP parameters must have a +// primitive type or a repeated primitive type. Message types are not +// allowed. In the case of a repeated type, the parameter can be +// repeated in the URL, as in `...?param=A¶m=B`. +// +// For HTTP method kinds which allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. 
This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice of +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// +// This enables the following two alternative HTTP JSON to RPC +// mappings: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping +// +// The rules for mapping HTTP path, query parameters, and body fields +// to the request message are as follows: +// +// 1. The `body` field specifies either `*` or a field path, or is +// omitted. If omitted, it assumes there is no HTTP body. +// 2. 
Leaf fields (recursive expansion of nested messages in the +// request) can be classified into three types: +// (a) Matched in the URL template. +// (b) Covered by body (if body is `*`, everything except (a) fields; +// else everything under the body field) +// (c) All other fields. +// 3. URL query parameters found in the HTTP request are mapped to (c) fields. +// 4. Any body sent with an HTTP request can contain only (b) fields. +// +// The syntax of the path template is as follows: +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single path segment. It follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion. +// +// The syntax `**` matches zero or more path segments. It follows the semantics +// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved +// Expansion. NOTE: it must be the last segment in the path except the Verb. +// +// The syntax `LITERAL` matches literal text in the URL path. +// +// The syntax `Variable` matches the entire path as specified by its template; +// this nested template must not contain further variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// NOTE: the field paths in variables and in the `body` must not refer to +// repeated fields or map fields. +// +// Use CustomHttpPattern to specify any HTTP method that is not included in the +// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for +// a given URL path rule. The wild-card rule is useful for services that provide +// content to Web (HTML) clients. +type HttpRule struct { + // Selects methods to which this rule applies. 
+ // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are valid to be assigned to Pattern: + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP body, or + // `*` for mapping all fields not captured by the path pattern to the HTTP + // body. NOTE: the referred field must not be a repeated field and must be + // present at the top-level of request message type. + Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). 
+ AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"` +} + +func (m *HttpRule) Reset() { *m = HttpRule{} } +func (m *HttpRule) String() string { return proto.CompactTextString(m) } +func (*HttpRule) ProtoMessage() {} +func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + Get string `protobuf:"bytes,2,opt,name=get,oneof"` +} +type HttpRule_Put struct { + Put string `protobuf:"bytes,3,opt,name=put,oneof"` +} +type HttpRule_Post struct { + Post string `protobuf:"bytes,4,opt,name=post,oneof"` +} +type HttpRule_Delete struct { + Delete string `protobuf:"bytes,5,opt,name=delete,oneof"` +} +type HttpRule_Patch struct { + Patch string `protobuf:"bytes,6,opt,name=patch,oneof"` +} +type HttpRule_Custom struct { + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} +func (*HttpRule_Put) isHttpRule_Pattern() {} +func (*HttpRule_Post) isHttpRule_Pattern() {} +func (*HttpRule_Delete) isHttpRule_Pattern() {} +func (*HttpRule_Patch) isHttpRule_Pattern() {} +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (m *HttpRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *HttpRule) GetGet() string { + if x, ok := m.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (m *HttpRule) GetPut() string { + if x, ok := m.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (m *HttpRule) GetPost() string { + if x, ok := m.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (m *HttpRule) GetDelete() string { + if x, ok := m.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + 
return "" +} + +func (m *HttpRule) GetPatch() string { + if x, ok := m.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (m *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := m.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (m *HttpRule) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HttpRule) GetAdditionalBindings() []*HttpRule { + if m != nil { + return m.AdditionalBindings + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } +} + +func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Get) + case *HttpRule_Put: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Put) + case *HttpRule_Post: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Post) + case *HttpRule_Delete: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Delete) + case *HttpRule_Patch: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Patch) + case *HttpRule_Custom: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Custom); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) + } + return nil +} + +func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, 
error) { + m := msg.(*HttpRule) + switch tag { + case 2: // pattern.get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Get{x} + return true, err + case 3: // pattern.put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Put{x} + return true, err + case 4: // pattern.post + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Post{x} + return true, err + case 5: // pattern.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Delete{x} + return true, err + case 6: // pattern.patch + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Patch{x} + return true, err + case 8: // pattern.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomHttpPattern) + err := b.DecodeMessage(msg) + m.Pattern = &HttpRule_Custom{msg} + return true, err + default: + return false, nil + } +} + +func _HttpRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Get))) + n += len(x.Get) + case *HttpRule_Put: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Put))) + n += len(x.Put) + case *HttpRule_Post: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Post))) + n += len(x.Post) + case *HttpRule_Delete: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Delete))) + n += len(x.Delete) + case *HttpRule_Patch: + n += proto.SizeVarint(6<<3 | proto.WireBytes) 
+ n += proto.SizeVarint(uint64(len(x.Patch))) + n += len(x.Patch) + case *HttpRule_Custom: + s := proto.Size(x.Custom) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A custom pattern is used for defining custom HTTP verb. +type CustomHttpPattern struct { + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` + // The path matched by this custom verb. + Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` +} + +func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } +func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } +func (*CustomHttpPattern) ProtoMessage() {} +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *CustomHttpPattern) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *CustomHttpPattern) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func init() { + proto.RegisterType((*Http)(nil), "google.api.Http") + proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") + proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") +} + +func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 359 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30, + 0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29, + 0xf4, 0x60, 0x43, 0x7a, 0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1, + 0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe, + 0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8, + 
0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39, + 0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62, + 0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18, + 0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2, + 0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48, + 0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24, + 0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49, + 0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc, + 0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84, + 0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12, + 0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74, + 0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4, + 0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67, + 0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90, + 0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64, + 0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a, + 0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, + 0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/authorization_config.pb.go b/vendor/google.golang.org/genproto/googleapis/api/authorization_config.pb.go new file mode 100644 index 000000000..8062b9619 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/api/authorization_config.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/experimental/authorization_config.proto + +/* +Package api is a generated protocol buffer package. + +It is generated from these files: + google/api/experimental/authorization_config.proto + google/api/experimental/experimental.proto + +It has these top-level messages: + AuthorizationConfig + Experimental +*/ +package api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Configuration of authorization. +// +// This section determines the authorization provider, if unspecified, then no +// authorization check will be done. +// +// Example: +// +// experimental: +// authorization: +// provider: firebaserules.googleapis.com +type AuthorizationConfig struct { + // The name of the authorization provider, such as + // firebaserules.googleapis.com. 
+ Provider string `protobuf:"bytes,1,opt,name=provider" json:"provider,omitempty"` +} + +func (m *AuthorizationConfig) Reset() { *m = AuthorizationConfig{} } +func (m *AuthorizationConfig) String() string { return proto.CompactTextString(m) } +func (*AuthorizationConfig) ProtoMessage() {} +func (*AuthorizationConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AuthorizationConfig) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func init() { + proto.RegisterType((*AuthorizationConfig)(nil), "google.api.AuthorizationConfig") +} + +func init() { proto.RegisterFile("google/api/experimental/authorization_config.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 180 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4a, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xad, 0x28, 0x48, 0x2d, 0xca, 0xcc, 0x4d, 0xcd, + 0x2b, 0x49, 0xcc, 0xd1, 0x4f, 0x2c, 0x2d, 0xc9, 0xc8, 0x2f, 0xca, 0xac, 0x4a, 0x2c, 0xc9, 0xcc, + 0xcf, 0x8b, 0x4f, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, + 0x82, 0xe8, 0xd1, 0x4b, 0x2c, 0xc8, 0x54, 0x32, 0xe4, 0x12, 0x76, 0x44, 0x56, 0xe9, 0x0c, 0x56, + 0x28, 0x24, 0xc5, 0xc5, 0x51, 0x50, 0x94, 0x5f, 0x96, 0x99, 0x92, 0x5a, 0x24, 0xc1, 0xa8, 0xc0, + 0xa8, 0xc1, 0x19, 0x04, 0xe7, 0x3b, 0x25, 0x71, 0xf1, 0x25, 0xe7, 0xe7, 0xea, 0x21, 0x0c, 0x71, + 0x92, 0xc0, 0x62, 0x44, 0x00, 0xc8, 0xaa, 0x00, 0xc6, 0x28, 0x5d, 0xa8, 0xba, 0xf4, 0xfc, 0x9c, + 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x43, 0xf4, 0x21, 0x52, + 0x89, 0x05, 0x99, 0xc5, 0x20, 0xf7, 0x5b, 0x27, 0x16, 0x64, 0x2e, 0x62, 0x62, 0x71, 0x77, 0x0c, + 0xf0, 0x4c, 0x62, 0x03, 0x2b, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x52, 0x27, 0x0c, 0xba, + 0xdf, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/configchange/config_change.pb.go 
b/vendor/google.golang.org/genproto/googleapis/api/configchange/config_change.pb.go new file mode 100644 index 000000000..cf4150007 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/configchange/config_change.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/config_change.proto + +/* +Package configchange is a generated protocol buffer package. + +It is generated from these files: + google/api/config_change.proto + +It has these top-level messages: + ConfigChange + Advice +*/ +package configchange + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Classifies set of possible modifications to an object in the service +// configuration. +type ChangeType int32 + +const ( + // No value was provided. + ChangeType_CHANGE_TYPE_UNSPECIFIED ChangeType = 0 + // The changed object exists in the 'new' service configuration, but not + // in the 'old' service configuration. + ChangeType_ADDED ChangeType = 1 + // The changed object exists in the 'old' service configuration, but not + // in the 'new' service configuration. + ChangeType_REMOVED ChangeType = 2 + // The changed object exists in both service configurations, but its value + // is different. 
+ ChangeType_MODIFIED ChangeType = 3 +) + +var ChangeType_name = map[int32]string{ + 0: "CHANGE_TYPE_UNSPECIFIED", + 1: "ADDED", + 2: "REMOVED", + 3: "MODIFIED", +} +var ChangeType_value = map[string]int32{ + "CHANGE_TYPE_UNSPECIFIED": 0, + "ADDED": 1, + "REMOVED": 2, + "MODIFIED": 3, +} + +func (x ChangeType) String() string { + return proto.EnumName(ChangeType_name, int32(x)) +} +func (ChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Output generated from semantically comparing two versions of a service +// configuration. +// +// Includes detailed information about a field that have changed with +// applicable advice about potential consequences for the change, such as +// backwards-incompatibility. +type ConfigChange struct { + // Object hierarchy path to the change, with levels separated by a '.' + // character. For repeated fields, an applicable unique identifier field is + // used for the index (usually selector, name, or id). For maps, the term + // 'key' is used. If the field has no unique identifier, the numeric index + // is used. + // Examples: + // - visibility.rules[selector=="google.LibraryService.CreateBook"].restriction + // - quota.metric_rules[selector=="google"].metric_costs[key=="reads"].value + // - logging.producer_destinations[0] + Element string `protobuf:"bytes,1,opt,name=element" json:"element,omitempty"` + // Value of the changed object in the old Service configuration, + // in JSON format. This field will not be populated if ChangeType == ADDED. + OldValue string `protobuf:"bytes,2,opt,name=old_value,json=oldValue" json:"old_value,omitempty"` + // Value of the changed object in the new Service configuration, + // in JSON format. This field will not be populated if ChangeType == REMOVED. + NewValue string `protobuf:"bytes,3,opt,name=new_value,json=newValue" json:"new_value,omitempty"` + // The type for this change, either ADDED, REMOVED, or MODIFIED. 
+ ChangeType ChangeType `protobuf:"varint,4,opt,name=change_type,json=changeType,enum=google.api.ChangeType" json:"change_type,omitempty"` + // Collection of advice provided for this change, useful for determining the + // possible impact of this change. + Advices []*Advice `protobuf:"bytes,5,rep,name=advices" json:"advices,omitempty"` +} + +func (m *ConfigChange) Reset() { *m = ConfigChange{} } +func (m *ConfigChange) String() string { return proto.CompactTextString(m) } +func (*ConfigChange) ProtoMessage() {} +func (*ConfigChange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ConfigChange) GetElement() string { + if m != nil { + return m.Element + } + return "" +} + +func (m *ConfigChange) GetOldValue() string { + if m != nil { + return m.OldValue + } + return "" +} + +func (m *ConfigChange) GetNewValue() string { + if m != nil { + return m.NewValue + } + return "" +} + +func (m *ConfigChange) GetChangeType() ChangeType { + if m != nil { + return m.ChangeType + } + return ChangeType_CHANGE_TYPE_UNSPECIFIED +} + +func (m *ConfigChange) GetAdvices() []*Advice { + if m != nil { + return m.Advices + } + return nil +} + +// Generated advice about this change, used for providing more +// information about how a change will affect the existing service. +type Advice struct { + // Useful description for why this advice was applied and what actions should + // be taken to mitigate any implied risks. 
+ Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *Advice) Reset() { *m = Advice{} } +func (m *Advice) String() string { return proto.CompactTextString(m) } +func (*Advice) ProtoMessage() {} +func (*Advice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Advice) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*ConfigChange)(nil), "google.api.ConfigChange") + proto.RegisterType((*Advice)(nil), "google.api.Advice") + proto.RegisterEnum("google.api.ChangeType", ChangeType_name, ChangeType_value) +} + +func init() { proto.RegisterFile("google/api/config_change.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xcd, 0x4e, 0xc2, 0x40, + 0x14, 0x85, 0x2d, 0xff, 0xdc, 0x12, 0x82, 0xb3, 0xd0, 0x26, 0x24, 0xa6, 0x61, 0x45, 0x88, 0x69, + 0x13, 0x5c, 0xb8, 0x70, 0x55, 0xda, 0x8a, 0x2c, 0x80, 0xa6, 0x22, 0x89, 0x6e, 0x9a, 0xb1, 0x1d, + 0xc7, 0x49, 0xca, 0xcc, 0x08, 0x15, 0xc2, 0xeb, 0xf8, 0x36, 0xbe, 0x95, 0xa1, 0x03, 0xd2, 0xdd, + 0x9c, 0xf9, 0xce, 0xcd, 0x3d, 0x39, 0x17, 0x6e, 0xa8, 0x10, 0x34, 0x25, 0x36, 0x96, 0xcc, 0x8e, + 0x05, 0xff, 0x60, 0x34, 0x8a, 0x3f, 0x31, 0xa7, 0xc4, 0x92, 0x6b, 0x91, 0x09, 0x04, 0x8a, 0x5b, + 0x58, 0xb2, 0xde, 0xaf, 0x06, 0x2d, 0x37, 0xf7, 0xb8, 0xb9, 0x05, 0x19, 0x50, 0x27, 0x29, 0x59, + 0x11, 0x9e, 0x19, 0x9a, 0xa9, 0xf5, 0x9b, 0xe1, 0x49, 0xa2, 0x2e, 0x34, 0x45, 0x9a, 0x44, 0x5b, + 0x9c, 0x7e, 0x13, 0xa3, 0x94, 0xb3, 0x86, 0x48, 0x93, 0xe5, 0x41, 0x1f, 0x20, 0x27, 0xbb, 0x23, + 0x2c, 0x2b, 0xc8, 0xc9, 0x4e, 0xc1, 0x7b, 0xd0, 0x55, 0x80, 0x28, 0xdb, 0x4b, 0x62, 0x54, 0x4c, + 0xad, 0xdf, 0x1e, 0x5e, 0x59, 0xe7, 0x18, 0x96, 0x5a, 0xbe, 0xd8, 0x4b, 0x12, 0x42, 0xfc, 0xff, + 0x46, 0xb7, 0x50, 0xc7, 0xc9, 0x96, 0xc5, 0x64, 0x63, 0x54, 0xcd, 0x72, 0x5f, 0x1f, 
0xa2, 0xe2, + 0x90, 0x93, 0xa3, 0xf0, 0x64, 0xe9, 0x0d, 0xa0, 0xa6, 0xbe, 0x90, 0x09, 0x7a, 0x42, 0x36, 0xf1, + 0x9a, 0xc9, 0x8c, 0x09, 0x7e, 0x0c, 0x5b, 0xfc, 0x1a, 0xcc, 0x01, 0xce, 0x3b, 0x51, 0x17, 0xae, + 0xdd, 0x27, 0x67, 0x36, 0xf6, 0xa3, 0xc5, 0x6b, 0xe0, 0x47, 0x2f, 0xb3, 0xe7, 0xc0, 0x77, 0x27, + 0x8f, 0x13, 0xdf, 0xeb, 0x5c, 0xa0, 0x26, 0x54, 0x1d, 0xcf, 0xf3, 0xbd, 0x8e, 0x86, 0x74, 0xa8, + 0x87, 0xfe, 0x74, 0xbe, 0xf4, 0xbd, 0x4e, 0x09, 0xb5, 0xa0, 0x31, 0x9d, 0x7b, 0xca, 0x55, 0x1e, + 0x7d, 0x41, 0x3b, 0x16, 0xab, 0x42, 0xbc, 0xd1, 0x65, 0xb1, 0xd7, 0xe0, 0xd0, 0x7c, 0xa0, 0xbd, + 0xb9, 0x47, 0x03, 0x15, 0x29, 0xe6, 0xd4, 0x12, 0x6b, 0x6a, 0x53, 0xc2, 0xf3, 0xbb, 0xd8, 0x0a, + 0x61, 0xc9, 0x36, 0x85, 0xd3, 0xa9, 0x36, 0x1e, 0x8a, 0xe2, 0xa7, 0x54, 0x19, 0x3b, 0xc1, 0xe4, + 0xbd, 0x96, 0x8f, 0xdd, 0xfd, 0x05, 0x00, 0x00, 0xff, 0xff, 0x46, 0x8b, 0xd3, 0xf5, 0xf0, 0x01, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go new file mode 100644 index 000000000..68b702a13 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -0,0 +1,506 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/distribution.proto + +/* +Package distribution is a generated protocol buffer package. + +It is generated from these files: + google/api/distribution.proto + +It has these top-level messages: + Distribution +*/ +package distribution + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "github.com/golang/protobuf/ptypes/any" +import _ "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Distribution contains summary statistics for a population of values and, +// optionally, a histogram representing the distribution of those values across +// a specified set of histogram buckets. +// +// The summary statistics are the count, mean, sum of the squared deviation from +// the mean, the minimum, and the maximum of the set of population of values. +// +// The histogram is based on a sequence of buckets and gives a count of values +// that fall into each bucket. The boundaries of the buckets are given either +// explicitly or by specifying parameters for a method of computing them +// (buckets of fixed width or buckets of exponentially increasing width). +// +// Although it is not forbidden, it is generally a bad idea to include +// non-finite values (infinities or NaNs) in the population of values, as this +// will render the `mean` and `sum_of_squared_deviation` fields meaningless. +type Distribution struct { + // The number of values in the population. Must be non-negative. + Count int64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` + // The arithmetic mean of the values in the population. If `count` is zero + // then this field must be zero. + Mean float64 `protobuf:"fixed64,2,opt,name=mean" json:"mean,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. 
+ // + // If `count` is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation" json:"sum_of_squared_deviation,omitempty"` + // If specified, contains the range of the population values. The field + // must not be present if the `count` is zero. + Range *Distribution_Range `protobuf:"bytes,4,opt,name=range" json:"range,omitempty"` + // Defines the histogram bucket boundaries. + BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions" json:"bucket_options,omitempty"` + // If `bucket_options` is given, then the sum of the values in `bucket_counts` + // must equal the value in `count`. If `bucket_options` is not given, no + // `bucket_counts` fields may be given. + // + // Bucket counts are given in order under the numbering scheme described + // above (the underflow bucket has number 0; the finite buckets, if any, + // have numbers 1 through N-2; the overflow bucket has number N-1). + // + // The size of `bucket_counts` must be no greater than N as defined in + // `bucket_options`. + // + // Any suffix of trailing zero bucket_count fields may be omitted. 
+ BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts" json:"bucket_counts,omitempty"` +} + +func (m *Distribution) Reset() { *m = Distribution{} } +func (m *Distribution) String() string { return proto.CompactTextString(m) } +func (*Distribution) ProtoMessage() {} +func (*Distribution) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Distribution) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *Distribution) GetMean() float64 { + if m != nil { + return m.Mean + } + return 0 +} + +func (m *Distribution) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *Distribution) GetRange() *Distribution_Range { + if m != nil { + return m.Range + } + return nil +} + +func (m *Distribution) GetBucketOptions() *Distribution_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *Distribution) GetBucketCounts() []int64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +// The range of the population values. +type Distribution_Range struct { + // The minimum of the population values. + Min float64 `protobuf:"fixed64,1,opt,name=min" json:"min,omitempty"` + // The maximum of the population values. + Max float64 `protobuf:"fixed64,2,opt,name=max" json:"max,omitempty"` +} + +func (m *Distribution_Range) Reset() { *m = Distribution_Range{} } +func (m *Distribution_Range) String() string { return proto.CompactTextString(m) } +func (*Distribution_Range) ProtoMessage() {} +func (*Distribution_Range) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +func (m *Distribution_Range) GetMin() float64 { + if m != nil { + return m.Min + } + return 0 +} + +func (m *Distribution_Range) GetMax() float64 { + if m != nil { + return m.Max + } + return 0 +} + +// A Distribution may optionally contain a histogram of the values in the +// population. 
The histogram is given in `bucket_counts` as counts of values +// that fall into one of a sequence of non-overlapping buckets. The sequence +// of buckets is described by `bucket_options`. +// +// A bucket specifies an inclusive lower bound and exclusive upper bound for +// the values that are counted for that bucket. The upper bound of a bucket +// is strictly greater than the lower bound. +// +// The sequence of N buckets for a Distribution consists of an underflow +// bucket (number 0), zero or more finite buckets (number 1 through N - 2) and +// an overflow bucket (number N - 1). The buckets are contiguous: the lower +// bound of bucket i (i > 0) is the same as the upper bound of bucket i - 1. +// The buckets span the whole range of finite values: lower bound of the +// underflow bucket is -infinity and the upper bound of the overflow bucket is +// +infinity. The finite buckets are so-called because both bounds are +// finite. +// +// `BucketOptions` describes bucket boundaries in one of three ways. Two +// describe the boundaries by giving parameters for a formula to generate +// boundaries and one gives the bucket boundaries explicitly. +// +// If `bucket_boundaries` is not given, then no `bucket_counts` may be given. +type Distribution_BucketOptions struct { + // Exactly one of these three fields must be set. 
+ // + // Types that are valid to be assigned to Options: + // *Distribution_BucketOptions_LinearBuckets + // *Distribution_BucketOptions_ExponentialBuckets + // *Distribution_BucketOptions_ExplicitBuckets + Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"` +} + +func (m *Distribution_BucketOptions) Reset() { *m = Distribution_BucketOptions{} } +func (m *Distribution_BucketOptions) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions) ProtoMessage() {} +func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} } + +type isDistribution_BucketOptions_Options interface { + isDistribution_BucketOptions_Options() +} + +type Distribution_BucketOptions_LinearBuckets struct { + LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,oneof"` +} +type Distribution_BucketOptions_ExponentialBuckets struct { + ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,oneof"` +} +type Distribution_BucketOptions_ExplicitBuckets struct { + ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,oneof"` +} + +func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {} +func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {} +func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {} + +func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options { + if m != nil { + return m.Options + } + return nil +} + +func (m *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok { + return x.LinearBuckets + } + return nil +} + +func (m *Distribution_BucketOptions) 
GetExponentialBuckets() *Distribution_BucketOptions_Exponential { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok { + return x.ExponentialBuckets + } + return nil +} + +func (m *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok { + return x.ExplicitBuckets + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Distribution_BucketOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Distribution_BucketOptions_OneofMarshaler, _Distribution_BucketOptions_OneofUnmarshaler, _Distribution_BucketOptions_OneofSizer, []interface{}{ + (*Distribution_BucketOptions_LinearBuckets)(nil), + (*Distribution_BucketOptions_ExponentialBuckets)(nil), + (*Distribution_BucketOptions_ExplicitBuckets)(nil), + } +} + +func _Distribution_BucketOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Distribution_BucketOptions) + // options + switch x := m.Options.(type) { + case *Distribution_BucketOptions_LinearBuckets: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.LinearBuckets); err != nil { + return err + } + case *Distribution_BucketOptions_ExponentialBuckets: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExponentialBuckets); err != nil { + return err + } + case *Distribution_BucketOptions_ExplicitBuckets: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExplicitBuckets); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Distribution_BucketOptions.Options has unexpected type %T", x) + } + return nil +} + +func _Distribution_BucketOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := 
msg.(*Distribution_BucketOptions) + switch tag { + case 1: // options.linear_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Linear) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_LinearBuckets{msg} + return true, err + case 2: // options.exponential_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Exponential) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_ExponentialBuckets{msg} + return true, err + case 3: // options.explicit_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Explicit) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_ExplicitBuckets{msg} + return true, err + default: + return false, nil + } +} + +func _Distribution_BucketOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Distribution_BucketOptions) + // options + switch x := m.Options.(type) { + case *Distribution_BucketOptions_LinearBuckets: + s := proto.Size(x.LinearBuckets) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Distribution_BucketOptions_ExponentialBuckets: + s := proto.Size(x.ExponentialBuckets) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Distribution_BucketOptions_ExplicitBuckets: + s := proto.Size(x.ExplicitBuckets) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specify a sequence of buckets that all have the same width (except +// overflow and underflow). Each bucket represents a constant absolute +// uncertainty on the specific value in the bucket. 
+// +// Defines `num_finite_buckets + 2` (= N) buckets with these boundaries for +// bucket `i`: +// +// Upper bound (0 <= i < N-1): offset + (width * i). +// Lower bound (1 <= i < N): offset + (width * (i - 1)). +type Distribution_BucketOptions_Linear struct { + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets" json:"num_finite_buckets,omitempty"` + // Must be greater than 0. + Width float64 `protobuf:"fixed64,2,opt,name=width" json:"width,omitempty"` + // Lower bound of the first bucket. + Offset float64 `protobuf:"fixed64,3,opt,name=offset" json:"offset,omitempty"` +} + +func (m *Distribution_BucketOptions_Linear) Reset() { *m = Distribution_BucketOptions_Linear{} } +func (m *Distribution_BucketOptions_Linear) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Linear) ProtoMessage() {} +func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1, 0} +} + +func (m *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 { + if m != nil { + return m.NumFiniteBuckets + } + return 0 +} + +func (m *Distribution_BucketOptions_Linear) GetWidth() float64 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Distribution_BucketOptions_Linear) GetOffset() float64 { + if m != nil { + return m.Offset + } + return 0 +} + +// Specify a sequence of buckets that have a width that is proportional to +// the value of the lower bound. Each bucket represents a constant relative +// uncertainty on a specific value in the bucket. +// +// Defines `num_finite_buckets + 2` (= N) buckets with these boundaries for +// bucket i: +// +// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). +type Distribution_BucketOptions_Exponential struct { + // Must be greater than 0. 
+ NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets" json:"num_finite_buckets,omitempty"` + // Must be greater than 1. + GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor" json:"growth_factor,omitempty"` + // Must be greater than 0. + Scale float64 `protobuf:"fixed64,3,opt,name=scale" json:"scale,omitempty"` +} + +func (m *Distribution_BucketOptions_Exponential) Reset() { + *m = Distribution_BucketOptions_Exponential{} +} +func (m *Distribution_BucketOptions_Exponential) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Exponential) ProtoMessage() {} +func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1, 1} +} + +func (m *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 { + if m != nil { + return m.NumFiniteBuckets + } + return 0 +} + +func (m *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 { + if m != nil { + return m.GrowthFactor + } + return 0 +} + +func (m *Distribution_BucketOptions_Exponential) GetScale() float64 { + if m != nil { + return m.Scale + } + return 0 +} + +// A set of buckets with arbitrary widths. +// +// Defines `size(bounds) + 1` (= N) buckets with these boundaries for +// bucket i: +// +// Upper bound (0 <= i < N-1): bounds[i] +// Lower bound (1 <= i < N); bounds[i - 1] +// +// There must be at least one element in `bounds`. If `bounds` has only one +// element, there are no finite buckets, and that single element is the +// common boundary of the overflow and underflow buckets. +type Distribution_BucketOptions_Explicit struct { + // The values must be monotonically increasing. 
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds" json:"bounds,omitempty"` +} + +func (m *Distribution_BucketOptions_Explicit) Reset() { *m = Distribution_BucketOptions_Explicit{} } +func (m *Distribution_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Explicit) ProtoMessage() {} +func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1, 2} +} + +func (m *Distribution_BucketOptions_Explicit) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +func init() { + proto.RegisterType((*Distribution)(nil), "google.api.Distribution") + proto.RegisterType((*Distribution_Range)(nil), "google.api.Distribution.Range") + proto.RegisterType((*Distribution_BucketOptions)(nil), "google.api.Distribution.BucketOptions") + proto.RegisterType((*Distribution_BucketOptions_Linear)(nil), "google.api.Distribution.BucketOptions.Linear") + proto.RegisterType((*Distribution_BucketOptions_Exponential)(nil), "google.api.Distribution.BucketOptions.Exponential") + proto.RegisterType((*Distribution_BucketOptions_Explicit)(nil), "google.api.Distribution.BucketOptions.Explicit") +} + +func init() { proto.RegisterFile("google/api/distribution.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 544 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x5d, 0x6f, 0xd3, 0x30, + 0x14, 0x5d, 0x96, 0xb5, 0x85, 0xdb, 0x0f, 0x8a, 0x19, 0x28, 0x44, 0x7c, 0x54, 0x9b, 0x84, 0x2a, + 0x01, 0x89, 0x54, 0x90, 0x78, 0xe0, 0xad, 0x1b, 0x53, 0x1f, 0x40, 0x9b, 0x8c, 0xc4, 0x03, 0x42, + 0x8a, 0x9c, 0xc4, 0xc9, 0x0c, 0x89, 0x1d, 0x62, 0x67, 0x2b, 0xef, 0xfc, 0x29, 0xfe, 0x1d, 0x8a, + 0xed, 0x6e, 0x19, 0x08, 0xa9, 0xbc, 0xf9, 0xde, 0x73, 0x7c, 0xce, 0xb9, 0x57, 0x71, 0xe0, 0x71, + 0x2e, 0x44, 0x5e, 0xd0, 0x90, 0x54, 0x2c, 0x4c, 0x99, 0x54, 0x35, 0x8b, 0x1b, 0xc5, 0x04, 0x0f, + 0xaa, 0x5a, 
0x28, 0x81, 0xc0, 0xc0, 0x01, 0xa9, 0x98, 0xff, 0xa8, 0x43, 0x25, 0x9c, 0x0b, 0x45, + 0x5a, 0xa2, 0x34, 0x4c, 0xff, 0xa1, 0x45, 0x75, 0x15, 0x37, 0x59, 0x48, 0xf8, 0x0f, 0x0b, 0x3d, + 0xfd, 0x13, 0x52, 0xac, 0xa4, 0x52, 0x91, 0xb2, 0x32, 0x84, 0x83, 0x9f, 0x03, 0x18, 0x1d, 0x77, + 0xcc, 0xd1, 0x3e, 0xf4, 0x12, 0xd1, 0x70, 0xe5, 0x39, 0x33, 0x67, 0xee, 0x62, 0x53, 0x20, 0x04, + 0x7b, 0x25, 0x25, 0xdc, 0xdb, 0x9d, 0x39, 0x73, 0x07, 0xeb, 0x33, 0x7a, 0x03, 0x9e, 0x6c, 0xca, + 0x48, 0x64, 0x91, 0xfc, 0xde, 0x90, 0x9a, 0xa6, 0x51, 0x4a, 0x2f, 0x98, 0x4e, 0xe6, 0xb9, 0x9a, + 0x77, 0x5f, 0x36, 0xe5, 0x69, 0xf6, 0xd1, 0xa0, 0xc7, 0x1b, 0x10, 0xbd, 0x86, 0x5e, 0x4d, 0x78, + 0x4e, 0xbd, 0xbd, 0x99, 0x33, 0x1f, 0x2e, 0x9e, 0x04, 0xd7, 0x93, 0x06, 0xdd, 0x2c, 0x01, 0x6e, + 0x59, 0xd8, 0x90, 0xd1, 0x07, 0x98, 0xc4, 0x4d, 0xf2, 0x8d, 0xaa, 0x48, 0x54, 0x7a, 0x7a, 0xaf, + 0xaf, 0xaf, 0x3f, 0xfb, 0xe7, 0xf5, 0xa5, 0xa6, 0x9f, 0x1a, 0x36, 0x1e, 0xc7, 0xdd, 0x12, 0x1d, + 0x82, 0x6d, 0x44, 0x7a, 0x42, 0xe9, 0x0d, 0x66, 0xee, 0xdc, 0xc5, 0x23, 0xd3, 0x3c, 0xd2, 0x3d, + 0xff, 0x39, 0xf4, 0x74, 0x06, 0x34, 0x05, 0xb7, 0x64, 0x5c, 0xef, 0xc4, 0xc1, 0xed, 0x51, 0x77, + 0xc8, 0xda, 0x2e, 0xa4, 0x3d, 0xfa, 0xbf, 0xf6, 0x60, 0x7c, 0xc3, 0x12, 0x7d, 0x82, 0x49, 0xc1, + 0x38, 0x25, 0x75, 0x64, 0x54, 0xa5, 0x16, 0x18, 0x2e, 0x5e, 0x6e, 0x17, 0x39, 0x78, 0xaf, 0x2f, + 0xaf, 0x76, 0xf0, 0xd8, 0xc8, 0x18, 0x54, 0x22, 0x0a, 0xf7, 0xe8, 0xba, 0x12, 0x9c, 0x72, 0xc5, + 0x48, 0x71, 0x25, 0xbe, 0xab, 0xc5, 0x17, 0x5b, 0x8a, 0xbf, 0xbb, 0x56, 0x58, 0xed, 0x60, 0xd4, + 0x11, 0xdc, 0xd8, 0x7c, 0x81, 0x29, 0x5d, 0x57, 0x05, 0x4b, 0x98, 0xba, 0xf2, 0x70, 0xb5, 0x47, + 0xb8, 0xbd, 0x87, 0xbe, 0xbe, 0xda, 0xc1, 0x77, 0x36, 0x52, 0x56, 0xdd, 0x4f, 0xa1, 0x6f, 0xe6, + 0x43, 0x2f, 0x00, 0xf1, 0xa6, 0x8c, 0x32, 0xc6, 0x99, 0xa2, 0x37, 0x56, 0xd5, 0xc3, 0x53, 0xde, + 0x94, 0x27, 0x1a, 0xd8, 0xa4, 0xda, 0x87, 0xde, 0x25, 0x4b, 0xd5, 0xb9, 0x5d, 0xbd, 0x29, 0xd0, + 0x03, 0xe8, 0x8b, 0x2c, 0x93, 0x54, 0xd9, 0x4f, 
0xcf, 0x56, 0xfe, 0x05, 0x0c, 0x3b, 0x83, 0xfe, + 0xa7, 0xd5, 0x21, 0x8c, 0xf3, 0x5a, 0x5c, 0xaa, 0xf3, 0x28, 0x23, 0x89, 0x12, 0xb5, 0xb5, 0x1c, + 0x99, 0xe6, 0x89, 0xee, 0xb5, 0x79, 0x64, 0x42, 0x0a, 0x6a, 0x8d, 0x4d, 0xe1, 0x1f, 0xc0, 0xad, + 0xcd, 0xf0, 0x6d, 0xb6, 0x58, 0x34, 0x3c, 0x6d, 0x8d, 0xdc, 0x36, 0x9b, 0xa9, 0x96, 0xb7, 0x61, + 0x60, 0x3f, 0xe5, 0xe5, 0x57, 0x98, 0x24, 0xa2, 0xec, 0x6c, 0x75, 0x79, 0xb7, 0xbb, 0xd6, 0xb3, + 0xf6, 0xad, 0x9e, 0x39, 0x9f, 0x8f, 0x2c, 0x21, 0x17, 0x05, 0xe1, 0x79, 0x20, 0xea, 0x3c, 0xcc, + 0x29, 0xd7, 0x2f, 0x39, 0x34, 0x10, 0xa9, 0x98, 0xfc, 0xeb, 0x8f, 0xf2, 0xb6, 0x5b, 0xc4, 0x7d, + 0xcd, 0x7f, 0xf5, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x88, 0x8e, 0xc5, 0x4b, 0x80, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/experimental.pb.go b/vendor/google.golang.org/genproto/googleapis/api/experimental.pb.go new file mode 100644 index 000000000..a88df3d14 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/experimental.pb.go @@ -0,0 +1,56 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/experimental/experimental.proto + +package api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Experimental service configuration. These configuration options can +// only be used by whitelisted users. +type Experimental struct { + // Authorization configuration. 
+ Authorization *AuthorizationConfig `protobuf:"bytes,8,opt,name=authorization" json:"authorization,omitempty"` +} + +func (m *Experimental) Reset() { *m = Experimental{} } +func (m *Experimental) String() string { return proto.CompactTextString(m) } +func (*Experimental) ProtoMessage() {} +func (*Experimental) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Experimental) GetAuthorization() *AuthorizationConfig { + if m != nil { + return m.Authorization + } + return nil +} + +func init() { + proto.RegisterType((*Experimental)(nil), "google.api.Experimental") +} + +func init() { proto.RegisterFile("google/api/experimental/experimental.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xad, 0x28, 0x48, 0x2d, 0xca, 0xcc, 0x4d, 0xcd, + 0x2b, 0x49, 0xcc, 0x41, 0xe1, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x71, 0x41, 0xd4, 0xea, + 0x25, 0x16, 0x64, 0x4a, 0xc9, 0x20, 0xe9, 0x4b, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0x86, 0xa8, 0x94, 0x32, 0xc2, 0x65, 0x6a, 0x62, 0x69, 0x49, 0x46, 0x7e, 0x51, 0x66, + 0x15, 0x58, 0x75, 0x7c, 0x72, 0x7e, 0x5e, 0x5a, 0x66, 0x3a, 0x44, 0x8f, 0x52, 0x28, 0x17, 0x8f, + 0x2b, 0x92, 0x52, 0x21, 0x57, 0x2e, 0x5e, 0x14, 0xd5, 0x12, 0x1c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, + 0xf2, 0x7a, 0x08, 0x57, 0xe8, 0x39, 0x22, 0x2b, 0x70, 0x06, 0x9b, 0x16, 0x84, 0xaa, 0xcb, 0x29, + 0x9a, 0x8b, 0x2f, 0x39, 0x3f, 0x17, 0x49, 0x93, 0x93, 0x20, 0xb2, 0x35, 0x01, 0x20, 0xbb, 0x03, + 0x18, 0xa3, 0x74, 0xa1, 0x0a, 0xd2, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, + 0xd3, 0x53, 0xf3, 0xc0, 0x2e, 0xd3, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x83, 0x3c, 0x64, 0x9d, + 0x58, 0x90, 0xb9, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, 0xac, 0xc0, 0x18, 0x10, + 0x00, 0x00, 0xff, 0xff, 0xa0, 0x95, 0x20, 
0xe5, 0x46, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go new file mode 100644 index 000000000..c3e12e5eb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/httpbody.proto + +/* +Package httpbody is a generated protocol buffer package. + +It is generated from these files: + google/api/httpbody.proto + +It has these top-level messages: + HttpBody +*/ +package httpbody + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Message that represents an arbitrary HTTP body. It should only be used for +// payload formats that can't be represented as JSON, such as raw binary or +// an HTML page. +// +// +// This message can be used both in streaming and non-streaming API methods in +// the request as well as the response. +// +// It can be used as a top-level request field, which is convenient if one +// wants to extract parameters from either the URL or HTTP template into the +// request fields and also want access to the raw HTTP body. +// +// Example: +// +// message GetResourceRequest { +// // A unique request id. +// string request_id = 1; +// +// // The raw HTTP body is bound to this field. 
+// google.api.HttpBody http_body = 2; +// } +// +// service ResourceService { +// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); +// } +// +// Example with streaming methods: +// +// service CaldavService { +// rpc GetCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// rpc UpdateCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// } +// +// Use of this type only changes how the request and response bodies are +// handled, all other features will continue to work unchanged. +type HttpBody struct { + // The HTTP Content-Type string representing the content type of the body. + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType" json:"content_type,omitempty"` + // HTTP body binary data. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *HttpBody) Reset() { *m = HttpBody{} } +func (m *HttpBody) String() string { return proto.CompactTextString(m) } +func (*HttpBody) ProtoMessage() {} +func (*HttpBody) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *HttpBody) GetContentType() string { + if m != nil { + return m.ContentType + } + return "" +} + +func (m *HttpBody) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*HttpBody)(nil), "google.api.HttpBody") +} + +func init() { proto.RegisterFile("google/api/httpbody.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 181 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x28, 0x29, 0x29, 0x48, 0xca, 0x4f, 0xa9, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe9, 0x25, 0x16, 0x64, 0x2a, 0x39, 0x72, + 0x71, 0x78, 0x94, 0x94, 0x14, 0x38, 0xe5, 0xa7, 0x54, 
0x0a, 0x29, 0x72, 0xf1, 0x24, 0xe7, 0xe7, + 0x95, 0xa4, 0xe6, 0x95, 0xc4, 0x97, 0x54, 0x16, 0xa4, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, + 0x71, 0x43, 0xc5, 0x42, 0x2a, 0x0b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, + 0x98, 0x14, 0x18, 0x35, 0x78, 0x82, 0xc0, 0x6c, 0xa7, 0x54, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, + 0x84, 0xa1, 0x4e, 0xbc, 0x30, 0x23, 0x03, 0x40, 0xf6, 0x05, 0x30, 0x46, 0x59, 0x43, 0x25, 0xd3, + 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0xae, 0xd1, + 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xa3, 0xb8, 0xd5, 0x1a, 0xc6, 0x58, 0xc4, 0xc4, 0xe2, 0xee, + 0x18, 0xe0, 0x99, 0xc4, 0x06, 0x56, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x3a, 0xdf, + 0x30, 0xd9, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go new file mode 100644 index 000000000..790f6c071 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/label.proto + +/* +Package label is a generated protocol buffer package. + +It is generated from these files: + google/api/label.proto + +It has these top-level messages: + LabelDescriptor +*/ +package label + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Value types that can be used as label values. 
+type LabelDescriptor_ValueType int32 + +const ( + // A variable-length string. This is the default. + LabelDescriptor_STRING LabelDescriptor_ValueType = 0 + // Boolean; true or false. + LabelDescriptor_BOOL LabelDescriptor_ValueType = 1 + // A 64-bit signed integer. + LabelDescriptor_INT64 LabelDescriptor_ValueType = 2 +) + +var LabelDescriptor_ValueType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT64", +} +var LabelDescriptor_ValueType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT64": 2, +} + +func (x LabelDescriptor_ValueType) String() string { + return proto.EnumName(LabelDescriptor_ValueType_name, int32(x)) +} +func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// A description of a label. +type LabelDescriptor struct { + // The label key. + Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + // The type of data that can be assigned to the label. + ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"` + // A human-readable description for the label. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` +} + +func (m *LabelDescriptor) Reset() { *m = LabelDescriptor{} } +func (m *LabelDescriptor) String() string { return proto.CompactTextString(m) } +func (*LabelDescriptor) ProtoMessage() {} +func (*LabelDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *LabelDescriptor) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelDescriptor) GetValueType() LabelDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return LabelDescriptor_STRING +} + +func (m *LabelDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*LabelDescriptor)(nil), "google.api.LabelDescriptor") + proto.RegisterEnum("google.api.LabelDescriptor_ValueType", LabelDescriptor_ValueType_name, LabelDescriptor_ValueType_value) +} + +func init() { proto.RegisterFile("google/api/label.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0xd1, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x88, 0xeb, 0x25, 0x16, 0x64, 0x2a, 0xed, 0x64, 0xe4, 0xe2, 0xf7, + 0x01, 0xc9, 0xb9, 0xa4, 0x16, 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x09, 0x09, 0x70, 0x31, + 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0x98, 0x42, 0x2e, 0x5c, 0x5c, + 0x65, 0x89, 0x39, 0xa5, 0xa9, 0xf1, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x7c, + 0x46, 0xaa, 0x7a, 0x08, 0x63, 0xf4, 0xd0, 0x8c, 0xd0, 0x0b, 0x03, 0xa9, 0x0e, 0xa9, 0x2c, 0x48, + 0x0d, 0xe2, 0x2c, 0x83, 0x31, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xa0, 0x4a, 0x32, 0xf3, 0xf3, 0x24, + 0x98, 0xc1, 0xe6, 0x23, 0x0b, 0x29, 0xe9, 0x70, 0x71, 0xc2, 0x75, 0x0a, 0x71, 0x71, 0xb1, 0x05, + 
0x87, 0x04, 0x79, 0xfa, 0xb9, 0x0b, 0x30, 0x08, 0x71, 0x70, 0xb1, 0x38, 0xf9, 0xfb, 0xfb, 0x08, + 0x30, 0x0a, 0x71, 0x72, 0xb1, 0x7a, 0xfa, 0x85, 0x98, 0x99, 0x08, 0x30, 0x39, 0xc5, 0x73, 0xf1, + 0x25, 0xe7, 0xe7, 0x22, 0x39, 0xc3, 0x89, 0x0b, 0xec, 0x8e, 0x00, 0x90, 0x2f, 0x03, 0x18, 0xa3, + 0x4c, 0xa1, 0x32, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, 0xe9, 0xa9, + 0x79, 0xe0, 0x30, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x23, 0x82, 0xc7, 0x1a, 0x4c, 0xfe, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xe2, 0xee, 0x18, 0xe0, 0x99, 0xc4, 0x06, 0x56, 0x6b, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0x57, 0x04, 0xaa, 0x1f, 0x49, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go new file mode 100644 index 000000000..df451ac5c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -0,0 +1,358 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/metric.proto + +/* +Package metric is a generated protocol buffer package. + +It is generated from these files: + google/api/metric.proto + +It has these top-level messages: + MetricDescriptor + Metric +*/ +package metric + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_api "google.golang.org/genproto/googleapis/api/label" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The kind of measurement. It describes how the data is reported. 
+type MetricDescriptor_MetricKind int32 + +const ( + // Do not use this default value. + MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0 + // An instantaneous measurement of a value. + MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1 + // The change in a value during a time interval. + MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2 + // A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time + // and increasing end times, until an event resets the cumulative + // value to zero and sets a new start time for the following + // points. + MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3 +) + +var MetricDescriptor_MetricKind_name = map[int32]string{ + 0: "METRIC_KIND_UNSPECIFIED", + 1: "GAUGE", + 2: "DELTA", + 3: "CUMULATIVE", +} +var MetricDescriptor_MetricKind_value = map[string]int32{ + "METRIC_KIND_UNSPECIFIED": 0, + "GAUGE": 1, + "DELTA": 2, + "CUMULATIVE": 3, +} + +func (x MetricDescriptor_MetricKind) String() string { + return proto.EnumName(MetricDescriptor_MetricKind_name, int32(x)) +} +func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 0} +} + +// The value type of a metric. +type MetricDescriptor_ValueType int32 + +const ( + // Do not use this default value. + MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0 + // The value is a boolean. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_BOOL MetricDescriptor_ValueType = 1 + // The value is a signed 64-bit integer. + MetricDescriptor_INT64 MetricDescriptor_ValueType = 2 + // The value is a double precision floating point number. + MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3 + // The value is a text string. + // This value type can be used only if the metric kind is `GAUGE`. 
+ MetricDescriptor_STRING MetricDescriptor_ValueType = 4 + // The value is a [`Distribution`][google.api.Distribution]. + MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5 + // The value is money. + MetricDescriptor_MONEY MetricDescriptor_ValueType = 6 +) + +var MetricDescriptor_ValueType_name = map[int32]string{ + 0: "VALUE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "DOUBLE", + 4: "STRING", + 5: "DISTRIBUTION", + 6: "MONEY", +} +var MetricDescriptor_ValueType_value = map[string]int32{ + "VALUE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "DOUBLE": 3, + "STRING": 4, + "DISTRIBUTION": 5, + "MONEY": 6, +} + +func (x MetricDescriptor_ValueType) String() string { + return proto.EnumName(MetricDescriptor_ValueType_name, int32(x)) +} +func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1} +} + +// Defines a metric type and its schema. Once a metric descriptor is created, +// deleting or altering it stops data collection and makes the metric type's +// existing data unusable. +type MetricDescriptor struct { + // The resource name of the metric descriptor. Depending on the + // implementation, the name typically includes: (1) the parent resource name + // that defines the scope of the metric type or of its data; and (2) the + // metric's URL-encoded type, which also appears in the `type` field of this + // descriptor. For example, following is the resource name of a custom + // metric within the GCP project `my-project-id`: + // + // "projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount" + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined custom metric types have the DNS name + // `custom.googleapis.com`. Metric types should use a natural hierarchical + // grouping. 
For example: + // + // "custom.googleapis.com/invoice/paid/amount" + // "appengine.googleapis.com/http/server/response_latencies" + Type string `protobuf:"bytes,8,opt,name=type" json:"type,omitempty"` + // The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric + // type has a label for the HTTP response code, `response_code`, so + // you can look at latencies for successful responses or just + // for responses that failed. + Labels []*google_api.LabelDescriptor `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty"` + // Whether the metric records instantaneous values, changes to a value, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // Whether the measurement is an integer, a floating-point number, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The unit in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. 
The + // supported units are a subset of [The Unified Code for Units of + // Measure](http://unitsofmeasure.org/ucum.html) standard: + // + // **Basic units (UNIT)** + // + // * `bit` bit + // * `By` byte + // * `s` second + // * `min` minute + // * `h` hour + // * `d` day + // + // **Prefixes (PREFIX)** + // + // * `k` kilo (10**3) + // * `M` mega (10**6) + // * `G` giga (10**9) + // * `T` tera (10**12) + // * `P` peta (10**15) + // * `E` exa (10**18) + // * `Z` zetta (10**21) + // * `Y` yotta (10**24) + // * `m` milli (10**-3) + // * `u` micro (10**-6) + // * `n` nano (10**-9) + // * `p` pico (10**-12) + // * `f` femto (10**-15) + // * `a` atto (10**-18) + // * `z` zepto (10**-21) + // * `y` yocto (10**-24) + // * `Ki` kibi (2**10) + // * `Mi` mebi (2**20) + // * `Gi` gibi (2**30) + // * `Ti` tebi (2**40) + // + // **Grammar** + // + // The grammar includes the dimensionless unit `1`, such as `1/s`. + // + // The grammar also includes these connectors: + // + // * `/` division (as an infix operator, e.g. `1/s`). + // * `.` multiplication (as an infix operator, e.g. `GBy.d`) + // + // The grammar for a unit is as follows: + // + // Expression = Component { "." Component } { "/" Component } ; + // + // Component = [ PREFIX ] UNIT [ Annotation ] + // | Annotation + // | "1" + // ; + // + // Annotation = "{" NAME "}" ; + // + // Notes: + // + // * `Annotation` is just a comment if it follows a `UNIT` and is + // equivalent to `1` if it is used alone. For examples, + // `{requests}/s == 1/s`, `By{transmitted}/s == By/s`. + // * `NAME` is a sequence of non-blank printable ASCII characters not + // containing '{' or '}'. + Unit string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + // A concise name for the metric, which can be displayed in user interfaces. 
+ // Use sentence case without an ending period, for example "Request count". + DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName" json:"display_name,omitempty"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MetricDescriptor) GetLabels() []*google_api.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +func (m *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind { + if m != nil { + return m.MetricKind + } + return MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (m *MetricDescriptor) GetValueType() MetricDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// A specific metric, identified by specifying values for all of the +// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. +type Metric struct { + // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor]. + // For example, `custom.googleapis.com/invoice/paid/amount`. + Type string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // The set of label values that uniquely identify this metric. All + // labels listed in the `MetricDescriptor` must be assigned values. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Metric) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Metric) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*MetricDescriptor)(nil), "google.api.MetricDescriptor") + proto.RegisterType((*Metric)(nil), "google.api.Metric") + proto.RegisterEnum("google.api.MetricDescriptor_MetricKind", MetricDescriptor_MetricKind_name, MetricDescriptor_MetricKind_value) + proto.RegisterEnum("google.api.MetricDescriptor_ValueType", MetricDescriptor_ValueType_name, MetricDescriptor_ValueType_value) +} + +func init() { proto.RegisterFile("google/api/metric.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 506 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x4d, 0x6f, 0xda, 0x40, + 0x10, 0xad, 0x3f, 0x70, 0xc3, 0x10, 0xa1, 0xd5, 0xaa, 0x4a, 0x2c, 0x22, 0x55, 0x94, 0x43, 0xcb, + 0x09, 0xa4, 0xa4, 0x4a, 0xbf, 0x4e, 0x80, 0xb7, 0xd4, 0x8a, 0xb1, 0x91, 0x63, 0x23, 0xa5, 0x17, + 0xcb, 0x81, 0x95, 0x65, 0xc5, 0xd8, 0xae, 0x71, 0x22, 0xf9, 0x57, 0xf4, 0x17, 0xf4, 0xd2, 0x5f, + 0x5a, 0xed, 0xae, 0x03, 0x16, 0x95, 0x72, 0xe2, 0xed, 0x9b, 0x37, 0x6f, 0x67, 0x96, 0x67, 0x38, + 0x8f, 0xb2, 0x2c, 0x4a, 0xe8, 0x38, 0xcc, 0xe3, 0xf1, 0x96, 0x96, 0x45, 0xbc, 0x1e, 0xe5, 0x45, + 0x56, 0x66, 0x18, 0x44, 0x61, 0x14, 0xe6, 0x71, 0xef, 0xac, 0x21, 0x4a, 0xc2, 0x7b, 0x9a, 0x08, + 0xcd, 0xe0, 0x8f, 0x0a, 0x68, 0xc1, 0x9b, 0x0c, 0xba, 0x5b, 0x17, 0x71, 0x5e, 0x66, 0x05, 0xc6, + 0xa0, 0xa6, 0xe1, 0x96, 0xea, 0x52, 
0x5f, 0x1a, 0xb6, 0x5d, 0x8e, 0x19, 0x57, 0x56, 0x39, 0xd5, + 0x4f, 0x04, 0xc7, 0x30, 0xbe, 0x02, 0x8d, 0x7b, 0xed, 0x74, 0xb9, 0xaf, 0x0c, 0x3b, 0x97, 0x17, + 0xa3, 0xc3, 0x8d, 0x23, 0x8b, 0x55, 0x0e, 0xa6, 0x6e, 0x2d, 0xc5, 0x3f, 0xa0, 0x23, 0xa6, 0x0c, + 0x1e, 0xe2, 0x74, 0xa3, 0x2b, 0x7d, 0x69, 0xd8, 0xbd, 0xfc, 0xd0, 0xec, 0x3c, 0x9e, 0xa7, 0x26, + 0x6e, 0xe2, 0x74, 0xe3, 0xc2, 0x76, 0x8f, 0x31, 0x01, 0x78, 0x0a, 0x93, 0x47, 0x1a, 0xf0, 0xc1, + 0x54, 0x6e, 0xf4, 0xfe, 0x45, 0xa3, 0x15, 0x93, 0x7b, 0x55, 0x4e, 0xdd, 0xf6, 0xd3, 0x33, 0x64, + 0x9b, 0x3d, 0xa6, 0x71, 0xa9, 0xb7, 0xc4, 0x66, 0x0c, 0xe3, 0x3e, 0x74, 0x36, 0x75, 0x5b, 0x9c, + 0xa5, 0xba, 0xc6, 0x4b, 0x4d, 0x0a, 0xbf, 0x83, 0xd3, 0x4d, 0xbc, 0xcb, 0x93, 0xb0, 0x0a, 0xf8, + 0x5b, 0xbd, 0xae, 0x25, 0x82, 0xb3, 0xc3, 0x2d, 0x1d, 0x38, 0x00, 0x87, 0xc9, 0xf1, 0x05, 0x9c, + 0x2f, 0x88, 0xe7, 0x9a, 0xb3, 0xe0, 0xc6, 0xb4, 0x8d, 0xc0, 0xb7, 0x6f, 0x97, 0x64, 0x66, 0x7e, + 0x37, 0x89, 0x81, 0x5e, 0xe1, 0x36, 0xb4, 0xe6, 0x13, 0x7f, 0x4e, 0x90, 0xc4, 0xa0, 0x41, 0x2c, + 0x6f, 0x82, 0x64, 0xdc, 0x05, 0x98, 0xf9, 0x0b, 0xdf, 0x9a, 0x78, 0xe6, 0x8a, 0x20, 0x65, 0xf0, + 0x0b, 0xda, 0xfb, 0x0d, 0x70, 0x0f, 0xce, 0x56, 0x13, 0xcb, 0x27, 0x81, 0x77, 0xb7, 0x24, 0x47, + 0x76, 0x27, 0xa0, 0x4e, 0x1d, 0xc7, 0x12, 0x6e, 0xa6, 0xed, 0x5d, 0x7f, 0x44, 0x32, 0x06, 0xd0, + 0x0c, 0xc7, 0x9f, 0x5a, 0x04, 0x29, 0x0c, 0xdf, 0x7a, 0xae, 0x69, 0xcf, 0x91, 0x8a, 0x11, 0x9c, + 0x1a, 0x26, 0x3b, 0x4d, 0x7d, 0xcf, 0x74, 0x6c, 0xd4, 0x62, 0x4d, 0x0b, 0xc7, 0x26, 0x77, 0x48, + 0x1b, 0xfc, 0x96, 0x40, 0x13, 0x4b, 0xec, 0x13, 0xa0, 0x34, 0x12, 0x70, 0x7d, 0x94, 0x80, 0xb7, + 0xff, 0x3f, 0xbf, 0x08, 0xc2, 0x8e, 0xa4, 0x65, 0x51, 0x3d, 0x87, 0xa0, 0xf7, 0x05, 0x3a, 0x0d, + 0x1a, 0x23, 0x50, 0x1e, 0x68, 0x55, 0xe7, 0x8d, 0x41, 0xfc, 0x06, 0x5a, 0xfc, 0x1f, 0xd2, 0x65, + 0xce, 0x89, 0xc3, 0x57, 0xf9, 0xb3, 0x34, 0x0d, 0xa0, 0xbb, 0xce, 0xb6, 0x8d, 0x7b, 0xa6, 0x1d, + 0x71, 0xd1, 0x92, 0x05, 0x7a, 0x29, 0xfd, 0xfc, 0x54, 0x97, 0xa2, 0x2c, 
0x09, 0xd3, 0x68, 0x94, + 0x15, 0xd1, 0x38, 0xa2, 0x29, 0x8f, 0xfb, 0x58, 0x94, 0xc2, 0x3c, 0xde, 0x35, 0x3e, 0x97, 0x6f, + 0xe2, 0xe7, 0xaf, 0xac, 0xce, 0x27, 0x4b, 0xf3, 0x5e, 0xe3, 0xd2, 0xab, 0x7f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x18, 0x04, 0x05, 0x82, 0x58, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go new file mode 100644 index 000000000..3ce7eb74d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -0,0 +1,180 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/monitored_resource.proto + +/* +Package monitoredres is a generated protocol buffer package. + +It is generated from these files: + google/api/monitored_resource.proto + +It has these top-level messages: + MonitoredResourceDescriptor + MonitoredResource +*/ +package monitoredres + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_api "google.golang.org/genproto/googleapis/api/label" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a +// type name and a set of labels. For example, the monitored resource +// descriptor for Google Compute Engine VM instances has a type of +// `"gce_instance"` and specifies the use of the labels `"instance_id"` and +// `"zone"` to identify particular VM instances. 
+// +// Different APIs can support different monitored resource types. APIs generally +// provide a `list` method that returns the monitored resource descriptors used +// by the API. +type MonitoredResourceDescriptor struct { + // Optional. The resource name of the monitored resource descriptor: + // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use the + // resource name format `"monitoredResourceDescriptors/{type}"`. + Name string `protobuf:"bytes,5,opt,name=name" json:"name,omitempty"` + // Required. The monitored resource type. For example, the type + // `"cloudsql_database"` represents databases in Google Cloud SQL. + // The maximum length of this value is 256 characters. + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // Optional. A concise name for the monitored resource type that might be + // displayed in user interfaces. It should be a Title Cased Noun Phrase, + // without any article or other determiners. For example, + // `"Google Cloud SQL Database"`. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Optional. A detailed description of the monitored resource type that might + // be used in documentation. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // Required. A set of labels used to describe instances of this monitored + // resource type. For example, an individual Google Cloud SQL database is + // identified by values for the labels `"database_id"` and `"zone"`. 
+ Labels []*google_api.LabelDescriptor `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"` +} + +func (m *MonitoredResourceDescriptor) Reset() { *m = MonitoredResourceDescriptor{} } +func (m *MonitoredResourceDescriptor) String() string { return proto.CompactTextString(m) } +func (*MonitoredResourceDescriptor) ProtoMessage() {} +func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *MonitoredResourceDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetLabels() []*google_api.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +// An object representing a resource that can be used for monitoring, logging, +// billing, or other purposes. Examples include virtual machine instances, +// databases, and storage devices such as disks. The `type` field identifies a +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's +// schema. Information in the `labels` field identifies the actual resource and +// its attributes according to the schema. For example, a particular Compute +// Engine VM instance could be represented by the following object, because the +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels +// `"instance_id"` and `"zone"`: +// +// { "type": "gce_instance", +// "labels": { "instance_id": "12345678901234", +// "zone": "us-central1-a" }} +type MonitoredResource struct { + // Required. The monitored resource type. 
This field must match + // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For + // example, the type of a Cloud SQL database is `"cloudsql_database"`. + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // Required. Values for all of the labels listed in the associated monitored + // resource descriptor. For example, Cloud SQL databases use the labels + // `"database_id"` and `"zone"`. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *MonitoredResource) Reset() { *m = MonitoredResource{} } +func (m *MonitoredResource) String() string { return proto.CompactTextString(m) } +func (*MonitoredResource) ProtoMessage() {} +func (*MonitoredResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *MonitoredResource) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MonitoredResource) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*MonitoredResourceDescriptor)(nil), "google.api.MonitoredResourceDescriptor") + proto.RegisterType((*MonitoredResource)(nil), "google.api.MonitoredResource") +} + +func init() { proto.RegisterFile("google/api/monitored_resource.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 321 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0x4b, 0x4b, 0x3b, 0x31, + 0x10, 0x27, 0xdb, 0x07, 0xfc, 0x67, 0xff, 0x88, 0x06, 0x29, 0x4b, 0x7b, 0xa9, 0xf5, 0x52, 0x2f, + 0xbb, 0x60, 0x2f, 0x3e, 0x4e, 0xad, 0x8a, 0x08, 0x2a, 0xa5, 0x47, 0x2f, 0x25, 0x6d, 0xc3, 0x12, + 0xdc, 0x66, 0x42, 0xb2, 0x15, 0xf6, 0xeb, 0x08, 0x7e, 0x0e, 0xbf, 0x96, 0x47, 0xc9, 0xa3, 0x76, + 0xa5, 0xde, 0x26, 0xbf, 0xf9, 0x3d, 0x66, 0x32, 0x70, 0x9a, 0x23, 0xe6, 0x05, 0xcf, 0x98, 0x12, 
+ 0xd9, 0x1a, 0xa5, 0x28, 0x51, 0xf3, 0xd5, 0x5c, 0x73, 0x83, 0x1b, 0xbd, 0xe4, 0xa9, 0xd2, 0x58, + 0x22, 0x05, 0x4f, 0x4a, 0x99, 0x12, 0xdd, 0x4e, 0x4d, 0x50, 0xb0, 0x05, 0x2f, 0x3c, 0x67, 0xf0, + 0x49, 0xa0, 0xf7, 0xb4, 0x35, 0x98, 0x05, 0xfd, 0x2d, 0x37, 0x4b, 0x2d, 0x54, 0x89, 0x9a, 0x52, + 0x68, 0x4a, 0xb6, 0xe6, 0x49, 0xab, 0x4f, 0x86, 0xff, 0x66, 0xae, 0xb6, 0x58, 0x59, 0x29, 0x9e, + 0x10, 0x8f, 0xd9, 0x9a, 0x9e, 0xc0, 0xff, 0x95, 0x30, 0xaa, 0x60, 0xd5, 0xdc, 0xf1, 0x23, 0xd7, + 0x8b, 0x03, 0xf6, 0x6c, 0x65, 0x7d, 0x88, 0x57, 0xc1, 0x58, 0xa0, 0x4c, 0x1a, 0x81, 0xb1, 0x83, + 0xe8, 0x08, 0xda, 0x6e, 0x36, 0x93, 0x34, 0xfb, 0x8d, 0x61, 0x7c, 0xde, 0x4b, 0x77, 0x1b, 0xa4, + 0x8f, 0xb6, 0xb3, 0x9b, 0x6c, 0x16, 0xa8, 0x83, 0x0f, 0x02, 0x47, 0x7b, 0x1b, 0xfc, 0x39, 0xe3, + 0xf8, 0xc7, 0x3e, 0x72, 0xf6, 0x67, 0x75, 0xfb, 0x3d, 0x0b, 0x1f, 0x68, 0xee, 0x64, 0xa9, 0xab, + 0x6d, 0x58, 0xf7, 0x12, 0xe2, 0x1a, 0x4c, 0x0f, 0xa1, 0xf1, 0xca, 0xab, 0x10, 0x62, 0x4b, 0x7a, + 0x0c, 0xad, 0x37, 0x56, 0x6c, 0xb6, 0x1f, 0xe0, 0x1f, 0x57, 0xd1, 0x05, 0x99, 0x54, 0x70, 0xb0, + 0xc4, 0x75, 0x2d, 0x72, 0xd2, 0xd9, 0xcb, 0x9c, 0xda, 0x9b, 0x4c, 0xc9, 0xcb, 0x4d, 0x60, 0xe5, + 0x58, 0x30, 0x99, 0xa7, 0xa8, 0xf3, 0x2c, 0xe7, 0xd2, 0x5d, 0x2c, 0xf3, 0x2d, 0xa6, 0x84, 0xf9, + 0x7d, 0x7d, 0xcd, 0xcd, 0x75, 0xfd, 0xf1, 0x45, 0xc8, 0x7b, 0xd4, 0xbc, 0x1f, 0x4f, 0x1f, 0x16, + 0x6d, 0xa7, 0x1c, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xfb, 0xfb, 0x11, 0x36, 0x02, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.pb.go new file mode 100644 index 000000000..ac8de7ddb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/auth.pb.go @@ -0,0 +1,393 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/auth.proto + +/* +Package serviceconfig is a generated protocol buffer package. 
+ +It is generated from these files: + google/api/auth.proto + google/api/backend.proto + google/api/billing.proto + google/api/consumer.proto + google/api/context.proto + google/api/control.proto + google/api/documentation.proto + google/api/endpoint.proto + google/api/log.proto + google/api/logging.proto + google/api/monitoring.proto + google/api/quota.proto + google/api/service.proto + google/api/source_info.proto + google/api/system_parameter.proto + google/api/usage.proto + +It has these top-level messages: + Authentication + AuthenticationRule + AuthProvider + OAuthRequirements + AuthRequirement + Backend + BackendRule + Billing + BillingStatusRule + ProjectProperties + Property + Context + ContextRule + Control + Documentation + DocumentationRule + Page + Endpoint + LogDescriptor + Logging + Monitoring + Quota + MetricRule + QuotaLimit + Service + SourceInfo + SystemParameters + SystemParameterRule + SystemParameter + Usage + UsageRule +*/ +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Authentication` defines the authentication configuration for an API. 
+// +// Example for an API targeted for external use: +// +// name: calendar.googleapis.com +// authentication: +// providers: +// - id: google_calendar_auth +// jwks_uri: https://www.googleapis.com/oauth2/v1/certs +// issuer: https://securetoken.google.com +// rules: +// - selector: "*" +// requirements: +// provider_id: google_calendar_auth +type Authentication struct { + // A list of authentication rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*AuthenticationRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"` + // Defines a set of authentication providers that a service supports. + Providers []*AuthProvider `protobuf:"bytes,4,rep,name=providers" json:"providers,omitempty"` +} + +func (m *Authentication) Reset() { *m = Authentication{} } +func (m *Authentication) String() string { return proto.CompactTextString(m) } +func (*Authentication) ProtoMessage() {} +func (*Authentication) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Authentication) GetRules() []*AuthenticationRule { + if m != nil { + return m.Rules + } + return nil +} + +func (m *Authentication) GetProviders() []*AuthProvider { + if m != nil { + return m.Providers + } + return nil +} + +// Authentication rules for the service. +// +// By default, if a method has any authentication requirements, every request +// must include a valid credential matching one of the requirements. +// It's an error to include more than one kind of credential in a single +// request. +// +// If a method doesn't have any auth requirements, request credentials will be +// ignored. +type AuthenticationRule struct { + // Selects the methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // The requirements for OAuth credentials. 
+ Oauth *OAuthRequirements `protobuf:"bytes,2,opt,name=oauth" json:"oauth,omitempty"` + // Whether to allow requests without a credential. The credential can be + // an OAuth token, Google cookies (first-party auth) or EndUserCreds. + // + // For requests without credentials, if the service control environment is + // specified, each incoming request **must** be associated with a service + // consumer. This can be done by passing an API key that belongs to a consumer + // project. + AllowWithoutCredential bool `protobuf:"varint,5,opt,name=allow_without_credential,json=allowWithoutCredential" json:"allow_without_credential,omitempty"` + // Requirements for additional authentication providers. + Requirements []*AuthRequirement `protobuf:"bytes,7,rep,name=requirements" json:"requirements,omitempty"` +} + +func (m *AuthenticationRule) Reset() { *m = AuthenticationRule{} } +func (m *AuthenticationRule) String() string { return proto.CompactTextString(m) } +func (*AuthenticationRule) ProtoMessage() {} +func (*AuthenticationRule) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *AuthenticationRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *AuthenticationRule) GetOauth() *OAuthRequirements { + if m != nil { + return m.Oauth + } + return nil +} + +func (m *AuthenticationRule) GetAllowWithoutCredential() bool { + if m != nil { + return m.AllowWithoutCredential + } + return false +} + +func (m *AuthenticationRule) GetRequirements() []*AuthRequirement { + if m != nil { + return m.Requirements + } + return nil +} + +// Configuration for an anthentication provider, including support for +// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). +type AuthProvider struct { + // The unique identifier of the auth provider. It will be referred to by + // `AuthRequirement.provider_id`. + // + // Example: "bookstore_auth". 
+ Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // Identifies the principal that issued the JWT. See + // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 + // Usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + Issuer string `protobuf:"bytes,2,opt,name=issuer" json:"issuer,omitempty"` + // URL of the provider's public key set to validate signature of the JWT. See + // [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata). + // Optional if the key set document: + // - can be retrieved from + // [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html + // of the issuer. + // - can be inferred from the email domain of the issuer (e.g. a Google service account). + // + // Example: https://www.googleapis.com/oauth2/v1/certs + JwksUri string `protobuf:"bytes,3,opt,name=jwks_uri,json=jwksUri" json:"jwks_uri,omitempty"` + // The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). + // that are allowed to access. A JWT containing any of these audiences will + // be accepted. When this setting is absent, only JWTs with audience + // "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]" + // will be accepted. For example, if no audiences are in the setting, + // LibraryService API will only accept JWTs with the following audience + // "https://library-example.googleapis.com/google.example.library.v1.LibraryService". + // + // Example: + // + // audiences: bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com + Audiences string `protobuf:"bytes,4,opt,name=audiences" json:"audiences,omitempty"` + // Redirect URL if JWT token is required but no present or is expired. + // Implement authorizationUrl of securityDefinitions in OpenAPI spec. 
+ AuthorizationUrl string `protobuf:"bytes,5,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` +} + +func (m *AuthProvider) Reset() { *m = AuthProvider{} } +func (m *AuthProvider) String() string { return proto.CompactTextString(m) } +func (*AuthProvider) ProtoMessage() {} +func (*AuthProvider) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *AuthProvider) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *AuthProvider) GetIssuer() string { + if m != nil { + return m.Issuer + } + return "" +} + +func (m *AuthProvider) GetJwksUri() string { + if m != nil { + return m.JwksUri + } + return "" +} + +func (m *AuthProvider) GetAudiences() string { + if m != nil { + return m.Audiences + } + return "" +} + +func (m *AuthProvider) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +// OAuth scopes are a way to define data and permissions on data. For example, +// there are scopes defined for "Read-only access to Google Calendar" and +// "Access to Cloud Platform". Users can consent to a scope for an application, +// giving it permission to access that data on their behalf. +// +// OAuth scope specifications should be fairly coarse grained; a user will need +// to see and understand the text description of what your scope means. +// +// In most cases: use one or at most two OAuth scopes for an entire family of +// products. If your product has multiple APIs, you should probably be sharing +// the OAuth scope across all of those APIs. +// +// When you need finer grained OAuth consent screens: talk with your product +// management about how developers will use them in practice. +// +// Please note that even though each of the canonical scopes is enough for a +// request to be accepted and passed to the backend, a request can still fail +// due to the backend requiring additional scopes or permissions. 
+type OAuthRequirements struct { + // The list of publicly documented OAuth scopes that are allowed access. An + // OAuth token containing any of these scopes will be accepted. + // + // Example: + // + // canonical_scopes: https://www.googleapis.com/auth/calendar, + // https://www.googleapis.com/auth/calendar.read + CanonicalScopes string `protobuf:"bytes,1,opt,name=canonical_scopes,json=canonicalScopes" json:"canonical_scopes,omitempty"` +} + +func (m *OAuthRequirements) Reset() { *m = OAuthRequirements{} } +func (m *OAuthRequirements) String() string { return proto.CompactTextString(m) } +func (*OAuthRequirements) ProtoMessage() {} +func (*OAuthRequirements) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *OAuthRequirements) GetCanonicalScopes() string { + if m != nil { + return m.CanonicalScopes + } + return "" +} + +// User-defined authentication requirements, including support for +// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). +type AuthRequirement struct { + // [id][google.api.AuthProvider.id] from authentication provider. + // + // Example: + // + // provider_id: bookstore_auth + ProviderId string `protobuf:"bytes,1,opt,name=provider_id,json=providerId" json:"provider_id,omitempty"` + // NOTE: This will be deprecated soon, once AuthProvider.audiences is + // implemented and accepted in all the runtime components. + // + // The list of JWT + // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). + // that are allowed to access. A JWT containing any of these audiences will + // be accepted. When this setting is absent, only JWTs with audience + // "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]" + // will be accepted. 
For example, if no audiences are in the setting, + // LibraryService API will only accept JWTs with the following audience + // "https://library-example.googleapis.com/google.example.library.v1.LibraryService". + // + // Example: + // + // audiences: bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com + Audiences string `protobuf:"bytes,2,opt,name=audiences" json:"audiences,omitempty"` +} + +func (m *AuthRequirement) Reset() { *m = AuthRequirement{} } +func (m *AuthRequirement) String() string { return proto.CompactTextString(m) } +func (*AuthRequirement) ProtoMessage() {} +func (*AuthRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *AuthRequirement) GetProviderId() string { + if m != nil { + return m.ProviderId + } + return "" +} + +func (m *AuthRequirement) GetAudiences() string { + if m != nil { + return m.Audiences + } + return "" +} + +func init() { + proto.RegisterType((*Authentication)(nil), "google.api.Authentication") + proto.RegisterType((*AuthenticationRule)(nil), "google.api.AuthenticationRule") + proto.RegisterType((*AuthProvider)(nil), "google.api.AuthProvider") + proto.RegisterType((*OAuthRequirements)(nil), "google.api.OAuthRequirements") + proto.RegisterType((*AuthRequirement)(nil), "google.api.AuthRequirement") +} + +func init() { proto.RegisterFile("google/api/auth.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 465 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0x5f, 0x6b, 0x13, 0x4f, + 0x14, 0x65, 0x93, 0xa6, 0xcd, 0xde, 0x94, 0xb4, 0x1d, 0xf8, 0x95, 0xfd, 0xd5, 0xaa, 0x21, 0x4f, + 0x11, 0x61, 0x03, 0xad, 0x88, 0x20, 0x28, 0xad, 0x88, 0xf4, 0xc9, 0x30, 0x52, 0x04, 0x5f, 0x96, + 0x71, 0x76, 0xdc, 0x8c, 0x9d, 0xce, 0x5d, 0xe7, 0x4f, 0x03, 0x3e, 0xf8, 0x49, 0x7c, 0xf2, 0x93, + 0xf9, 0x51, 0x64, 0x67, 0xb7, 0xc9, 0x6e, 0xfa, 0x78, 0xef, 0x39, 0xe7, 0xde, 0x7b, 0xce, 0x0c, + 0xfc, 0x57, 
0x20, 0x16, 0x4a, 0xcc, 0x59, 0x29, 0xe7, 0xcc, 0xbb, 0x65, 0x5a, 0x1a, 0x74, 0x48, + 0xa0, 0x6e, 0xa7, 0xac, 0x94, 0x27, 0xa7, 0x6d, 0x8a, 0xd6, 0xe8, 0x98, 0x93, 0xa8, 0x6d, 0xcd, + 0x9c, 0xfe, 0x82, 0xf1, 0x85, 0x77, 0x4b, 0xa1, 0x9d, 0xe4, 0x01, 0x20, 0x2f, 0x60, 0x60, 0xbc, + 0x12, 0x36, 0xe9, 0x4f, 0xfa, 0xb3, 0xd1, 0xd9, 0x93, 0x74, 0x33, 0x2b, 0xed, 0x52, 0xa9, 0x57, + 0x82, 0xd6, 0x64, 0xf2, 0x12, 0xe2, 0xd2, 0xe0, 0x9d, 0xcc, 0x85, 0xb1, 0xc9, 0x4e, 0x50, 0x26, + 0xdb, 0xca, 0x45, 0x43, 0xa0, 0x1b, 0xea, 0xf4, 0x6f, 0x04, 0xe4, 0xe1, 0x54, 0x72, 0x02, 0x43, + 0x2b, 0x94, 0xe0, 0x0e, 0x4d, 0x12, 0x4d, 0xa2, 0x59, 0x4c, 0xd7, 0x35, 0x39, 0x87, 0x01, 0x56, + 0x5e, 0x93, 0xde, 0x24, 0x9a, 0x8d, 0xce, 0x1e, 0xb7, 0xd7, 0x7c, 0xac, 0x66, 0x51, 0xf1, 0xc3, + 0x4b, 0x23, 0x6e, 0x85, 0x76, 0x96, 0xd6, 0x5c, 0xf2, 0x0a, 0x12, 0xa6, 0x14, 0xae, 0xb2, 0x95, + 0x74, 0x4b, 0xf4, 0x2e, 0xe3, 0x46, 0xe4, 0xd5, 0x52, 0xa6, 0x92, 0xc1, 0x24, 0x9a, 0x0d, 0xe9, + 0x71, 0xc0, 0x3f, 0xd7, 0xf0, 0xbb, 0x35, 0x4a, 0xde, 0xc2, 0xbe, 0x69, 0x0d, 0x4c, 0xf6, 0x82, + 0xb9, 0x47, 0xdb, 0xe6, 0x5a, 0x4b, 0x69, 0x47, 0x30, 0xfd, 0x1d, 0xc1, 0x7e, 0xdb, 0x3e, 0x19, + 0x43, 0x4f, 0xe6, 0x8d, 0xad, 0x9e, 0xcc, 0xc9, 0x31, 0xec, 0x4a, 0x6b, 0xbd, 0x30, 0xc1, 0x51, + 0x4c, 0x9b, 0x8a, 0xfc, 0x0f, 0xc3, 0xef, 0xab, 0x1b, 0x9b, 0x79, 0x23, 0x93, 0x7e, 0x40, 0xf6, + 0xaa, 0xfa, 0xda, 0x48, 0x72, 0x0a, 0x31, 0xf3, 0xb9, 0x14, 0x9a, 0x8b, 0x2a, 0xee, 0x0a, 0xdb, + 0x34, 0xc8, 0x73, 0x38, 0xaa, 0x4c, 0xa3, 0x91, 0x3f, 0x43, 0xa4, 0x99, 0x37, 0xb5, 0xcb, 0x98, + 0x1e, 0x76, 0x80, 0x6b, 0xa3, 0xa6, 0x6f, 0xe0, 0xe8, 0x41, 0x6a, 0xe4, 0x19, 0x1c, 0x72, 0xa6, + 0x51, 0x4b, 0xce, 0x54, 0x66, 0x39, 0x96, 0xc2, 0x36, 0x07, 0x1f, 0xac, 0xfb, 0x9f, 0x42, 0x7b, + 0xba, 0x80, 0x83, 0x2d, 0x39, 0x79, 0x0a, 0xa3, 0xfb, 0x17, 0xce, 0xd6, 0x4e, 0xe1, 0xbe, 0x75, + 0x95, 0x77, 0xcf, 0xef, 0x6d, 0x9d, 0x7f, 0x79, 0x03, 0x63, 0x8e, 0xb7, 0xad, 0x80, 0x2f, 0xe3, + 0x26, 0x3f, 0x87, 0x8b, 0xe8, 0xcb, 0xfb, 0x06, 
0x28, 0x50, 0x31, 0x5d, 0xa4, 0x68, 0x8a, 0x79, + 0x21, 0x74, 0xf8, 0xce, 0xf3, 0x1a, 0x62, 0xa5, 0xb4, 0xe1, 0xbf, 0x5b, 0x61, 0xee, 0x24, 0x17, + 0x1c, 0xf5, 0x37, 0x59, 0xbc, 0xee, 0x54, 0x7f, 0x7a, 0x3b, 0x1f, 0x2e, 0x16, 0x57, 0x5f, 0x77, + 0x83, 0xf0, 0xfc, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe5, 0xa3, 0x9d, 0xc6, 0x4a, 0x03, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.pb.go new file mode 100644 index 000000000..5704d9b0a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/backend.pb.go @@ -0,0 +1,98 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/backend.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `Backend` defines the backend configuration for a service. +type Backend struct { + // A list of API backend rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*BackendRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *Backend) Reset() { *m = Backend{} } +func (m *Backend) String() string { return proto.CompactTextString(m) } +func (*Backend) ProtoMessage() {} +func (*Backend) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Backend) GetRules() []*BackendRule { + if m != nil { + return m.Rules + } + return nil +} + +// A backend rule provides configuration for an individual API element. +type BackendRule struct { + // Selects the methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. 
+ Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // The address of the API backend. + Address string `protobuf:"bytes,2,opt,name=address" json:"address,omitempty"` + // The number of seconds to wait for a response from a request. The + // default depends on the deployment context. + Deadline float64 `protobuf:"fixed64,3,opt,name=deadline" json:"deadline,omitempty"` +} + +func (m *BackendRule) Reset() { *m = BackendRule{} } +func (m *BackendRule) String() string { return proto.CompactTextString(m) } +func (*BackendRule) ProtoMessage() {} +func (*BackendRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *BackendRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *BackendRule) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *BackendRule) GetDeadline() float64 { + if m != nil { + return m.Deadline + } + return 0 +} + +func init() { + proto.RegisterType((*Backend)(nil), "google.api.Backend") + proto.RegisterType((*BackendRule)(nil), "google.api.BackendRule") +} + +func init() { proto.RegisterFile("google/api/backend.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x31, 0x4f, 0x03, 0x21, + 0x18, 0x86, 0x43, 0xab, 0x56, 0xbf, 0x1a, 0x87, 0x5b, 0x24, 0x4e, 0x97, 0x4e, 0xb7, 0xc8, 0x25, + 0xba, 0x98, 0x38, 0x79, 0x89, 0x31, 0x6e, 0x17, 0x46, 0x17, 0x43, 0xe1, 0x93, 0x10, 0x91, 0x8f, + 0x40, 0xeb, 0x0f, 0xf2, 0x97, 0x9a, 0xc2, 0x59, 0xdb, 0xf1, 0xe1, 0x79, 0xdf, 0xc0, 0x0b, 0x70, + 0x4b, 0x64, 0x3d, 0xf6, 0x2a, 0xba, 0x7e, 0xad, 0xf4, 0x27, 0x06, 0x23, 0x62, 0xa2, 0x0d, 0x35, + 0x50, 0x8d, 0x50, 0xd1, 0xad, 0x1e, 0x60, 0x31, 0x54, 0xd9, 0xdc, 0xc2, 0x69, 0xda, 0x7a, 0xcc, + 0x9c, 0xb5, 0xf3, 0x6e, 0x79, 0x77, 0x2d, 0xfe, 0x63, 0x62, 0xca, 0xc8, 0xad, 0x47, 0x59, 0x53, + 0xab, 0x77, 0x58, 0x1e, 
0x9c, 0x36, 0x37, 0x70, 0x9e, 0xd1, 0xa3, 0xde, 0x50, 0xe2, 0xac, 0x65, + 0xdd, 0x85, 0xdc, 0x73, 0xc3, 0x61, 0xa1, 0x8c, 0x49, 0x98, 0x33, 0x9f, 0x15, 0xf5, 0x87, 0xbb, + 0x96, 0x41, 0x65, 0xbc, 0x0b, 0xc8, 0xe7, 0x2d, 0xeb, 0x98, 0xdc, 0xf3, 0x10, 0xe0, 0x4a, 0xd3, + 0xd7, 0xc1, 0x2b, 0x86, 0xcb, 0xe9, 0xc2, 0x71, 0x37, 0x63, 0x64, 0x6f, 0xcf, 0x93, 0xb3, 0xe4, + 0x55, 0xb0, 0x82, 0x92, 0xed, 0x2d, 0x86, 0x32, 0xb2, 0xaf, 0x4a, 0x45, 0x97, 0xcb, 0x0f, 0x64, + 0x4c, 0xdf, 0x4e, 0xa3, 0xa6, 0xf0, 0xe1, 0xec, 0xe3, 0x11, 0xfd, 0xcc, 0x4e, 0x5e, 0x9e, 0xc6, + 0xd7, 0xf5, 0x59, 0x29, 0xde, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x93, 0x9e, 0x00, 0x39, + 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.pb.go new file mode 100644 index 000000000..180a1370d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/billing.pb.go @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/billing.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/metric" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Billing related configuration of the service. 
+// +// The following example shows how to configure metrics for billing: +// +// metrics: +// - name: library.googleapis.com/read_calls +// metric_kind: DELTA +// value_type: INT64 +// - name: library.googleapis.com/write_calls +// metric_kind: DELTA +// value_type: INT64 +// billing: +// metrics: +// - library.googleapis.com/read_calls +// - library.googleapis.com/write_calls +// +// The next example shows how to enable billing status check and customize the +// check behavior. It makes sure billing status check is included in the `Check` +// method of [Service Control API](https://cloud.google.com/service-control/). +// In the example, "google.storage.Get" method can be served when the billing +// status is either `current` or `delinquent`, while "google.storage.Write" +// method can only be served when the billing status is `current`: +// +// billing: +// rules: +// - selector: google.storage.Get +// allowed_statuses: +// - current +// - delinquent +// - selector: google.storage.Write +// allowed_statuses: current +// +// Mostly services should only allow `current` status when serving requests. +// In addition, services can choose to allow both `current` and `delinquent` +// statuses when serving read-only requests to resources. If there's no +// matching selector for operation, no billing status check will be performed. +// +type Billing struct { + // Names of the metrics to report to billing. Each name must + // be defined in [Service.metrics][google.api.Service.metrics] section. + Metrics []string `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"` + // A list of billing status rules for configuring billing status check. 
+ Rules []*BillingStatusRule `protobuf:"bytes,5,rep,name=rules" json:"rules,omitempty"` +} + +func (m *Billing) Reset() { *m = Billing{} } +func (m *Billing) String() string { return proto.CompactTextString(m) } +func (*Billing) ProtoMessage() {} +func (*Billing) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *Billing) GetMetrics() []string { + if m != nil { + return m.Metrics + } + return nil +} + +func (m *Billing) GetRules() []*BillingStatusRule { + if m != nil { + return m.Rules + } + return nil +} + +// Defines the billing status requirements for operations. +// +// When used with +// [Service Control API](https://cloud.google.com/service-control/), the +// following statuses are supported: +// +// - **current**: the associated billing account is up to date and capable of +// paying for resource usages. +// - **delinquent**: the associated billing account has a correctable problem, +// such as late payment. +// +// Mostly services should only allow `current` status when serving requests. +// In addition, services can choose to allow both `current` and `delinquent` +// statuses when serving read-only requests to resources. If the list of +// allowed_statuses is empty, it means no billing requirement. +// +type BillingStatusRule struct { + // Selects the operation names to which this rule applies. + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // Allowed billing statuses. The billing status check passes if the actual + // billing status matches any of the provided values here. 
+ AllowedStatuses []string `protobuf:"bytes,2,rep,name=allowed_statuses,json=allowedStatuses" json:"allowed_statuses,omitempty"` +} + +func (m *BillingStatusRule) Reset() { *m = BillingStatusRule{} } +func (m *BillingStatusRule) String() string { return proto.CompactTextString(m) } +func (*BillingStatusRule) ProtoMessage() {} +func (*BillingStatusRule) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *BillingStatusRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *BillingStatusRule) GetAllowedStatuses() []string { + if m != nil { + return m.AllowedStatuses + } + return nil +} + +func init() { + proto.RegisterType((*Billing)(nil), "google.api.Billing") + proto.RegisterType((*BillingStatusRule)(nil), "google.api.BillingStatusRule") +} + +func init() { proto.RegisterFile("google/api/billing.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0x4f, 0x4b, 0x03, 0x31, + 0x10, 0xc5, 0x59, 0x4b, 0xad, 0x8d, 0xe2, 0x9f, 0xbd, 0x18, 0x16, 0x85, 0xa5, 0xa7, 0xf5, 0x92, + 0x05, 0x7b, 0xf4, 0xb6, 0xe0, 0xbd, 0x6c, 0x2f, 0xd2, 0x8b, 0xa4, 0x71, 0x0c, 0x81, 0x34, 0xb3, + 0x64, 0xb2, 0xfa, 0xf5, 0xc5, 0x24, 0xea, 0x4a, 0x8f, 0x6f, 0x7e, 0xef, 0x65, 0xe6, 0x85, 0x71, + 0x8d, 0xa8, 0x2d, 0xb4, 0x72, 0x30, 0xed, 0xde, 0x58, 0x6b, 0x9c, 0x16, 0x83, 0xc7, 0x80, 0x25, + 0x4b, 0x44, 0xc8, 0xc1, 0x54, 0x77, 0x13, 0x97, 0x74, 0x0e, 0x83, 0x0c, 0x06, 0x1d, 0x25, 0x67, + 0x75, 0x3b, 0xa1, 0x07, 0x08, 0xde, 0xa8, 0x04, 0x56, 0x2f, 0x6c, 0xd1, 0xa5, 0x37, 0x4b, 0xce, + 0x16, 0x09, 0x11, 0x2f, 0xea, 0x59, 0xb3, 0xec, 0x7f, 0x64, 0xb9, 0x66, 0x73, 0x3f, 0x5a, 0x20, + 0x3e, 0xaf, 0x67, 0xcd, 0xf9, 0xe3, 0xbd, 0xf8, 0xdb, 0x2b, 0x72, 0x7a, 0x1b, 0x64, 0x18, 0xa9, + 0x1f, 0x2d, 0xf4, 0xc9, 0xbb, 0xda, 0xb1, 0x9b, 0x23, 0x56, 0x56, 0xec, 0x8c, 0xc0, 0x82, 0x0a, + 0xe8, 0x79, 0x51, 0x17, 0xcd, 0xb2, 
0xff, 0xd5, 0xe5, 0x03, 0xbb, 0x96, 0xd6, 0xe2, 0x27, 0xbc, + 0xbd, 0x52, 0x4c, 0x00, 0xf1, 0x93, 0x78, 0xc8, 0x55, 0x9e, 0x6f, 0xf3, 0xb8, 0xd3, 0xec, 0x52, + 0xe1, 0x61, 0x72, 0x46, 0x77, 0x91, 0x77, 0x6d, 0xbe, 0x5b, 0x6d, 0x8a, 0xdd, 0x73, 0x66, 0x1a, + 0xad, 0x74, 0x5a, 0xa0, 0xd7, 0xad, 0x06, 0x17, 0x3b, 0xb7, 0x09, 0xc9, 0xc1, 0x50, 0xfc, 0x0f, + 0x02, 0xff, 0x61, 0x14, 0x28, 0x74, 0xef, 0x46, 0x3f, 0xfd, 0x53, 0xfb, 0xd3, 0x98, 0x58, 0x7f, + 0x05, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x90, 0x2d, 0x32, 0x84, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.pb.go new file mode 100644 index 000000000..3e92ac506 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/consumer.pb.go @@ -0,0 +1,158 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/consumer.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Supported data type of the property values +type Property_PropertyType int32 + +const ( + // The type is unspecified, and will result in an error. + Property_UNSPECIFIED Property_PropertyType = 0 + // The type is `int64`. + Property_INT64 Property_PropertyType = 1 + // The type is `bool`. + Property_BOOL Property_PropertyType = 2 + // The type is `string`. + Property_STRING Property_PropertyType = 3 + // The type is 'double'. 
+ Property_DOUBLE Property_PropertyType = 4 +) + +var Property_PropertyType_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "INT64", + 2: "BOOL", + 3: "STRING", + 4: "DOUBLE", +} +var Property_PropertyType_value = map[string]int32{ + "UNSPECIFIED": 0, + "INT64": 1, + "BOOL": 2, + "STRING": 3, + "DOUBLE": 4, +} + +func (x Property_PropertyType) String() string { + return proto.EnumName(Property_PropertyType_name, int32(x)) +} +func (Property_PropertyType) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{1, 0} } + +// A descriptor for defining project properties for a service. One service may +// have many consumer projects, and the service may want to behave differently +// depending on some properties on the project. For example, a project may be +// associated with a school, or a business, or a government agency, a business +// type property on the project may affect how a service responds to the client. +// This descriptor defines which properties are allowed to be set on a project. +// +// Example: +// +// project_properties: +// properties: +// - name: NO_WATERMARK +// type: BOOL +// description: Allows usage of the API without watermarks. +// - name: EXTENDED_TILE_CACHE_PERIOD +// type: INT64 +type ProjectProperties struct { + // List of per consumer project-specific properties. + Properties []*Property `protobuf:"bytes,1,rep,name=properties" json:"properties,omitempty"` +} + +func (m *ProjectProperties) Reset() { *m = ProjectProperties{} } +func (m *ProjectProperties) String() string { return proto.CompactTextString(m) } +func (*ProjectProperties) ProtoMessage() {} +func (*ProjectProperties) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *ProjectProperties) GetProperties() []*Property { + if m != nil { + return m.Properties + } + return nil +} + +// Defines project properties. 
+// +// API services can define properties that can be assigned to consumer projects +// so that backends can perform response customization without having to make +// additional calls or maintain additional storage. For example, Maps API +// defines properties that controls map tile cache period, or whether to embed a +// watermark in a result. +// +// These values can be set via API producer console. Only API providers can +// define and set these properties. +type Property struct { + // The name of the property (a.k.a key). + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The type of this property. + Type Property_PropertyType `protobuf:"varint,2,opt,name=type,enum=google.api.Property_PropertyType" json:"type,omitempty"` + // The description of the property + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} +func (*Property) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *Property) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Property) GetType() Property_PropertyType { + if m != nil { + return m.Type + } + return Property_UNSPECIFIED +} + +func (m *Property) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*ProjectProperties)(nil), "google.api.ProjectProperties") + proto.RegisterType((*Property)(nil), "google.api.Property") + proto.RegisterEnum("google.api.Property_PropertyType", Property_PropertyType_name, Property_PropertyType_value) +} + +func init() { proto.RegisterFile("google/api/consumer.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 299 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4f, 0x4f, 0xf2, 0x40, 
+ 0x10, 0xc6, 0xdf, 0x85, 0xbe, 0x04, 0x06, 0xc5, 0xba, 0xf1, 0x50, 0x6f, 0x95, 0x13, 0xa7, 0x36, + 0x41, 0xf4, 0xe2, 0xad, 0x50, 0x4d, 0x13, 0x02, 0x4d, 0x81, 0x8b, 0xb7, 0x5a, 0xc7, 0x75, 0x0d, + 0xec, 0x6c, 0xb6, 0xd5, 0x84, 0x0f, 0xe8, 0xf7, 0x32, 0x2c, 0x88, 0x35, 0xf1, 0xf6, 0xcc, 0x3e, + 0x7f, 0xb2, 0xf9, 0xc1, 0xa5, 0x20, 0x12, 0x6b, 0x0c, 0x73, 0x2d, 0xc3, 0x82, 0x54, 0xf9, 0xbe, + 0x41, 0x13, 0x68, 0x43, 0x15, 0x71, 0xd8, 0x5b, 0x41, 0xae, 0x65, 0x3f, 0x81, 0xf3, 0xd4, 0xd0, + 0x1b, 0x16, 0x55, 0x6a, 0x48, 0xa3, 0xa9, 0x24, 0x96, 0x7c, 0x04, 0xa0, 0x8f, 0x97, 0xc7, 0xfc, + 0xe6, 0xa0, 0x3b, 0xbc, 0x08, 0x7e, 0x5a, 0xc1, 0x21, 0xbb, 0xcd, 0x6a, 0xb9, 0xfe, 0x27, 0x83, + 0xf6, 0xb7, 0xc1, 0x39, 0x38, 0x2a, 0xdf, 0xa0, 0xc7, 0x7c, 0x36, 0xe8, 0x64, 0x56, 0xf3, 0x1b, + 0x70, 0xaa, 0xad, 0x46, 0xaf, 0xe1, 0xb3, 0x41, 0x6f, 0x78, 0xf5, 0xd7, 0xe0, 0x51, 0x2c, 0xb7, + 0x1a, 0x33, 0x1b, 0xe7, 0x3e, 0x74, 0x9f, 0xb1, 0x2c, 0x8c, 0xd4, 0x95, 0x24, 0xe5, 0x35, 0xed, + 0x62, 0xfd, 0xa9, 0x3f, 0x85, 0x93, 0x7a, 0x8f, 0x9f, 0x41, 0x77, 0x35, 0x5b, 0xa4, 0xf1, 0x38, + 0xb9, 0x4f, 0xe2, 0x89, 0xfb, 0x8f, 0x77, 0xe0, 0x7f, 0x32, 0x5b, 0xde, 0x8e, 0x5c, 0xc6, 0xdb, + 0xe0, 0x44, 0xf3, 0xf9, 0xd4, 0x6d, 0x70, 0x80, 0xd6, 0x62, 0x99, 0x25, 0xb3, 0x07, 0xb7, 0xb9, + 0xd3, 0x93, 0xf9, 0x2a, 0x9a, 0xc6, 0xae, 0x13, 0xbd, 0x42, 0xaf, 0xa0, 0x4d, 0xed, 0x77, 0xd1, + 0xe9, 0xf8, 0x00, 0x30, 0xdd, 0xf1, 0x4b, 0xd9, 0x63, 0x7c, 0x30, 0x05, 0xad, 0x73, 0x25, 0x02, + 0x32, 0x22, 0x14, 0xa8, 0x2c, 0xdd, 0x70, 0x6f, 0xe5, 0x5a, 0x96, 0x96, 0x7d, 0x89, 0xe6, 0x43, + 0x16, 0x58, 0x90, 0x7a, 0x91, 0xe2, 0xee, 0xd7, 0xf5, 0xd4, 0xb2, 0x8d, 0xeb, 0xaf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xb7, 0xa4, 0x04, 0x2c, 0xac, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.pb.go new file mode 100644 index 000000000..f22859b08 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/context.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/context.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `Context` defines which contexts an API requests. +// +// Example: +// +// context: +// rules: +// - selector: "*" +// requested: +// - google.rpc.context.ProjectContext +// - google.rpc.context.OriginContext +// +// The above specifies that all methods in the API request +// `google.rpc.context.ProjectContext` and +// `google.rpc.context.OriginContext`. +// +// Available context types are defined in package +// `google.rpc.context`. +type Context struct { + // A list of RPC context rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*ContextRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *Context) Reset() { *m = Context{} } +func (m *Context) String() string { return proto.CompactTextString(m) } +func (*Context) ProtoMessage() {} +func (*Context) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *Context) GetRules() []*ContextRule { + if m != nil { + return m.Rules + } + return nil +} + +// A context rule provides information about the context for an individual API +// element. +type ContextRule struct { + // Selects the methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // A list of full type names of requested contexts. 
+ Requested []string `protobuf:"bytes,2,rep,name=requested" json:"requested,omitempty"` + // A list of full type names of provided contexts. + Provided []string `protobuf:"bytes,3,rep,name=provided" json:"provided,omitempty"` +} + +func (m *ContextRule) Reset() { *m = ContextRule{} } +func (m *ContextRule) String() string { return proto.CompactTextString(m) } +func (*ContextRule) ProtoMessage() {} +func (*ContextRule) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *ContextRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *ContextRule) GetRequested() []string { + if m != nil { + return m.Requested + } + return nil +} + +func (m *ContextRule) GetProvided() []string { + if m != nil { + return m.Provided + } + return nil +} + +func init() { + proto.RegisterType((*Context)(nil), "google.api.Context") + proto.RegisterType((*ContextRule)(nil), "google.api.ContextRule") +} + +func init() { proto.RegisterFile("google/api/context.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 231 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4b, 0xc4, 0x30, + 0x14, 0xc4, 0xe9, 0xd6, 0x7f, 0x7d, 0x2b, 0x1e, 0x7a, 0x31, 0x88, 0x87, 0xb2, 0xa7, 0x5e, 0x4c, + 0x41, 0x2f, 0x82, 0x27, 0x57, 0x44, 0xbc, 0x95, 0x1e, 0xbd, 0xc5, 0xf4, 0x19, 0x02, 0x31, 0x2f, + 0x26, 0xe9, 0xe2, 0xe7, 0xf1, 0x93, 0xca, 0x26, 0x65, 0xff, 0x1c, 0x67, 0x7e, 0x33, 0x24, 0xf3, + 0x80, 0x29, 0x22, 0x65, 0xb0, 0x13, 0x4e, 0x77, 0x92, 0x6c, 0xc4, 0xdf, 0xc8, 0x9d, 0xa7, 0x48, + 0x35, 0x64, 0xc2, 0x85, 0xd3, 0xab, 0x47, 0x38, 0x7f, 0xc9, 0xb0, 0xbe, 0x83, 0x53, 0x3f, 0x19, + 0x0c, 0xac, 0x68, 0xca, 0x76, 0x79, 0x7f, 0xcd, 0xf7, 0x31, 0x3e, 0x67, 0x86, 0xc9, 0xe0, 0x90, + 0x53, 0x2b, 0x09, 0xcb, 0x03, 0xb7, 0xbe, 0x81, 0x8b, 0x80, 0x06, 0x65, 0x24, 0xcf, 0x8a, 0xa6, + 0x68, 0xab, 0x61, 0xa7, 0xeb, 0x5b, 0xa8, 0x3c, 0xfe, 0x4c, 0x18, 0x22, 0x8e, 0x6c, 0xd1, 0x94, 
+ 0x6d, 0x35, 0xec, 0x8d, 0x6d, 0xd3, 0x79, 0xda, 0xe8, 0x11, 0x47, 0x56, 0x26, 0xb8, 0xd3, 0x6b, + 0x0b, 0x57, 0x92, 0xbe, 0x0f, 0x7e, 0xb2, 0xbe, 0x9c, 0x1f, 0xed, 0xb7, 0x53, 0xfa, 0xe2, 0xe3, + 0x75, 0x66, 0x8a, 0x8c, 0xb0, 0x8a, 0x93, 0x57, 0x9d, 0x42, 0x9b, 0x86, 0x76, 0x19, 0x09, 0xa7, + 0x43, 0xba, 0x42, 0x40, 0xbf, 0xd1, 0x12, 0x25, 0xd9, 0x2f, 0xad, 0x9e, 0x8e, 0xd4, 0xdf, 0xe2, + 0xe4, 0xed, 0xb9, 0x7f, 0xff, 0x3c, 0x4b, 0xc5, 0x87, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb5, + 0x18, 0x98, 0x7a, 0x3d, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.pb.go new file mode 100644 index 000000000..1f4d8678c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/control.pb.go @@ -0,0 +1,55 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/control.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Selects and configures the service controller used by the service. The +// service controller handles features like abuse, quota, billing, logging, +// monitoring, etc. +type Control struct { + // The service control environment to use. If empty, no control plane + // feature (like quota and billing) will be enabled. 
+ Environment string `protobuf:"bytes,1,opt,name=environment" json:"environment,omitempty"` +} + +func (m *Control) Reset() { *m = Control{} } +func (m *Control) String() string { return proto.CompactTextString(m) } +func (*Control) ProtoMessage() {} +func (*Control) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +func (m *Control) GetEnvironment() string { + if m != nil { + return m.Environment + } + return "" +} + +func init() { + proto.RegisterType((*Control)(nil), "google.api.Control") +} + +func init() { proto.RegisterFile("google/api/control.proto", fileDescriptor5) } + +var fileDescriptor5 = []byte{ + // 165 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xd1, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xe8, 0x25, 0x16, 0x64, 0x2a, 0x69, 0x73, 0xb1, + 0x3b, 0x43, 0x24, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xf3, 0xca, 0x32, 0x8b, 0xf2, 0xf3, 0x72, 0x53, + 0xf3, 0x4a, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x90, 0x85, 0x9c, 0xf2, 0xb8, 0xf8, 0x92, + 0xf3, 0x73, 0xf5, 0x10, 0xda, 0x9d, 0x78, 0xa0, 0x9a, 0x03, 0x40, 0x06, 0x07, 0x30, 0x46, 0xb9, + 0x42, 0xe5, 0xd2, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, + 0xc0, 0xd6, 0xea, 0x43, 0xa4, 0x12, 0x0b, 0x32, 0x8b, 0xc1, 0x6e, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, + 0x4c, 0x4e, 0x4d, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xb7, 0x46, 0xe1, 0x2d, 0x62, 0x62, 0x71, 0x77, + 0x0c, 0xf0, 0x4c, 0x62, 0x03, 0x6b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x44, 0x6e, 0x78, + 0xbd, 0xcb, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.pb.go new file mode 100644 index 000000000..99a568cff --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/documentation.pb.go @@ -0,0 +1,267 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/documentation.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `Documentation` provides the information for describing a service. +// +// Example: +//
documentation:
+//   summary: >
+//     The Google Calendar API gives access
+//     to most calendar features.
+//   pages:
+//   - name: Overview
+//     content: (== include google/foo/overview.md ==)
+//   - name: Tutorial
+//     content: (== include google/foo/tutorial.md ==)
+//     subpages;
+//     - name: Java
+//       content: (== include google/foo/tutorial_java.md ==)
+//   rules:
+//   - selector: google.calendar.Calendar.Get
+//     description: >
+//       ...
+//   - selector: google.calendar.Calendar.Put
+//     description: >
+//       ...
+// 
+// Documentation is provided in markdown syntax. In addition to +// standard markdown features, definition lists, tables and fenced +// code blocks are supported. Section headers can be provided and are +// interpreted relative to the section nesting of the context where +// a documentation fragment is embedded. +// +// Documentation from the IDL is merged with documentation defined +// via the config at normalization time, where documentation provided +// by config rules overrides IDL provided. +// +// A number of constructs specific to the API platform are supported +// in documentation text. +// +// In order to reference a proto element, the following +// notation can be used: +//
[fully.qualified.proto.name][]
+// To override the display text used for the link, this can be used: +//
[display text][fully.qualified.proto.name]
+// Text can be excluded from doc using the following notation: +//
(-- internal comment --)
+// Comments can be made conditional using a visibility label. The below +// text will be only rendered if the `BETA` label is available: +//
(--BETA: comment for BETA users --)
+// A few directives are available in documentation. Note that +// directives must appear on a single line to be properly +// identified. The `include` directive includes a markdown file from +// an external source: +//
(== include path/to/file ==)
+// The `resource_for` directive marks a message to be the resource of +// a collection in REST view. If it is not specified, tools attempt +// to infer the resource from the operations in a collection: +//
(== resource_for v1.shelves.books ==)
+// The directive `suppress_warning` does not directly affect documentation +// and is documented together with service config validation. +type Documentation struct { + // A short summary of what the service does. Can only be provided by + // plain text. + Summary string `protobuf:"bytes,1,opt,name=summary" json:"summary,omitempty"` + // The top level pages for the documentation set. + Pages []*Page `protobuf:"bytes,5,rep,name=pages" json:"pages,omitempty"` + // A list of documentation rules that apply to individual API elements. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*DocumentationRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"` + // The URL to the root of documentation. + DocumentationRootUrl string `protobuf:"bytes,4,opt,name=documentation_root_url,json=documentationRootUrl" json:"documentation_root_url,omitempty"` + // Declares a single overview page. For example: + //
documentation:
+	//   summary: ...
+	//   overview: (== include overview.md ==)
+	// 
+ // This is a shortcut for the following declaration (using pages style): + //
documentation:
+	//   summary: ...
+	//   pages:
+	//   - name: Overview
+	//     content: (== include overview.md ==)
+	// 
+ // Note: you cannot specify both `overview` field and `pages` field. + Overview string `protobuf:"bytes,2,opt,name=overview" json:"overview,omitempty"` +} + +func (m *Documentation) Reset() { *m = Documentation{} } +func (m *Documentation) String() string { return proto.CompactTextString(m) } +func (*Documentation) ProtoMessage() {} +func (*Documentation) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +func (m *Documentation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Documentation) GetPages() []*Page { + if m != nil { + return m.Pages + } + return nil +} + +func (m *Documentation) GetRules() []*DocumentationRule { + if m != nil { + return m.Rules + } + return nil +} + +func (m *Documentation) GetDocumentationRootUrl() string { + if m != nil { + return m.DocumentationRootUrl + } + return "" +} + +func (m *Documentation) GetOverview() string { + if m != nil { + return m.Overview + } + return "" +} + +// A documentation rule provides information about individual API elements. +type DocumentationRule struct { + // The selector is a comma-separated list of patterns. Each pattern is a + // qualified name of the element which may end in "*", indicating a wildcard. + // Wildcards are only allowed at the end and for a whole component of the + // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". To + // specify a default for all applicable elements, the whole pattern "*" + // is used. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // Description of the selected API(s). + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Deprecation description of the selected element(s). It can be provided if an + // element is marked as `deprecated`. 
+ DeprecationDescription string `protobuf:"bytes,3,opt,name=deprecation_description,json=deprecationDescription" json:"deprecation_description,omitempty"` +} + +func (m *DocumentationRule) Reset() { *m = DocumentationRule{} } +func (m *DocumentationRule) String() string { return proto.CompactTextString(m) } +func (*DocumentationRule) ProtoMessage() {} +func (*DocumentationRule) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } + +func (m *DocumentationRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *DocumentationRule) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *DocumentationRule) GetDeprecationDescription() string { + if m != nil { + return m.DeprecationDescription + } + return "" +} + +// Represents a documentation page. A page can contain subpages to represent +// nested documentation set structure. +type Page struct { + // The name of the page. It will be used as an identity of the page to + // generate URI of the page, text of the link to this page in navigation, + // etc. The full page name (start from the root page name to this page + // concatenated with `.`) can be used as reference to the page in your + // documentation. For example: + //
pages:
+	// - name: Tutorial
+	//   content: (== include tutorial.md ==)
+	//   subpages:
+	//   - name: Java
+	//     content: (== include tutorial_java.md ==)
+	// 
+ // You can reference `Java` page using Markdown reference link syntax: + // `[Java][Tutorial.Java]`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The Markdown content of the page. You can use (== include {path} ==) + // to include content from a Markdown file. + Content string `protobuf:"bytes,2,opt,name=content" json:"content,omitempty"` + // Subpages of this page. The order of subpages specified here will be + // honored in the generated docset. + Subpages []*Page `protobuf:"bytes,3,rep,name=subpages" json:"subpages,omitempty"` +} + +func (m *Page) Reset() { *m = Page{} } +func (m *Page) String() string { return proto.CompactTextString(m) } +func (*Page) ProtoMessage() {} +func (*Page) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} } + +func (m *Page) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Page) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *Page) GetSubpages() []*Page { + if m != nil { + return m.Subpages + } + return nil +} + +func init() { + proto.RegisterType((*Documentation)(nil), "google.api.Documentation") + proto.RegisterType((*DocumentationRule)(nil), "google.api.DocumentationRule") + proto.RegisterType((*Page)(nil), "google.api.Page") +} + +func init() { proto.RegisterFile("google/api/documentation.proto", fileDescriptor6) } + +var fileDescriptor6 = []byte{ + // 356 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0xe3, 0x30, + 0x14, 0x45, 0x71, 0xec, 0xcc, 0x64, 0x5e, 0x98, 0x61, 0x46, 0x0c, 0x19, 0x33, 0xd0, 0x12, 0xb2, + 0x28, 0x59, 0x14, 0x1b, 0x9a, 0x42, 0x17, 0x5d, 0x35, 0xa4, 0x94, 0xee, 0x8c, 0xa1, 0x9b, 0x6e, + 0x82, 0xa2, 0xbc, 0x0a, 0x83, 0xad, 0x67, 0x24, 0x39, 0xa5, 0xbf, 0xd0, 0xcf, 0xe8, 0x57, 0xf5, + 0x73, 0x8a, 0x65, 0x27, 0xb1, 0x29, 0xdd, 0xf9, 0xfa, 0x1e, 0xe9, 0x3e, 0x5d, 0x09, 0x4e, 0x25, + 0x91, 0xcc, 0x31, 0xe6, 0x65, 0x16, 
0x6f, 0x49, 0x54, 0x05, 0x2a, 0xcb, 0x6d, 0x46, 0x2a, 0x2a, + 0x35, 0x59, 0x62, 0xd0, 0xf8, 0x11, 0x2f, 0xb3, 0xd9, 0xbb, 0x07, 0x3f, 0x57, 0x5d, 0x86, 0x85, + 0xf0, 0xdd, 0x54, 0x45, 0xc1, 0xf5, 0x4b, 0xe8, 0x4d, 0xbd, 0xf9, 0x8f, 0x74, 0x2f, 0xd9, 0x19, + 0x0c, 0x4b, 0x2e, 0xd1, 0x84, 0xc3, 0xa9, 0x3f, 0x1f, 0x5f, 0xfc, 0x8e, 0x8e, 0xfb, 0x44, 0x09, + 0x97, 0x98, 0x36, 0x36, 0x5b, 0xc0, 0x50, 0x57, 0x39, 0x9a, 0xd0, 0x77, 0xdc, 0x49, 0x97, 0xeb, + 0x65, 0xa5, 0x55, 0x8e, 0x69, 0xc3, 0xb2, 0x4b, 0x98, 0xf4, 0x66, 0x5d, 0x6b, 0x22, 0xbb, 0xae, + 0x74, 0x1e, 0x06, 0x6e, 0x8a, 0xbf, 0x3d, 0x37, 0x25, 0xb2, 0x0f, 0x3a, 0x67, 0xff, 0x61, 0x44, + 0x3b, 0xd4, 0xbb, 0x0c, 0x9f, 0xc3, 0x81, 0xe3, 0x0e, 0x7a, 0xf6, 0xea, 0xc1, 0x9f, 0x4f, 0x71, + 0xf5, 0x0a, 0x83, 0x39, 0x0a, 0x4b, 0xba, 0x3d, 0xdf, 0x41, 0xb3, 0x29, 0x8c, 0xb7, 0x68, 0x84, + 0xce, 0xca, 0x1a, 0x6f, 0x37, 0xec, 0xfe, 0x62, 0x57, 0xf0, 0x6f, 0x8b, 0xa5, 0x46, 0xd1, 0xcc, + 0xd8, 0xa5, 0x7d, 0x47, 0x4f, 0x3a, 0xf6, 0xea, 0xe8, 0xce, 0x36, 0x10, 0xd4, 0x15, 0x31, 0x06, + 0x81, 0xe2, 0x05, 0xb6, 0xd1, 0xee, 0xbb, 0x6e, 0x5c, 0x90, 0xb2, 0xa8, 0x6c, 0x1b, 0xb9, 0x97, + 0xec, 0x1c, 0x46, 0xa6, 0xda, 0x34, 0xa5, 0xfb, 0x5f, 0x94, 0x7e, 0x20, 0x96, 0x16, 0x7e, 0x09, + 0x2a, 0x3a, 0xc0, 0x92, 0xf5, 0xce, 0x9f, 0xd4, 0xb7, 0x9f, 0x78, 0x8f, 0xb7, 0x2d, 0x21, 0x29, + 0xe7, 0x4a, 0x46, 0xa4, 0x65, 0x2c, 0x51, 0xb9, 0xb7, 0x11, 0x37, 0x16, 0x2f, 0x33, 0xe3, 0x9e, + 0x8f, 0xa9, 0xbb, 0x14, 0x28, 0x48, 0x3d, 0x65, 0xf2, 0xba, 0xa7, 0xde, 0x06, 0xc1, 0xdd, 0x4d, + 0x72, 0xbf, 0xf9, 0xe6, 0x16, 0x2e, 0x3e, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x04, 0x32, 0xbf, + 0x76, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.pb.go new file mode 100644 index 000000000..4bca1828b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/endpoint.pb.go @@ -0,0 +1,130 @@ +// 
Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/endpoint.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `Endpoint` describes a network endpoint that serves a set of APIs. +// A service may expose any number of endpoints, and all endpoints share the +// same service configuration, such as quota configuration and monitoring +// configuration. +// +// Example service configuration: +// +// name: library-example.googleapis.com +// endpoints: +// # Below entry makes 'google.example.library.v1.Library' +// # API be served from endpoint address library-example.googleapis.com. +// # It also allows HTTP OPTIONS calls to be passed to the backend, for +// # it to decide whether the subsequent cross-origin request is +// # allowed to proceed. +// - name: library-example.googleapis.com +// allow_cors: true +type Endpoint struct { + // The canonical name of this endpoint. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // DEPRECATED: This field is no longer supported. Instead of using aliases, + // please specify multiple [google.api.Endpoint][google.api.Endpoint] for each of the intented + // alias. + // + // Additional names that this endpoint will be hosted on. + Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"` + // The list of APIs served by this endpoint. + Apis []string `protobuf:"bytes,3,rep,name=apis" json:"apis,omitempty"` + // The list of features enabled on this endpoint. 
+ Features []string `protobuf:"bytes,4,rep,name=features" json:"features,omitempty"` + // The specification of an Internet routable address of API frontend that will + // handle requests to this [API Endpoint](https://cloud.google.com/apis/design/glossary). + // It should be either a valid IPv4 address or a fully-qualified domain name. + // For example, "8.8.8.8" or "myservice.appspot.com". + Target string `protobuf:"bytes,101,opt,name=target" json:"target,omitempty"` + // Allowing + // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka + // cross-domain traffic, would allow the backends served from this endpoint to + // receive and respond to HTTP OPTIONS requests. The response will be used by + // the browser to determine whether the subsequent cross-origin request is + // allowed to proceed. + AllowCors bool `protobuf:"varint,5,opt,name=allow_cors,json=allowCors" json:"allow_cors,omitempty"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (m *Endpoint) String() string { return proto.CompactTextString(m) } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } + +func (m *Endpoint) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Endpoint) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func (m *Endpoint) GetApis() []string { + if m != nil { + return m.Apis + } + return nil +} + +func (m *Endpoint) GetFeatures() []string { + if m != nil { + return m.Features + } + return nil +} + +func (m *Endpoint) GetTarget() string { + if m != nil { + return m.Target + } + return "" +} + +func (m *Endpoint) GetAllowCors() bool { + if m != nil { + return m.AllowCors + } + return false +} + +func init() { + proto.RegisterType((*Endpoint)(nil), "google.api.Endpoint") +} + +func init() { proto.RegisterFile("google/api/endpoint.proto", fileDescriptor7) } + +var fileDescriptor7 = []byte{ + // 253 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x41, 0x4b, 0xc4, 0x30, + 0x10, 0x85, 0xe9, 0x6e, 0x5d, 0xdb, 0x01, 0x3d, 0xe4, 0x20, 0x71, 0x51, 0x28, 0x9e, 0x7a, 0x6a, + 0x0f, 0x1e, 0x3d, 0xb9, 0xb2, 0x88, 0xb7, 0xd2, 0xa3, 0x17, 0x19, 0xeb, 0x6c, 0x08, 0x64, 0x33, + 0x21, 0x89, 0xfa, 0x73, 0x04, 0x7f, 0xa9, 0x34, 0xed, 0xaa, 0x7b, 0x9b, 0xef, 0xbd, 0x37, 0x61, + 0x5e, 0xe0, 0x52, 0x31, 0x2b, 0x43, 0x2d, 0x3a, 0xdd, 0x92, 0x7d, 0x73, 0xac, 0x6d, 0x6c, 0x9c, + 0xe7, 0xc8, 0x02, 0x26, 0xab, 0x41, 0xa7, 0xd7, 0x57, 0xff, 0x62, 0x68, 0x2d, 0x47, 0x8c, 0x9a, + 0x6d, 0x98, 0x92, 0x37, 0x5f, 0x19, 0x14, 0xdb, 0x79, 0x59, 0x08, 0xc8, 0x2d, 0xee, 0x49, 0x66, + 0x55, 0x56, 0x97, 0x7d, 0x9a, 0x85, 0x84, 0x53, 0x34, 0x1a, 0x03, 0x05, 0xb9, 0xa8, 0x96, 0x75, + 0xd9, 0x1f, 0x70, 0x4c, 0xa3, 0xd3, 0x41, 0x2e, 0x93, 0x9c, 0x66, 0xb1, 0x86, 0x62, 0x47, 0x18, + 0xdf, 0x3d, 0x05, 0x99, 0x27, 0xfd, 0x97, 0xc5, 0x05, 0xac, 0x22, 0x7a, 0x45, 0x51, 0x52, 0x7a, + 0x7f, 0x26, 0x71, 0x0d, 0x80, 0xc6, 0xf0, 0xe7, 0xcb, 0xc0, 0x3e, 0xc8, 0x93, 0x2a, 0xab, 0x8b, + 0xbe, 0x4c, 0xca, 0x03, 0xfb, 0xb0, 0x61, 0x38, 0x1f, 0x78, 0xdf, 0xfc, 0x35, 0xda, 0x9c, 0x1d, + 0x0e, 0xee, 0xc6, 0x0a, 0x5d, 0xf6, 0xbc, 0x9d, 0x4d, 0xc5, 0x06, 0xad, 0x6a, 0xd8, 0xab, 0x56, + 0x91, 0x4d, 0x05, 0xdb, 0xc9, 0x1a, 0x8f, 0x4b, 0x3f, 0x10, 0xc8, 0x7f, 0xe8, 0x81, 0x06, 0xb6, + 0x3b, 0xad, 0xee, 0x8e, 0xe8, 0x7b, 0x91, 0x3f, 0xde, 0x77, 0x4f, 0xaf, 0xab, 0xb4, 0x78, 0xfb, + 0x13, 0x00, 0x00, 0xff, 0xff, 0x34, 0x0e, 0xdd, 0x70, 0x60, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.pb.go new file mode 100644 index 000000000..09ed8bd83 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/log.pb.go @@ -0,0 +1,98 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/api/log.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_api2 "google.golang.org/genproto/googleapis/api/label" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A description of a log type. Example in YAML format: +// +// - name: library.googleapis.com/activity_history +// description: The history of borrowing and returning library items. +// display_name: Activity +// labels: +// - key: /customer_id +// description: Identifier of a library customer +type LogDescriptor struct { + // The name of the log. It must be less than 512 characters long and can + // include the following characters: upper- and lower-case alphanumeric + // characters [A-Za-z0-9], and punctuation characters including + // slash, underscore, hyphen, period [/_-.]. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The set of labels that are available to describe a specific log entry. + // Runtime requests that contain labels not specified here are + // considered invalid. + Labels []*google_api2.LabelDescriptor `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty"` + // A human-readable description of this log. This information appears in + // the documentation and can contain details. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The human-readable name for this log. This information appears on + // the user interface and should be concise. 
+ DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName" json:"display_name,omitempty"` +} + +func (m *LogDescriptor) Reset() { *m = LogDescriptor{} } +func (m *LogDescriptor) String() string { return proto.CompactTextString(m) } +func (*LogDescriptor) ProtoMessage() {} +func (*LogDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } + +func (m *LogDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LogDescriptor) GetLabels() []*google_api2.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +func (m *LogDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *LogDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func init() { + proto.RegisterType((*LogDescriptor)(nil), "google.api.LogDescriptor") +} + +func init() { proto.RegisterFile("google/api/log.proto", fileDescriptor8) } + +var fileDescriptor8 = []byte{ + // 238 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0xc1, 0x4a, 0xc3, 0x40, + 0x10, 0x86, 0x49, 0x1b, 0x8a, 0x6e, 0xd5, 0xc3, 0x22, 0x12, 0xf4, 0x12, 0x3d, 0xf5, 0xb4, 0x01, + 0x7b, 0xf4, 0x64, 0x51, 0x44, 0x08, 0x12, 0x7a, 0xf4, 0x22, 0xd3, 0x74, 0x1c, 0x46, 0x36, 0x3b, + 0xcb, 0x6e, 0x11, 0x7c, 0x18, 0x2f, 0x3e, 0xa9, 0x74, 0x13, 0x68, 0x7a, 0xdb, 0xfd, 0xe6, 0x9b, + 0x7f, 0x66, 0xd4, 0x25, 0x89, 0x90, 0xc5, 0x0a, 0x3c, 0x57, 0x56, 0xc8, 0xf8, 0x20, 0x3b, 0xd1, + 0xaa, 0xa7, 0x06, 0x3c, 0x5f, 0x5f, 0x8d, 0x0d, 0xd8, 0xa0, 0xed, 0x9d, 0xbb, 0xdf, 0x4c, 0x9d, + 0xd7, 0x42, 0x4f, 0x18, 0xdb, 0xc0, 0x7e, 0x27, 0x41, 0x6b, 0x95, 0x3b, 0xe8, 0xb0, 0xc8, 0xca, + 0x6c, 0x71, 0xba, 0x4e, 0x6f, 0xbd, 0x54, 0xb3, 0xd4, 0x14, 0x8b, 0x49, 0x39, 0x5d, 0xcc, 0xef, + 0x6f, 0xcc, 0x21, 0xda, 0xd4, 0xfb, 0xca, 0x21, 0x60, 0x3d, 0xa8, 0xba, 0x54, 0xf3, 0xed, 0x40, + 0x59, 0x5c, 0x31, 0x4d, 0x79, 0x63, 
0xa4, 0x6f, 0xd5, 0xd9, 0x96, 0xa3, 0xb7, 0xf0, 0xf3, 0x91, + 0x46, 0xe6, 0x83, 0xd2, 0xb3, 0x37, 0xe8, 0x70, 0xf5, 0xa5, 0x2e, 0x5a, 0xe9, 0x46, 0xe3, 0x56, + 0x27, 0xb5, 0x50, 0xb3, 0xdf, 0xbd, 0xc9, 0xde, 0x9f, 0x07, 0x4e, 0x62, 0xc1, 0x91, 0x91, 0x40, + 0x15, 0xa1, 0x4b, 0x97, 0x55, 0x7d, 0x09, 0x3c, 0xc7, 0x74, 0x74, 0xc4, 0xf0, 0xcd, 0x2d, 0xb6, + 0xe2, 0x3e, 0x99, 0x1e, 0x8e, 0x7e, 0x7f, 0x93, 0xfc, 0xe5, 0xb1, 0x79, 0xdd, 0xcc, 0x52, 0xe3, + 0xf2, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x25, 0x6c, 0x32, 0xff, 0x4e, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.pb.go new file mode 100644 index 000000000..b62778aeb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/logging.pb.go @@ -0,0 +1,135 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/logging.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Logging configuration of the service. +// +// The following example shows how to configure logs to be sent to the +// producer and consumer projects. In the example, the `activity_history` +// log is sent to both the producer and consumer projects, whereas the +// `purchase_history` log is only sent to the producer project. +// +// monitored_resources: +// - type: library.googleapis.com/branch +// labels: +// - key: /city +// description: The city where the library branch is located in. +// - key: /name +// description: The name of the branch. 
+// logs: +// - name: activity_history +// labels: +// - key: /customer_id +// - name: purchase_history +// logging: +// producer_destinations: +// - monitored_resource: library.googleapis.com/branch +// logs: +// - activity_history +// - purchase_history +// consumer_destinations: +// - monitored_resource: library.googleapis.com/branch +// logs: +// - activity_history +type Logging struct { + // Logging configurations for sending logs to the producer project. + // There can be multiple producer destinations, each one must have a + // different monitored resource type. A log can be used in at most + // one producer destination. + ProducerDestinations []*Logging_LoggingDestination `protobuf:"bytes,1,rep,name=producer_destinations,json=producerDestinations" json:"producer_destinations,omitempty"` + // Logging configurations for sending logs to the consumer project. + // There can be multiple consumer destinations, each one must have a + // different monitored resource type. A log can be used in at most + // one consumer destination. + ConsumerDestinations []*Logging_LoggingDestination `protobuf:"bytes,2,rep,name=consumer_destinations,json=consumerDestinations" json:"consumer_destinations,omitempty"` +} + +func (m *Logging) Reset() { *m = Logging{} } +func (m *Logging) String() string { return proto.CompactTextString(m) } +func (*Logging) ProtoMessage() {} +func (*Logging) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0} } + +func (m *Logging) GetProducerDestinations() []*Logging_LoggingDestination { + if m != nil { + return m.ProducerDestinations + } + return nil +} + +func (m *Logging) GetConsumerDestinations() []*Logging_LoggingDestination { + if m != nil { + return m.ConsumerDestinations + } + return nil +} + +// Configuration of a specific logging destination (the producer project +// or the consumer project). +type Logging_LoggingDestination struct { + // The monitored resource type. 
The type must be defined in the + // [Service.monitored_resources][google.api.Service.monitored_resources] section. + MonitoredResource string `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource" json:"monitored_resource,omitempty"` + // Names of the logs to be sent to this destination. Each name must + // be defined in the [Service.logs][google.api.Service.logs] section. If the log name is + // not a domain scoped name, it will be automatically prefixed with + // the service name followed by "/". + Logs []string `protobuf:"bytes,1,rep,name=logs" json:"logs,omitempty"` +} + +func (m *Logging_LoggingDestination) Reset() { *m = Logging_LoggingDestination{} } +func (m *Logging_LoggingDestination) String() string { return proto.CompactTextString(m) } +func (*Logging_LoggingDestination) ProtoMessage() {} +func (*Logging_LoggingDestination) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0, 0} } + +func (m *Logging_LoggingDestination) GetMonitoredResource() string { + if m != nil { + return m.MonitoredResource + } + return "" +} + +func (m *Logging_LoggingDestination) GetLogs() []string { + if m != nil { + return m.Logs + } + return nil +} + +func init() { + proto.RegisterType((*Logging)(nil), "google.api.Logging") + proto.RegisterType((*Logging_LoggingDestination)(nil), "google.api.Logging.LoggingDestination") +} + +func init() { proto.RegisterFile("google/api/logging.proto", fileDescriptor9) } + +var fileDescriptor9 = []byte{ + // 270 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x4f, 0x4b, 0xc4, 0x30, + 0x10, 0xc5, 0x69, 0x77, 0x51, 0x36, 0x8a, 0x60, 0x50, 0x28, 0x8b, 0x87, 0xc5, 0x83, 0xec, 0xc5, + 0x14, 0xf4, 0xe8, 0xc9, 0x45, 0x11, 0xc1, 0x43, 0xe9, 0x45, 0xd0, 0xc3, 0x12, 0xd3, 0x38, 0x04, + 0xda, 0x99, 0x90, 0xa4, 0x7e, 0x1a, 0x4f, 0x7e, 0x52, 0xd9, 0xa6, 0x75, 0xab, 0x9e, 0xf6, 0x94, + 0x3f, 0xef, 0xbd, 0x5f, 0x32, 0x8f, 0x65, 0x40, 0x04, 0xb5, 0xce, 0xa5, 0x35, 
0x79, 0x4d, 0x00, + 0x06, 0x41, 0x58, 0x47, 0x81, 0x38, 0x8b, 0x8a, 0x90, 0xd6, 0xcc, 0xcf, 0x46, 0x2e, 0x89, 0x48, + 0x41, 0x06, 0x43, 0xe8, 0xa3, 0xf3, 0xfc, 0x33, 0x65, 0xfb, 0x4f, 0x31, 0xcb, 0x5f, 0xd9, 0xa9, + 0x75, 0x54, 0xb5, 0x4a, 0xbb, 0x75, 0xa5, 0x7d, 0x30, 0x18, 0xad, 0x59, 0xb2, 0x98, 0x2c, 0x0f, + 0xae, 0x2e, 0xc4, 0x96, 0x2a, 0xfa, 0xcc, 0xb0, 0xde, 0x6d, 0xed, 0xe5, 0xc9, 0x00, 0x19, 0x5d, + 0xfa, 0x0d, 0x5c, 0x11, 0xfa, 0xb6, 0xf9, 0x0b, 0x4f, 0x77, 0x83, 0x0f, 0x90, 0x31, 0x7c, 0xfe, + 0xcc, 0xf8, 0x7f, 0x2f, 0xbf, 0x64, 0xbc, 0x21, 0x34, 0x81, 0x9c, 0xae, 0xd6, 0x4e, 0x7b, 0x6a, + 0x9d, 0xd2, 0xd9, 0x64, 0x91, 0x2c, 0x67, 0xe5, 0xf1, 0x8f, 0x52, 0xf6, 0x02, 0xe7, 0x6c, 0x5a, + 0x13, 0xc4, 0x69, 0x67, 0x65, 0xb7, 0x5f, 0x21, 0x3b, 0x52, 0xd4, 0x8c, 0xfe, 0xb6, 0x3a, 0xec, + 0x1f, 0x2a, 0x36, 0xf5, 0x15, 0xc9, 0xcb, 0x7d, 0xaf, 0x01, 0xd5, 0x12, 0x41, 0x90, 0x83, 0x1c, + 0x34, 0x76, 0xe5, 0xe6, 0x51, 0x92, 0xd6, 0xf8, 0xae, 0x7d, 0xaf, 0xdd, 0x87, 0x51, 0x5a, 0x11, + 0xbe, 0x1b, 0xb8, 0xf9, 0x75, 0xfa, 0x4a, 0xa7, 0x0f, 0xb7, 0xc5, 0xe3, 0xdb, 0x5e, 0x17, 0xbc, + 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x73, 0x4f, 0x86, 0x6e, 0xdb, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.pb.go new file mode 100644 index 000000000..3ebb6e141 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/monitoring.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/monitoring.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Monitoring configuration of the service. 
+// +// The example below shows how to configure monitored resources and metrics +// for monitoring. In the example, a monitored resource and two metrics are +// defined. The `library.googleapis.com/book/returned_count` metric is sent +// to both producer and consumer projects, whereas the +// `library.googleapis.com/book/overdue_count` metric is only sent to the +// consumer project. +// +// monitored_resources: +// - type: library.googleapis.com/branch +// labels: +// - key: /city +// description: The city where the library branch is located in. +// - key: /name +// description: The name of the branch. +// metrics: +// - name: library.googleapis.com/book/returned_count +// metric_kind: DELTA +// value_type: INT64 +// labels: +// - key: /customer_id +// - name: library.googleapis.com/book/overdue_count +// metric_kind: GAUGE +// value_type: INT64 +// labels: +// - key: /customer_id +// monitoring: +// producer_destinations: +// - monitored_resource: library.googleapis.com/branch +// metrics: +// - library.googleapis.com/book/returned_count +// consumer_destinations: +// - monitored_resource: library.googleapis.com/branch +// metrics: +// - library.googleapis.com/book/returned_count +// - library.googleapis.com/book/overdue_count +type Monitoring struct { + // Monitoring configurations for sending metrics to the producer project. + // There can be multiple producer destinations, each one must have a + // different monitored resource type. A metric can be used in at most + // one producer destination. + ProducerDestinations []*Monitoring_MonitoringDestination `protobuf:"bytes,1,rep,name=producer_destinations,json=producerDestinations" json:"producer_destinations,omitempty"` + // Monitoring configurations for sending metrics to the consumer project. + // There can be multiple consumer destinations, each one must have a + // different monitored resource type. A metric can be used in at most + // one consumer destination. 
+ ConsumerDestinations []*Monitoring_MonitoringDestination `protobuf:"bytes,2,rep,name=consumer_destinations,json=consumerDestinations" json:"consumer_destinations,omitempty"` +} + +func (m *Monitoring) Reset() { *m = Monitoring{} } +func (m *Monitoring) String() string { return proto.CompactTextString(m) } +func (*Monitoring) ProtoMessage() {} +func (*Monitoring) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} } + +func (m *Monitoring) GetProducerDestinations() []*Monitoring_MonitoringDestination { + if m != nil { + return m.ProducerDestinations + } + return nil +} + +func (m *Monitoring) GetConsumerDestinations() []*Monitoring_MonitoringDestination { + if m != nil { + return m.ConsumerDestinations + } + return nil +} + +// Configuration of a specific monitoring destination (the producer project +// or the consumer project). +type Monitoring_MonitoringDestination struct { + // The monitored resource type. The type must be defined in + // [Service.monitored_resources][google.api.Service.monitored_resources] section. + MonitoredResource string `protobuf:"bytes,1,opt,name=monitored_resource,json=monitoredResource" json:"monitored_resource,omitempty"` + // Names of the metrics to report to this monitoring destination. + // Each name must be defined in [Service.metrics][google.api.Service.metrics] section. 
+ Metrics []string `protobuf:"bytes,2,rep,name=metrics" json:"metrics,omitempty"` +} + +func (m *Monitoring_MonitoringDestination) Reset() { *m = Monitoring_MonitoringDestination{} } +func (m *Monitoring_MonitoringDestination) String() string { return proto.CompactTextString(m) } +func (*Monitoring_MonitoringDestination) ProtoMessage() {} +func (*Monitoring_MonitoringDestination) Descriptor() ([]byte, []int) { + return fileDescriptor10, []int{0, 0} +} + +func (m *Monitoring_MonitoringDestination) GetMonitoredResource() string { + if m != nil { + return m.MonitoredResource + } + return "" +} + +func (m *Monitoring_MonitoringDestination) GetMetrics() []string { + if m != nil { + return m.Metrics + } + return nil +} + +func init() { + proto.RegisterType((*Monitoring)(nil), "google.api.Monitoring") + proto.RegisterType((*Monitoring_MonitoringDestination)(nil), "google.api.Monitoring.MonitoringDestination") +} + +func init() { proto.RegisterFile("google/api/monitoring.proto", fileDescriptor10) } + +var fileDescriptor10 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0xcd, 0x4a, 0xc4, 0x30, + 0x10, 0xa6, 0x55, 0x94, 0x8d, 0xa0, 0x58, 0x5c, 0x28, 0xab, 0x87, 0xc5, 0xd3, 0x1e, 0xb4, 0x05, + 0x3d, 0x7a, 0x72, 0x51, 0xc4, 0x83, 0x50, 0x7a, 0xf4, 0xb2, 0xc6, 0x74, 0x0c, 0x03, 0xdb, 0x99, + 0x9a, 0xa4, 0x3e, 0x90, 0xcf, 0xe0, 0x03, 0xca, 0x36, 0xed, 0x36, 0x8a, 0x27, 0x6f, 0x99, 0x7c, + 0x7f, 0xc3, 0x37, 0xe2, 0x54, 0x33, 0xeb, 0x35, 0xe4, 0xb2, 0xc1, 0xbc, 0x66, 0x42, 0xc7, 0x06, + 0x49, 0x67, 0x8d, 0x61, 0xc7, 0x89, 0xf0, 0x60, 0x26, 0x1b, 0x9c, 0x9d, 0x05, 0x44, 0x49, 0xc4, + 0x4e, 0x3a, 0x64, 0xb2, 0x9e, 0x79, 0xfe, 0x15, 0x0b, 0xf1, 0xb4, 0x95, 0x27, 0x52, 0x4c, 0x1b, + 0xc3, 0x55, 0xab, 0xc0, 0xac, 0x2a, 0xb0, 0x0e, 0xc9, 0xb3, 0xd3, 0x68, 0xbe, 0xb3, 0x38, 0xb8, + 0xba, 0xc8, 0x46, 0xe3, 0x6c, 0x94, 0x05, 0xcf, 0xbb, 0x51, 0x54, 0x9e, 0x0c, 0x56, 0xc1, 0xa7, + 0xdd, 0x44, 0x28, 0x26, 
0xdb, 0xd6, 0xbf, 0x23, 0xe2, 0xff, 0x44, 0x0c, 0x56, 0x61, 0xc4, 0xec, + 0x45, 0x4c, 0xff, 0xa4, 0x27, 0x97, 0x22, 0xe9, 0xbb, 0x82, 0x6a, 0x65, 0xc0, 0x72, 0x6b, 0x14, + 0xa4, 0xd1, 0x3c, 0x5a, 0x4c, 0xca, 0xe3, 0x2d, 0x52, 0xf6, 0x40, 0x92, 0x8a, 0xfd, 0x1a, 0x9c, + 0x41, 0xe5, 0x97, 0x9b, 0x94, 0xc3, 0xb8, 0x7c, 0x17, 0x87, 0x8a, 0xeb, 0x60, 0xd5, 0xe5, 0xd1, + 0x98, 0x58, 0x6c, 0x9a, 0x2d, 0xa2, 0xe7, 0xfb, 0x1e, 0xd6, 0xbc, 0x96, 0xa4, 0x33, 0x36, 0x3a, + 0xd7, 0x40, 0x5d, 0xef, 0xb9, 0x87, 0x64, 0x83, 0xb6, 0x3b, 0x8c, 0x05, 0xf3, 0x81, 0x0a, 0x14, + 0xd3, 0x1b, 0xea, 0x9b, 0x1f, 0xd3, 0x67, 0xbc, 0xfb, 0x70, 0x5b, 0x3c, 0xbe, 0xee, 0x75, 0xc2, + 0xeb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x35, 0xf3, 0xe2, 0xf9, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/quota.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/quota.pb.go new file mode 100644 index 000000000..ac5ab0975 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/quota.pb.go @@ -0,0 +1,391 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/quota.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Quota configuration helps to achieve fairness and budgeting in service +// usage. +// +// The quota configuration works this way: +// - The service configuration defines a set of metrics. +// - For API calls, the quota.metric_rules maps methods to metrics with +// corresponding costs. +// - The quota.limits defines limits on the metrics, which will be used for +// quota checks at runtime. 
+// +// An example quota configuration in yaml format: +// +// quota: +// limits: +// +// - name: apiWriteQpsPerProject +// metric: library.googleapis.com/write_calls +// unit: "1/min/{project}" # rate limit for consumer projects +// values: +// STANDARD: 10000 +// +// # The metric rules bind all methods to the read_calls metric, +// # except for the UpdateBook and DeleteBook methods. These two methods +// # are mapped to the write_calls metric, with the UpdateBook method +// # consuming at twice rate as the DeleteBook method. +// metric_rules: +// - selector: "*" +// metric_costs: +// library.googleapis.com/read_calls: 1 +// - selector: google.example.library.v1.LibraryService.UpdateBook +// metric_costs: +// library.googleapis.com/write_calls: 2 +// - selector: google.example.library.v1.LibraryService.DeleteBook +// metric_costs: +// library.googleapis.com/write_calls: 1 +// +// Corresponding Metric definition: +// +// metrics: +// - name: library.googleapis.com/read_calls +// display_name: Read requests +// metric_kind: DELTA +// value_type: INT64 +// +// - name: library.googleapis.com/write_calls +// display_name: Write requests +// metric_kind: DELTA +// value_type: INT64 +// +type Quota struct { + // List of `QuotaLimit` definitions for the service. + // + // Used by metric-based quotas only. + Limits []*QuotaLimit `protobuf:"bytes,3,rep,name=limits" json:"limits,omitempty"` + // List of `MetricRule` definitions, each one mapping a selected method to one + // or more metrics. + // + // Used by metric-based quotas only. 
+ MetricRules []*MetricRule `protobuf:"bytes,4,rep,name=metric_rules,json=metricRules" json:"metric_rules,omitempty"` +} + +func (m *Quota) Reset() { *m = Quota{} } +func (m *Quota) String() string { return proto.CompactTextString(m) } +func (*Quota) ProtoMessage() {} +func (*Quota) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0} } + +func (m *Quota) GetLimits() []*QuotaLimit { + if m != nil { + return m.Limits + } + return nil +} + +func (m *Quota) GetMetricRules() []*MetricRule { + if m != nil { + return m.MetricRules + } + return nil +} + +// Bind API methods to metrics. Binding a method to a metric causes that +// metric's configured quota, billing, and monitoring behaviors to apply to the +// method call. +// +// Used by metric-based quotas only. +type MetricRule struct { + // Selects the methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // Metrics to update when the selected methods are called, and the associated + // cost applied to each metric. + // + // The key of the map is the metric name, and the values are the amount + // increased for the metric against which the quota limits are defined. + // The value must not be negative. 
+ MetricCosts map[string]int64 `protobuf:"bytes,2,rep,name=metric_costs,json=metricCosts" json:"metric_costs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *MetricRule) Reset() { *m = MetricRule{} } +func (m *MetricRule) String() string { return proto.CompactTextString(m) } +func (*MetricRule) ProtoMessage() {} +func (*MetricRule) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{1} } + +func (m *MetricRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *MetricRule) GetMetricCosts() map[string]int64 { + if m != nil { + return m.MetricCosts + } + return nil +} + +// `QuotaLimit` defines a specific limit that applies over a specified duration +// for a limit type. There can be at most one limit for a duration and limit +// type combination defined within a `QuotaGroup`. +type QuotaLimit struct { + // Name of the quota limit. The name is used to refer to the limit when + // overriding the default limit on per-consumer basis. + // + // For group-based quota limits, the name must be unique within the quota + // group. If a name is not provided, it will be generated from the limit_by + // and duration fields. + // + // For metric-based quota limits, the name must be provided, and it must be + // unique within the service. The name can only include alphanumeric + // characters as well as '-'. + // + // The maximum length of the limit name is 64 characters. + // + // The name of a limit is used as a unique identifier for this limit. + // Therefore, once a limit has been put into use, its name should be + // immutable. You can use the display_name field to provide a user-friendly + // name for the limit. The display name can be evolved over time without + // affecting the identity of the limit. + Name string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"` + // Optional. User-visible, extended description for this quota limit. 
+ // Should be used only when more context is needed to understand this limit + // than provided by the limit's display name (see: `display_name`). + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Default number of tokens that can be consumed during the specified + // duration. This is the number of tokens assigned when a client + // application developer activates the service for his/her project. + // + // Specifying a value of 0 will block all requests. This can be used if you + // are provisioning quota to selected consumers and blocking others. + // Similarly, a value of -1 will indicate an unlimited quota. No other + // negative values are allowed. + // + // Used by group-based quotas only. + DefaultLimit int64 `protobuf:"varint,3,opt,name=default_limit,json=defaultLimit" json:"default_limit,omitempty"` + // Maximum number of tokens that can be consumed during the specified + // duration. Client application developers can override the default limit up + // to this maximum. If specified, this value cannot be set to a value less + // than the default limit. If not specified, it is set to the default limit. + // + // To allow clients to apply overrides with no upper bound, set this to -1, + // indicating unlimited maximum quota. + // + // Used by group-based quotas only. + MaxLimit int64 `protobuf:"varint,4,opt,name=max_limit,json=maxLimit" json:"max_limit,omitempty"` + // Free tier value displayed in the Developers Console for this limit. + // The free tier is the number of tokens that will be subtracted from the + // billed amount when billing is enabled. + // This field can only be set on a limit with duration "1d", in a billable + // group; it is invalid on any other limit. If this field is not set, it + // defaults to 0, indicating that there is no free tier for this service. + // + // Used by group-based quotas only. 
+ FreeTier int64 `protobuf:"varint,7,opt,name=free_tier,json=freeTier" json:"free_tier,omitempty"` + // Duration of this limit in textual notation. Example: "100s", "24h", "1d". + // For duration longer than a day, only multiple of days is supported. We + // support only "100s" and "1d" for now. Additional support will be added in + // the future. "0" indicates indefinite duration. + // + // Used by group-based quotas only. + Duration string `protobuf:"bytes,5,opt,name=duration" json:"duration,omitempty"` + // The name of the metric this quota limit applies to. The quota limits with + // the same metric will be checked together during runtime. The metric must be + // defined within the service config. + // + // Used by metric-based quotas only. + Metric string `protobuf:"bytes,8,opt,name=metric" json:"metric,omitempty"` + // Specify the unit of the quota limit. It uses the same syntax as + // [Metric.unit][]. The supported unit kinds are determined by the quota + // backend system. + // + // The [Google Service Control](https://cloud.google.com/service-control) + // supports the following unit components: + // * One of the time intevals: + // * "/min" for quota every minute. + // * "/d" for quota every 24 hours, starting 00:00 US Pacific Time. + // * Otherwise the quota won't be reset by time, such as storage limit. + // * One and only one of the granted containers: + // * "/{organization}" quota for an organization. + // * "/{project}" quota for a project. + // * "/{folder}" quota for a folder. + // * "/{resource}" quota for a universal resource. + // * Zero or more quota segmentation dimension. Not all combos are valid. + // * "/{region}" quota for every region. Not to be used with time intervals. + // * Otherwise the resources granted on the target is not segmented. + // * "/{zone}" quota for every zone. Not to be used with time intervals. + // * Otherwise the resources granted on the target is not segmented. 
+ // * "/{resource}" quota for a resource associated with a project or org. + // + // Here are some examples: + // * "1/min/{project}" for quota per minute per project. + // * "1/min/{user}" for quota per minute per user. + // * "1/min/{organization}" for quota per minute per organization. + // + // Note: the order of unit components is insignificant. + // The "1" at the beginning is required to follow the metric unit syntax. + // + // Used by metric-based quotas only. + Unit string `protobuf:"bytes,9,opt,name=unit" json:"unit,omitempty"` + // Tiered limit values. Also allows for regional or zone overrides for these + // values if "/{region}" or "/{zone}" is specified in the unit field. + // + // Currently supported tiers from low to high: + // VERY_LOW, LOW, STANDARD, HIGH, VERY_HIGH + // + // To apply different limit values for users according to their tiers, specify + // the values for the tiers you want to differentiate. For example: + // {LOW:100, STANDARD:500, HIGH:1000, VERY_HIGH:5000} + // + // The limit value for each tier is optional except for the tier STANDARD. + // The limit value for an unspecified tier falls to the value of its next + // tier towards tier STANDARD. For the above example, the limit value for tier + // STANDARD is 500. + // + // To apply the same limit value for all users, just specify limit value for + // tier STANDARD. For example: {STANDARD:500}. + // + // To apply a regional overide for a tier, add a map entry with key + // "/", where is a region name. Similarly, for a zone + // override, add a map entry with key "/{zone}". + // Further, a wildcard can be used at the end of a zone name in order to + // specify zone level overrides. 
For example: + // LOW: 10, STANDARD: 50, HIGH: 100, + // LOW/us-central1: 20, STANDARD/us-central1: 60, HIGH/us-central1: 200, + // LOW/us-central1-*: 10, STANDARD/us-central1-*: 20, HIGH/us-central1-*: 80 + // + // The regional overrides tier set for each region must be the same as + // the tier set for default limit values. Same rule applies for zone overrides + // tier as well. + // + // Used by metric-based quotas only. + Values map[string]int64 `protobuf:"bytes,10,rep,name=values" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + // User-visible display name for this limit. + // Optional. If not set, the UI will provide a default display name based on + // the quota configuration. This field can be used to override the default + // display name generated from the configuration. + DisplayName string `protobuf:"bytes,12,opt,name=display_name,json=displayName" json:"display_name,omitempty"` +} + +func (m *QuotaLimit) Reset() { *m = QuotaLimit{} } +func (m *QuotaLimit) String() string { return proto.CompactTextString(m) } +func (*QuotaLimit) ProtoMessage() {} +func (*QuotaLimit) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{2} } + +func (m *QuotaLimit) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *QuotaLimit) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *QuotaLimit) GetDefaultLimit() int64 { + if m != nil { + return m.DefaultLimit + } + return 0 +} + +func (m *QuotaLimit) GetMaxLimit() int64 { + if m != nil { + return m.MaxLimit + } + return 0 +} + +func (m *QuotaLimit) GetFreeTier() int64 { + if m != nil { + return m.FreeTier + } + return 0 +} + +func (m *QuotaLimit) GetDuration() string { + if m != nil { + return m.Duration + } + return "" +} + +func (m *QuotaLimit) GetMetric() string { + if m != nil { + return m.Metric + } + return "" +} + +func (m *QuotaLimit) GetUnit() string { + if m != nil { + return 
m.Unit + } + return "" +} + +func (m *QuotaLimit) GetValues() map[string]int64 { + if m != nil { + return m.Values + } + return nil +} + +func (m *QuotaLimit) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func init() { + proto.RegisterType((*Quota)(nil), "google.api.Quota") + proto.RegisterType((*MetricRule)(nil), "google.api.MetricRule") + proto.RegisterType((*QuotaLimit)(nil), "google.api.QuotaLimit") +} + +func init() { proto.RegisterFile("google/api/quota.proto", fileDescriptor11) } + +var fileDescriptor11 = []byte{ + // 466 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x55, 0x9a, 0xb6, 0xb4, 0xd3, 0x82, 0x56, 0x16, 0xaa, 0xac, 0xc2, 0xa1, 0x94, 0x03, 0x3d, + 0xa5, 0x12, 0x5c, 0xd8, 0x45, 0x42, 0x62, 0xd1, 0x0a, 0x81, 0x00, 0x95, 0x08, 0x71, 0xe0, 0x52, + 0x99, 0x74, 0x1a, 0x59, 0x38, 0x71, 0xb0, 0x9d, 0xd5, 0xf6, 0xcc, 0x9f, 0xf0, 0x0d, 0x7c, 0x20, + 0xf2, 0xd8, 0xdb, 0x16, 0xd8, 0xcb, 0xde, 0x66, 0xe6, 0xbd, 0xe7, 0x17, 0x3f, 0x4f, 0x60, 0x52, + 0x6a, 0x5d, 0x2a, 0x5c, 0x8a, 0x46, 0x2e, 0x7f, 0xb4, 0xda, 0x89, 0xac, 0x31, 0xda, 0x69, 0x06, + 0x61, 0x9e, 0x89, 0x46, 0x4e, 0x1f, 0x1e, 0x71, 0x44, 0x5d, 0x6b, 0x27, 0x9c, 0xd4, 0xb5, 0x0d, + 0xcc, 0xb9, 0x81, 0xde, 0x27, 0x2f, 0x64, 0x19, 0xf4, 0x95, 0xac, 0xa4, 0xb3, 0x3c, 0x9d, 0xa5, + 0x8b, 0xd1, 0xd3, 0x49, 0x76, 0x38, 0x23, 0x23, 0xca, 0x7b, 0x0f, 0xe7, 0x91, 0xc5, 0x4e, 0x61, + 0x5c, 0xa1, 0x33, 0xb2, 0x58, 0x9b, 0x56, 0xa1, 0xe5, 0xdd, 0xff, 0x55, 0x1f, 0x08, 0xcf, 0x5b, + 0x85, 0xf9, 0xa8, 0xda, 0xd7, 0x76, 0xfe, 0x3b, 0x01, 0x38, 0x60, 0x6c, 0x0a, 0x03, 0x8b, 0x0a, + 0x0b, 0xa7, 0x0d, 0x4f, 0x66, 0xc9, 0x62, 0x98, 0xef, 0x7b, 0xf6, 0x6e, 0xef, 0x52, 0x68, 0xeb, + 0x2c, 0xef, 0x90, 0xcb, 0x93, 0x9b, 0x5d, 0x62, 0xf9, 0xda, 0x33, 0x2f, 0x6a, 0x67, 0x76, 0xd7, + 0xb6, 0x34, 0x99, 0xbe, 0x84, 0x93, 0x7f, 0x09, 0xec, 0x04, 0xd2, 0xef, 0xb8, 0x8b, 0xb6, 0xbe, + 0x64, 
0xf7, 0xa1, 0x77, 0x29, 0x54, 0x8b, 0xbc, 0x33, 0x4b, 0x16, 0x69, 0x1e, 0x9a, 0xb3, 0xce, + 0xf3, 0x64, 0xfe, 0x33, 0x05, 0x38, 0x04, 0xc1, 0x18, 0x74, 0x6b, 0x51, 0x21, 0xef, 0x93, 0x96, + 0x6a, 0x36, 0x83, 0xd1, 0x06, 0x6d, 0x61, 0x64, 0xe3, 0x33, 0xa6, 0x23, 0x86, 0xf9, 0xf1, 0x88, + 0x3d, 0x86, 0xbb, 0x1b, 0xdc, 0x8a, 0x56, 0xb9, 0x35, 0x05, 0xc9, 0x53, 0xb2, 0x19, 0xc7, 0x61, + 0x38, 0xfa, 0x01, 0x0c, 0x2b, 0x71, 0x15, 0x09, 0x5d, 0x22, 0x0c, 0x2a, 0x71, 0xb5, 0x07, 0xb7, + 0x06, 0x71, 0xed, 0x24, 0x1a, 0x7e, 0x27, 0x80, 0x7e, 0xf0, 0x59, 0xa2, 0xf1, 0x59, 0x6e, 0x5a, + 0x43, 0x2f, 0xcc, 0x7b, 0x21, 0xcb, 0xeb, 0x9e, 0x4d, 0xa0, 0x1f, 0xe2, 0xe0, 0x03, 0x42, 0x62, + 0xe7, 0x2f, 0xd2, 0xd6, 0xd2, 0xf1, 0x61, 0xb8, 0x88, 0xaf, 0xd9, 0x19, 0xf4, 0xe9, 0xe2, 0x96, + 0x03, 0x25, 0x3e, 0xbf, 0x79, 0x1b, 0xb2, 0x2f, 0x44, 0x0a, 0x61, 0x47, 0x05, 0x7b, 0x04, 0xe3, + 0x8d, 0xb4, 0x8d, 0x12, 0xbb, 0x35, 0x05, 0x34, 0x8e, 0x29, 0x84, 0xd9, 0x47, 0x51, 0xe1, 0xf4, + 0x14, 0x46, 0x47, 0xca, 0xdb, 0xbc, 0xc2, 0xb9, 0x82, 0x7b, 0x85, 0xae, 0x8e, 0x3e, 0xe7, 0x3c, + 0x3c, 0xca, 0xca, 0xaf, 0xf3, 0x2a, 0xf9, 0x7a, 0x11, 0x91, 0x52, 0x2b, 0x51, 0x97, 0x99, 0x36, + 0xe5, 0xb2, 0xc4, 0x9a, 0x96, 0x7d, 0x19, 0x20, 0xd1, 0x48, 0x4b, 0x7f, 0x83, 0x45, 0x73, 0x29, + 0x0b, 0x2c, 0x74, 0xbd, 0x95, 0xe5, 0x8b, 0xbf, 0xba, 0x5f, 0x9d, 0xee, 0x9b, 0x57, 0xab, 0xb7, + 0xdf, 0xfa, 0x24, 0x7c, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x90, 0x7e, 0xf5, 0xab, 0x69, 0x03, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.pb.go new file mode 100644 index 000000000..c8275cffb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/service.pb.go @@ -0,0 +1,368 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/api/service.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api5 "google.golang.org/genproto/googleapis/api" +import google_api "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/label" +import google_api3 "google.golang.org/genproto/googleapis/api/metric" +import google_api6 "google.golang.org/genproto/googleapis/api/monitoredres" +import _ "github.com/golang/protobuf/ptypes/any" +import google_protobuf4 "google.golang.org/genproto/protobuf/api" +import google_protobuf3 "google.golang.org/genproto/protobuf/ptype" +import google_protobuf5 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `Service` is the root object of Google service configuration schema. It +// describes basic information about a service, such as the name and the +// title, and delegates other aspects to sub-sections. Each sub-section is +// either a proto message or a repeated proto message that configures a +// specific aspect, such as auth. See each proto message definition for details. +// +// Example: +// +// type: google.api.Service +// config_version: 3 +// name: calendar.googleapis.com +// title: Google Calendar API +// apis: +// - name: google.calendar.v3.Calendar +// authentication: +// providers: +// - id: google_calendar_auth +// jwks_uri: https://www.googleapis.com/oauth2/v1/certs +// issuer: https://securetoken.google.com +// rules: +// - selector: "*" +// requirements: +// provider_id: google_calendar_auth +type Service struct { + // The version of the service configuration. The config version may + // influence interpretation of the configuration, for example, to + // determine defaults. 
This is documented together with applicable + // options. The current default for the config version itself is `3`. + ConfigVersion *google_protobuf5.UInt32Value `protobuf:"bytes,20,opt,name=config_version,json=configVersion" json:"config_version,omitempty"` + // The DNS address at which this service is available, + // e.g. `calendar.googleapis.com`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A unique ID for a specific instance of this message, typically assigned + // by the client for tracking purpose. If empty, the server may choose to + // generate one instead. + Id string `protobuf:"bytes,33,opt,name=id" json:"id,omitempty"` + // The product title associated with this service. + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + // The id of the Google developer project that owns the service. + // Members of this project can manage the service configuration, + // manage consumption of the service, etc. + ProducerProjectId string `protobuf:"bytes,22,opt,name=producer_project_id,json=producerProjectId" json:"producer_project_id,omitempty"` + // A list of API interfaces exported by this service. Only the `name` field + // of the [google.protobuf.Api][google.protobuf.Api] needs to be provided by the configuration + // author, as the remaining fields will be derived from the IDL during the + // normalization process. It is an error to specify an API interface here + // which cannot be resolved against the associated IDL files. + Apis []*google_protobuf4.Api `protobuf:"bytes,3,rep,name=apis" json:"apis,omitempty"` + // A list of all proto message types included in this API service. + // Types referenced directly or indirectly by the `apis` are + // automatically included. Messages which are not referenced but + // shall be included, such as types used by the `google.protobuf.Any` type, + // should be listed here by name. 
Example: + // + // types: + // - name: google.protobuf.Int32 + Types []*google_protobuf3.Type `protobuf:"bytes,4,rep,name=types" json:"types,omitempty"` + // A list of all enum types included in this API service. Enums + // referenced directly or indirectly by the `apis` are automatically + // included. Enums which are not referenced but shall be included + // should be listed here by name. Example: + // + // enums: + // - name: google.someapi.v1.SomeEnum + Enums []*google_protobuf3.Enum `protobuf:"bytes,5,rep,name=enums" json:"enums,omitempty"` + // Additional API documentation. + Documentation *Documentation `protobuf:"bytes,6,opt,name=documentation" json:"documentation,omitempty"` + // API backend configuration. + Backend *Backend `protobuf:"bytes,8,opt,name=backend" json:"backend,omitempty"` + // HTTP configuration. + Http *google_api.Http `protobuf:"bytes,9,opt,name=http" json:"http,omitempty"` + // Quota configuration. + Quota *Quota `protobuf:"bytes,10,opt,name=quota" json:"quota,omitempty"` + // Auth configuration. + Authentication *Authentication `protobuf:"bytes,11,opt,name=authentication" json:"authentication,omitempty"` + // Context configuration. + Context *Context `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + // Configuration controlling usage of this service. + Usage *Usage `protobuf:"bytes,15,opt,name=usage" json:"usage,omitempty"` + // Configuration for network endpoints. If this is empty, then an endpoint + // with the same name as the service is automatically generated to service all + // defined APIs. + Endpoints []*Endpoint `protobuf:"bytes,18,rep,name=endpoints" json:"endpoints,omitempty"` + // Configuration for the service control plane. + Control *Control `protobuf:"bytes,21,opt,name=control" json:"control,omitempty"` + // Defines the logs used by this service. + Logs []*LogDescriptor `protobuf:"bytes,23,rep,name=logs" json:"logs,omitempty"` + // Defines the metrics used by this service. 
+ Metrics []*google_api3.MetricDescriptor `protobuf:"bytes,24,rep,name=metrics" json:"metrics,omitempty"` + // Defines the monitored resources used by this service. This is required + // by the [Service.monitoring][google.api.Service.monitoring] and [Service.logging][google.api.Service.logging] configurations. + MonitoredResources []*google_api6.MonitoredResourceDescriptor `protobuf:"bytes,25,rep,name=monitored_resources,json=monitoredResources" json:"monitored_resources,omitempty"` + // Logging configuration. + Logging *Logging `protobuf:"bytes,27,opt,name=logging" json:"logging,omitempty"` + // Monitoring configuration. + Monitoring *Monitoring `protobuf:"bytes,28,opt,name=monitoring" json:"monitoring,omitempty"` + // System parameter configuration. + SystemParameters *SystemParameters `protobuf:"bytes,29,opt,name=system_parameters,json=systemParameters" json:"system_parameters,omitempty"` + // Output only. The source information for this configuration if available. + SourceInfo *SourceInfo `protobuf:"bytes,37,opt,name=source_info,json=sourceInfo" json:"source_info,omitempty"` + // Experimental configuration. 
+ Experimental *google_api5.Experimental `protobuf:"bytes,101,opt,name=experimental" json:"experimental,omitempty"` +} + +func (m *Service) Reset() { *m = Service{} } +func (m *Service) String() string { return proto.CompactTextString(m) } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{0} } + +func (m *Service) GetConfigVersion() *google_protobuf5.UInt32Value { + if m != nil { + return m.ConfigVersion + } + return nil +} + +func (m *Service) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Service) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Service) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Service) GetProducerProjectId() string { + if m != nil { + return m.ProducerProjectId + } + return "" +} + +func (m *Service) GetApis() []*google_protobuf4.Api { + if m != nil { + return m.Apis + } + return nil +} + +func (m *Service) GetTypes() []*google_protobuf3.Type { + if m != nil { + return m.Types + } + return nil +} + +func (m *Service) GetEnums() []*google_protobuf3.Enum { + if m != nil { + return m.Enums + } + return nil +} + +func (m *Service) GetDocumentation() *Documentation { + if m != nil { + return m.Documentation + } + return nil +} + +func (m *Service) GetBackend() *Backend { + if m != nil { + return m.Backend + } + return nil +} + +func (m *Service) GetHttp() *google_api.Http { + if m != nil { + return m.Http + } + return nil +} + +func (m *Service) GetQuota() *Quota { + if m != nil { + return m.Quota + } + return nil +} + +func (m *Service) GetAuthentication() *Authentication { + if m != nil { + return m.Authentication + } + return nil +} + +func (m *Service) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *Service) GetUsage() *Usage { + if m != nil { + return m.Usage + } + return nil +} + +func (m *Service) GetEndpoints() []*Endpoint { + if m 
!= nil { + return m.Endpoints + } + return nil +} + +func (m *Service) GetControl() *Control { + if m != nil { + return m.Control + } + return nil +} + +func (m *Service) GetLogs() []*LogDescriptor { + if m != nil { + return m.Logs + } + return nil +} + +func (m *Service) GetMetrics() []*google_api3.MetricDescriptor { + if m != nil { + return m.Metrics + } + return nil +} + +func (m *Service) GetMonitoredResources() []*google_api6.MonitoredResourceDescriptor { + if m != nil { + return m.MonitoredResources + } + return nil +} + +func (m *Service) GetLogging() *Logging { + if m != nil { + return m.Logging + } + return nil +} + +func (m *Service) GetMonitoring() *Monitoring { + if m != nil { + return m.Monitoring + } + return nil +} + +func (m *Service) GetSystemParameters() *SystemParameters { + if m != nil { + return m.SystemParameters + } + return nil +} + +func (m *Service) GetSourceInfo() *SourceInfo { + if m != nil { + return m.SourceInfo + } + return nil +} + +func (m *Service) GetExperimental() *google_api5.Experimental { + if m != nil { + return m.Experimental + } + return nil +} + +func init() { + proto.RegisterType((*Service)(nil), "google.api.Service") +} + +func init() { proto.RegisterFile("google/api/service.proto", fileDescriptor12) } + +var fileDescriptor12 = []byte{ + // 809 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x95, 0xdd, 0x6e, 0xdb, 0x36, + 0x14, 0x80, 0x61, 0xd7, 0x6e, 0x66, 0x3a, 0xcd, 0x1a, 0xc6, 0x49, 0x19, 0xd7, 0x1b, 0xd2, 0xfd, + 0xa0, 0xc6, 0x86, 0xca, 0x80, 0x0b, 0x74, 0x17, 0x1b, 0x30, 0xc4, 0x6d, 0xb0, 0x19, 0xe8, 0x00, + 0x8f, 0x59, 0x8b, 0x61, 0x37, 0x06, 0x2d, 0xd1, 0x0a, 0x37, 0x89, 0xe4, 0x48, 0x2a, 0x8b, 0x5f, + 0x67, 0xcf, 0xb6, 0x07, 0x19, 0x44, 0x52, 0x31, 0x65, 0x39, 0x77, 0xd6, 0xf9, 0xbe, 0x73, 0x7c, + 0x28, 0x92, 0x47, 0x00, 0xa5, 0x42, 0xa4, 0x19, 0x9d, 0x10, 0xc9, 0x26, 0x9a, 0xaa, 0x5b, 0x16, + 0xd3, 0x48, 0x2a, 0x61, 0x04, 0x04, 0x8e, 0x44, 0x44, 0xb2, 
0xe1, 0x28, 0xb0, 0x08, 0xe7, 0xc2, + 0x10, 0xc3, 0x04, 0xd7, 0xce, 0x1c, 0x9e, 0x86, 0xb4, 0x30, 0x37, 0x3e, 0x1c, 0x96, 0x5e, 0x91, + 0xf8, 0x2f, 0xca, 0x93, 0x3d, 0x24, 0x16, 0xdc, 0xd0, 0x3b, 0xf3, 0x00, 0x51, 0x22, 0xf3, 0xe4, + 0xf3, 0x80, 0x24, 0x22, 0x2e, 0x72, 0xca, 0x5d, 0x17, 0x9e, 0x9f, 0x07, 0x9c, 0xf2, 0x44, 0x0a, + 0xc6, 0xab, 0xa2, 0xdf, 0x84, 0xe8, 0x4e, 0x52, 0xc5, 0x6c, 0x72, 0x56, 0x7b, 0xd8, 0xb3, 0x96, + 0x1b, 0x63, 0xa4, 0x0f, 0x9f, 0x05, 0xe1, 0x8c, 0xac, 0x68, 0xa5, 0x0f, 0xc2, 0xb8, 0x48, 0xf7, + 0xac, 0x22, 0x13, 0x69, 0xca, 0x78, 0x45, 0x9e, 0x05, 0x24, 0xa7, 0x46, 0xb1, 0xd8, 0x83, 0x2f, + 0x43, 0x20, 0x38, 0x33, 0x42, 0xd1, 0x64, 0xa9, 0xa8, 0x16, 0x85, 0xaa, 0xb6, 0x64, 0xf8, 0xbc, + 0x29, 0x6d, 0x4b, 0x87, 0x2d, 0xfe, 0x5d, 0x08, 0x43, 0x7c, 0x3c, 0xdc, 0x3b, 0x57, 0x6d, 0xc9, + 0xf8, 0x5a, 0x78, 0xfa, 0x22, 0xa4, 0x1b, 0x6d, 0x68, 0xbe, 0x94, 0x44, 0x91, 0x9c, 0x1a, 0xaa, + 0xf6, 0x14, 0x2e, 0x34, 0x49, 0xe9, 0xce, 0x1b, 0xb7, 0x4f, 0xab, 0x62, 0x3d, 0x21, 0x7c, 0xf3, + 0x20, 0x92, 0xcc, 0xa3, 0xe1, 0x2e, 0x32, 0x1b, 0x49, 0x77, 0xf6, 0xf8, 0x9e, 0xfd, 0xa3, 0x88, + 0x94, 0x54, 0xf9, 0x83, 0xf6, 0xc5, 0x7f, 0x3d, 0x70, 0x70, 0xed, 0x0e, 0x29, 0x7c, 0x0b, 0x8e, + 0x62, 0xc1, 0xd7, 0x2c, 0x5d, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0x68, 0x70, 0xd1, 0x1a, 0xf7, 0xa7, + 0xa3, 0xc8, 0x9f, 0xdb, 0xaa, 0x48, 0xf4, 0x61, 0xce, 0xcd, 0xeb, 0xe9, 0x47, 0x92, 0x15, 0x14, + 0x3f, 0x71, 0x39, 0x1f, 0x5d, 0x0a, 0x84, 0xa0, 0xc3, 0x49, 0x4e, 0x51, 0xeb, 0xa2, 0x35, 0xee, + 0x61, 0xfb, 0x1b, 0x1e, 0x81, 0x36, 0x4b, 0xd0, 0x0b, 0x1b, 0x69, 0xb3, 0x04, 0x0e, 0x40, 0xd7, + 0x30, 0x93, 0x51, 0xd4, 0xb6, 0x21, 0xf7, 0x00, 0x23, 0x70, 0x22, 0x95, 0x48, 0x8a, 0x98, 0xaa, + 0xa5, 0x54, 0xe2, 0x4f, 0x1a, 0x9b, 0x25, 0x4b, 0xd0, 0x99, 0x75, 0x8e, 0x2b, 0xb4, 0x70, 0x64, + 0x9e, 0xc0, 0x31, 0xe8, 0x10, 0xc9, 0x34, 0x7a, 0x74, 0xf1, 0x68, 0xdc, 0x9f, 0x0e, 0x1a, 0x4d, + 0x5e, 0x4a, 0x86, 0xad, 0x01, 0xbf, 0x05, 0xdd, 0xf2, 0x95, 0x68, 0xd4, 0xb1, 0xea, 0x69, 0x43, + 
0xfd, 0x6d, 0x23, 0x29, 0x76, 0x4e, 0x29, 0x53, 0x5e, 0xe4, 0x1a, 0x75, 0x1f, 0x90, 0xaf, 0x78, + 0x91, 0x63, 0xe7, 0xc0, 0x1f, 0xc1, 0x93, 0xda, 0xcd, 0x41, 0x8f, 0xed, 0x1b, 0x3b, 0x8f, 0xb6, + 0x37, 0x3d, 0x7a, 0x17, 0x0a, 0xb8, 0xee, 0xc3, 0x57, 0xe0, 0xc0, 0x5f, 0x64, 0xf4, 0x89, 0x4d, + 0x3d, 0x09, 0x53, 0x67, 0x0e, 0xe1, 0xca, 0x81, 0x5f, 0x81, 0x4e, 0x79, 0x85, 0x50, 0xcf, 0xba, + 0x4f, 0x43, 0xf7, 0x67, 0x63, 0x24, 0xb6, 0x14, 0xbe, 0x04, 0x5d, 0x7b, 0x5c, 0x11, 0xb0, 0xda, + 0x71, 0xa8, 0xfd, 0x5a, 0x02, 0xec, 0x38, 0x9c, 0x81, 0xa3, 0x72, 0xba, 0x50, 0x6e, 0x58, 0xec, + 0xfa, 0xef, 0xdb, 0x8c, 0x61, 0x98, 0x71, 0x59, 0x33, 0xf0, 0x4e, 0x46, 0xb9, 0x02, 0x3f, 0x70, + 0xd0, 0x61, 0x73, 0x05, 0x6f, 0x1d, 0xc2, 0x95, 0x53, 0xf6, 0x66, 0x4f, 0x3c, 0xfa, 0xb4, 0xd9, + 0xdb, 0x87, 0x12, 0x60, 0xc7, 0xe1, 0x14, 0xf4, 0xaa, 0xa1, 0xa3, 0x11, 0xac, 0xef, 0x71, 0x29, + 0x5f, 0x79, 0x88, 0xb7, 0x5a, 0xd5, 0x8b, 0x12, 0x19, 0x3a, 0xdd, 0xdf, 0x8b, 0x12, 0x19, 0xae, + 0x1c, 0xf8, 0x0a, 0x74, 0x32, 0x91, 0x6a, 0xf4, 0xcc, 0x56, 0xaf, 0x6d, 0xda, 0x7b, 0x91, 0xbe, + 0xa3, 0x3a, 0x56, 0x4c, 0x1a, 0xa1, 0xb0, 0xd5, 0xe0, 0x1b, 0x70, 0xe0, 0x06, 0x8c, 0x46, 0xc8, + 0x66, 0x8c, 0xc2, 0x8c, 0x5f, 0x2c, 0x0a, 0x92, 0x2a, 0x19, 0xfe, 0x0e, 0x4e, 0x9a, 0xf3, 0x47, + 0xa3, 0x73, 0x5b, 0xe3, 0x65, 0xad, 0x46, 0xa5, 0x61, 0x6f, 0x05, 0xe5, 0x60, 0xbe, 0x0b, 0xed, + 0x7a, 0xfd, 0x30, 0x44, 0xcf, 0x9b, 0xeb, 0x7d, 0xef, 0x10, 0xae, 0x1c, 0xf8, 0x06, 0x80, 0xed, + 0x8c, 0x43, 0x23, 0x9b, 0x71, 0xb6, 0xe7, 0xff, 0xcb, 0xa4, 0xc0, 0x84, 0x73, 0x70, 0xbc, 0x3b, + 0xc8, 0x34, 0xfa, 0xac, 0x3e, 0x1b, 0xca, 0xf4, 0x6b, 0x2b, 0x2d, 0xee, 0x1d, 0xfc, 0x54, 0xef, + 0x44, 0xe0, 0x77, 0xa0, 0x1f, 0x4c, 0x4c, 0xf4, 0x75, 0xb3, 0x87, 0x6b, 0x8b, 0xe7, 0x7c, 0x2d, + 0x30, 0xd0, 0xf7, 0xbf, 0xe1, 0x0f, 0xe0, 0x30, 0xfc, 0xb6, 0x20, 0x6a, 0x33, 0x51, 0xed, 0x44, + 0x04, 0x1c, 0xd7, 0xec, 0x19, 0x2f, 0x47, 0x5b, 0x1e, 0xc8, 0xb3, 0x43, 0x3f, 0xf5, 0x16, 0xe5, + 0xb5, 0x5e, 0xb4, 0xfe, 0xb8, 0xf2, 
0x2c, 0x15, 0x19, 0xe1, 0x69, 0x24, 0x54, 0x3a, 0x49, 0x29, + 0xb7, 0x97, 0x7e, 0xe2, 0x50, 0x39, 0x4a, 0xc2, 0x8f, 0xba, 0x9b, 0x7b, 0xdf, 0xd7, 0x9e, 0xfe, + 0x6d, 0x77, 0x7e, 0xba, 0x5c, 0xcc, 0x57, 0x8f, 0x6d, 0xe2, 0xeb, 0xff, 0x03, 0x00, 0x00, 0xff, + 0xff, 0xcc, 0xae, 0xb3, 0x8f, 0x0c, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/source_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/source_info.pb.go new file mode 100644 index 000000000..127808447 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/source_info.pb.go @@ -0,0 +1,55 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/source_info.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Source information used to create a Service Config +type SourceInfo struct { + // All files used during config generation. 
+ SourceFiles []*google_protobuf1.Any `protobuf:"bytes,1,rep,name=source_files,json=sourceFiles" json:"source_files,omitempty"` +} + +func (m *SourceInfo) Reset() { *m = SourceInfo{} } +func (m *SourceInfo) String() string { return proto.CompactTextString(m) } +func (*SourceInfo) ProtoMessage() {} +func (*SourceInfo) Descriptor() ([]byte, []int) { return fileDescriptor13, []int{0} } + +func (m *SourceInfo) GetSourceFiles() []*google_protobuf1.Any { + if m != nil { + return m.SourceFiles + } + return nil +} + +func init() { + proto.RegisterType((*SourceInfo)(nil), "google.api.SourceInfo") +} + +func init() { proto.RegisterFile("google/api/source_info.proto", fileDescriptor13) } + +var fileDescriptor13 = []byte{ + // 198 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x8d, 0xcf, 0xcc, + 0x4b, 0xcb, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x49, 0x42, 0x55, 0x82, 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, + 0x5c, 0xb9, 0xb8, 0x82, 0xc1, 0x7a, 0x3d, 0xf3, 0xd2, 0xf2, 0x85, 0xcc, 0xb9, 0x78, 0xa0, 0x26, + 0xa5, 0x65, 0xe6, 0xa4, 0x16, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0x89, 0xe8, 0x41, 0xcd, + 0x82, 0xe9, 0xd7, 0x73, 0xcc, 0xab, 0x0c, 0xe2, 0x86, 0xa8, 0x74, 0x03, 0x29, 0x74, 0x2a, 0xe4, + 0xe2, 0x4b, 0xce, 0xcf, 0xd5, 0x43, 0xd8, 0xe9, 0xc4, 0x8f, 0x30, 0x36, 0x00, 0xa4, 0x2d, 0x80, + 0x31, 0xca, 0x15, 0x2a, 0x9d, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x97, 0x5f, 0x94, 0xae, 0x9f, + 0x9e, 0x9a, 0x07, 0x36, 0x54, 0x1f, 0x22, 0x95, 0x58, 0x90, 0x59, 0x0c, 0xf1, 0x4f, 0x6a, 0x51, + 0x59, 0x66, 0x72, 0x6a, 0x72, 0x7e, 0x5e, 0x5a, 0x66, 0xba, 0x35, 0x0a, 0x6f, 0x11, 0x13, 0x8b, + 0xbb, 0x63, 0x80, 0x67, 0x12, 0x1b, 0x58, 0xa3, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x78, + 0x5d, 0xab, 0x07, 0x01, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.pb.go new file mode 100644 index 000000000..b3a598532 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/system_parameter.pb.go @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/system_parameter.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// ### System parameter configuration +// +// A system parameter is a special kind of parameter defined by the API +// system, not by an individual API. It is typically mapped to an HTTP header +// and/or a URL query parameter. This configuration specifies which methods +// change the names of the system parameters. +type SystemParameters struct { + // Define system parameters. + // + // The parameters defined here will override the default parameters + // implemented by the system. If this field is missing from the service + // config, default system parameters will be used. Default system parameters + // and names is implementation-dependent. + // + // Example: define api key for all methods + // + // system_parameters + // rules: + // - selector: "*" + // parameters: + // - name: api_key + // url_query_parameter: api_key + // + // + // Example: define 2 api key names for a specific method. + // + // system_parameters + // rules: + // - selector: "/ListShelves" + // parameters: + // - name: api_key + // http_header: Api-Key1 + // - name: api_key + // http_header: Api-Key2 + // + // **NOTE:** All service configuration rules follow "last one wins" order. 
+ Rules []*SystemParameterRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *SystemParameters) Reset() { *m = SystemParameters{} } +func (m *SystemParameters) String() string { return proto.CompactTextString(m) } +func (*SystemParameters) ProtoMessage() {} +func (*SystemParameters) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{0} } + +func (m *SystemParameters) GetRules() []*SystemParameterRule { + if m != nil { + return m.Rules + } + return nil +} + +// Define a system parameter rule mapping system parameter definitions to +// methods. +type SystemParameterRule struct { + // Selects the methods to which this rule applies. Use '*' to indicate all + // methods in all APIs. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // Define parameters. Multiple names may be defined for a parameter. + // For a given method call, only one of them should be used. If multiple + // names are used the behavior is implementation-dependent. + // If none of the specified names are present the behavior is + // parameter-dependent. + Parameters []*SystemParameter `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty"` +} + +func (m *SystemParameterRule) Reset() { *m = SystemParameterRule{} } +func (m *SystemParameterRule) String() string { return proto.CompactTextString(m) } +func (*SystemParameterRule) ProtoMessage() {} +func (*SystemParameterRule) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{1} } + +func (m *SystemParameterRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *SystemParameterRule) GetParameters() []*SystemParameter { + if m != nil { + return m.Parameters + } + return nil +} + +// Define a parameter's name and location. 
The parameter may be passed as either +// an HTTP header or a URL query parameter, and if both are passed the behavior +// is implementation-dependent. +type SystemParameter struct { + // Define the name of the parameter, such as "api_key" . It is case sensitive. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Define the HTTP header name to use for the parameter. It is case + // insensitive. + HttpHeader string `protobuf:"bytes,2,opt,name=http_header,json=httpHeader" json:"http_header,omitempty"` + // Define the URL query parameter name to use for the parameter. It is case + // sensitive. + UrlQueryParameter string `protobuf:"bytes,3,opt,name=url_query_parameter,json=urlQueryParameter" json:"url_query_parameter,omitempty"` +} + +func (m *SystemParameter) Reset() { *m = SystemParameter{} } +func (m *SystemParameter) String() string { return proto.CompactTextString(m) } +func (*SystemParameter) ProtoMessage() {} +func (*SystemParameter) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{2} } + +func (m *SystemParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SystemParameter) GetHttpHeader() string { + if m != nil { + return m.HttpHeader + } + return "" +} + +func (m *SystemParameter) GetUrlQueryParameter() string { + if m != nil { + return m.UrlQueryParameter + } + return "" +} + +func init() { + proto.RegisterType((*SystemParameters)(nil), "google.api.SystemParameters") + proto.RegisterType((*SystemParameterRule)(nil), "google.api.SystemParameterRule") + proto.RegisterType((*SystemParameter)(nil), "google.api.SystemParameter") +} + +func init() { proto.RegisterFile("google/api/system_parameter.proto", fileDescriptor14) } + +var fileDescriptor14 = []byte{ + // 286 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xbf, 0x4e, 0xc3, 0x30, + 0x10, 0x87, 0x95, 0xb6, 0x20, 0xb8, 0x4a, 0xfc, 0x71, 0x19, 0x22, 0x18, 0x5a, 0x3a, 
0x75, 0x72, + 0x24, 0x10, 0x53, 0x27, 0x2a, 0x21, 0xe8, 0x16, 0xca, 0xc6, 0x12, 0x99, 0x70, 0xb8, 0x91, 0x9c, + 0xd8, 0x9c, 0x9d, 0x48, 0x7d, 0x1d, 0x9e, 0x14, 0xc5, 0x29, 0x69, 0x89, 0x10, 0x9b, 0xef, 0xbe, + 0xcf, 0xfa, 0x9d, 0xee, 0xe0, 0x5a, 0x6a, 0x2d, 0x15, 0x46, 0xc2, 0x64, 0x91, 0xdd, 0x58, 0x87, + 0x79, 0x62, 0x04, 0x89, 0x1c, 0x1d, 0x12, 0x37, 0xa4, 0x9d, 0x66, 0xd0, 0x28, 0x5c, 0x98, 0x6c, + 0xba, 0x84, 0xb3, 0x17, 0x6f, 0xc5, 0x3f, 0x92, 0x65, 0x77, 0x70, 0x40, 0xa5, 0x42, 0x1b, 0x06, + 0x93, 0xfe, 0x6c, 0x78, 0x33, 0xe6, 0x3b, 0x9f, 0x77, 0xe4, 0x55, 0xa9, 0x70, 0xd5, 0xd8, 0xd3, + 0x02, 0x46, 0x7f, 0x50, 0x76, 0x09, 0x47, 0x16, 0x15, 0xa6, 0x4e, 0x53, 0x18, 0x4c, 0x82, 0xd9, + 0xf1, 0xaa, 0xad, 0xd9, 0x1c, 0xa0, 0x1d, 0xce, 0x86, 0x3d, 0x1f, 0x77, 0xf5, 0x5f, 0xdc, 0x9e, + 0x3e, 0xad, 0xe0, 0xb4, 0x83, 0x19, 0x83, 0x41, 0x21, 0x72, 0xdc, 0xe6, 0xf8, 0x37, 0x1b, 0xc3, + 0x70, 0xed, 0x9c, 0x49, 0xd6, 0x28, 0xde, 0x91, 0xc2, 0x9e, 0x47, 0x50, 0xb7, 0x9e, 0x7c, 0x87, + 0x71, 0x18, 0x95, 0xa4, 0x92, 0xcf, 0x12, 0x69, 0xb3, 0xdb, 0x55, 0xd8, 0xf7, 0xe2, 0x79, 0x49, + 0xea, 0xb9, 0x26, 0x6d, 0xc8, 0xa2, 0x82, 0x93, 0x54, 0xe7, 0x7b, 0x53, 0x2e, 0x2e, 0x3a, 0x73, + 0xc4, 0xf5, 0x9a, 0xe3, 0xe0, 0xf5, 0x61, 0xeb, 0x48, 0xad, 0x44, 0x21, 0xb9, 0x26, 0x19, 0x49, + 0x2c, 0xfc, 0x11, 0xa2, 0x06, 0x09, 0x93, 0xd9, 0xe6, 0x54, 0x48, 0x55, 0x96, 0x62, 0xaa, 0x8b, + 0x8f, 0x4c, 0xce, 0x7f, 0x55, 0x5f, 0xbd, 0xc1, 0xe3, 0x7d, 0xbc, 0x7c, 0x3b, 0xf4, 0x1f, 0x6f, + 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x5e, 0xdf, 0x2e, 0x09, 0xe2, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.pb.go new file mode 100644 index 000000000..1e7d710bf --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/serviceconfig/usage.pb.go @@ -0,0 +1,157 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/api/usage.proto + +package serviceconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Configuration controlling usage of a service. +type Usage struct { + // Requirements that must be satisfied before a consumer project can use the + // service. Each requirement is of the form /; + // for example 'serviceusage.googleapis.com/billing-enabled'. + Requirements []string `protobuf:"bytes,1,rep,name=requirements" json:"requirements,omitempty"` + // A list of usage rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*UsageRule `protobuf:"bytes,6,rep,name=rules" json:"rules,omitempty"` + // The full resource name of a channel used for sending notifications to the + // service producer. + // + // Google Service Management currently only supports + // [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification + // channel. To use Google Cloud Pub/Sub as the channel, this must be the name + // of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format + // documented in https://cloud.google.com/pubsub/docs/overview. 
+ ProducerNotificationChannel string `protobuf:"bytes,7,opt,name=producer_notification_channel,json=producerNotificationChannel" json:"producer_notification_channel,omitempty"` +} + +func (m *Usage) Reset() { *m = Usage{} } +func (m *Usage) String() string { return proto.CompactTextString(m) } +func (*Usage) ProtoMessage() {} +func (*Usage) Descriptor() ([]byte, []int) { return fileDescriptor15, []int{0} } + +func (m *Usage) GetRequirements() []string { + if m != nil { + return m.Requirements + } + return nil +} + +func (m *Usage) GetRules() []*UsageRule { + if m != nil { + return m.Rules + } + return nil +} + +func (m *Usage) GetProducerNotificationChannel() string { + if m != nil { + return m.ProducerNotificationChannel + } + return "" +} + +// Usage configuration rules for the service. +// +// NOTE: Under development. +// +// +// Use this rule to configure unregistered calls for the service. Unregistered +// calls are calls that do not contain consumer project identity. +// (Example: calls that do not contain an API key). +// By default, API methods do not allow unregistered calls, and each method call +// must be identified by a consumer project identity. Use this rule to +// allow/disallow unregistered calls. +// +// Example of an API that wants to allow unregistered calls for entire service. +// +// usage: +// rules: +// - selector: "*" +// allow_unregistered_calls: true +// +// Example of a method that wants to allow unregistered calls. +// +// usage: +// rules: +// - selector: "google.example.library.v1.LibraryService.CreateBook" +// allow_unregistered_calls: true +type UsageRule struct { + // Selects the methods to which this rule applies. Use '*' to indicate all + // methods in all APIs. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + // True, if the method allows unregistered calls; false otherwise. 
+ AllowUnregisteredCalls bool `protobuf:"varint,2,opt,name=allow_unregistered_calls,json=allowUnregisteredCalls" json:"allow_unregistered_calls,omitempty"` + // True, if the method should skip service control. If so, no control plane + // feature (like quota and billing) will be enabled. + SkipServiceControl bool `protobuf:"varint,3,opt,name=skip_service_control,json=skipServiceControl" json:"skip_service_control,omitempty"` +} + +func (m *UsageRule) Reset() { *m = UsageRule{} } +func (m *UsageRule) String() string { return proto.CompactTextString(m) } +func (*UsageRule) ProtoMessage() {} +func (*UsageRule) Descriptor() ([]byte, []int) { return fileDescriptor15, []int{1} } + +func (m *UsageRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *UsageRule) GetAllowUnregisteredCalls() bool { + if m != nil { + return m.AllowUnregisteredCalls + } + return false +} + +func (m *UsageRule) GetSkipServiceControl() bool { + if m != nil { + return m.SkipServiceControl + } + return false +} + +func init() { + proto.RegisterType((*Usage)(nil), "google.api.Usage") + proto.RegisterType((*UsageRule)(nil), "google.api.UsageRule") +} + +func init() { proto.RegisterFile("google/api/usage.proto", fileDescriptor15) } + +var fileDescriptor15 = []byte{ + // 331 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xc1, 0x4b, 0xfb, 0x30, + 0x14, 0xc7, 0xe9, 0xf6, 0xdb, 0x7e, 0x5b, 0x14, 0x0f, 0x41, 0x47, 0x99, 0x0a, 0x65, 0xa7, 0x82, + 0xd0, 0x8a, 0x5e, 0x04, 0x4f, 0x6e, 0x88, 0x78, 0x91, 0x51, 0xd9, 0xc5, 0x4b, 0x89, 0xd9, 0x5b, + 0x0c, 0x66, 0x79, 0x35, 0x49, 0xf5, 0x0f, 0xf1, 0xea, 0xc9, 0xbf, 0x54, 0x9a, 0xcc, 0xd9, 0x1d, + 0xdf, 0xfb, 0x7c, 0xbe, 0xef, 0xb5, 0x2f, 0x64, 0x24, 0x10, 0x85, 0x82, 0x9c, 0x55, 0x32, 0xaf, + 0x2d, 0x13, 0x90, 0x55, 0x06, 0x1d, 0x52, 0x12, 0xfa, 0x19, 0xab, 0xe4, 0xf8, 0xa4, 0xe5, 0x30, + 0xad, 0xd1, 0x31, 0x27, 0x51, 0xdb, 0x60, 0x4e, 0xbe, 0x22, 0xd2, 
0x5b, 0x34, 0x49, 0x3a, 0x21, + 0xfb, 0x06, 0xde, 0x6a, 0x69, 0x60, 0x0d, 0xda, 0xd9, 0x38, 0x4a, 0xba, 0xe9, 0xb0, 0xd8, 0xe9, + 0xd1, 0x33, 0xd2, 0x33, 0xb5, 0x02, 0x1b, 0xf7, 0x93, 0x6e, 0xba, 0x77, 0x71, 0x94, 0xfd, 0xed, + 0xc9, 0xfc, 0x94, 0xa2, 0x56, 0x50, 0x04, 0x87, 0x4e, 0xc9, 0x69, 0x65, 0x70, 0x59, 0x73, 0x30, + 0xa5, 0x46, 0x27, 0x57, 0x92, 0xfb, 0xd5, 0x25, 0x7f, 0x61, 0x5a, 0x83, 0x8a, 0xff, 0x27, 0x51, + 0x3a, 0x2c, 0x8e, 0x7f, 0xa5, 0x87, 0x96, 0x33, 0x0b, 0xca, 0xe4, 0x33, 0x22, 0xc3, 0xed, 0x60, + 0x3a, 0x26, 0x03, 0x0b, 0x0a, 0xb8, 0x43, 0x13, 0x47, 0x3e, 0xbc, 0xad, 0xe9, 0x15, 0x89, 0x99, + 0x52, 0xf8, 0x51, 0xd6, 0xda, 0x80, 0x90, 0xd6, 0x81, 0x81, 0x65, 0xc9, 0x99, 0x52, 0x36, 0xee, + 0x24, 0x51, 0x3a, 0x28, 0x46, 0x9e, 0x2f, 0x5a, 0x78, 0xd6, 0x50, 0x7a, 0x4e, 0x0e, 0xed, 0xab, + 0xac, 0x4a, 0x0b, 0xe6, 0x5d, 0x72, 0x28, 0x39, 0x6a, 0x67, 0x50, 0xc5, 0x5d, 0x9f, 0xa2, 0x0d, + 0x7b, 0x0c, 0x68, 0x16, 0xc8, 0x54, 0x91, 0x03, 0x8e, 0xeb, 0xd6, 0xcf, 0x4f, 0x89, 0xff, 0xc8, + 0x79, 0x73, 0xd2, 0x79, 0xf4, 0x74, 0xbb, 0x21, 0x02, 0x15, 0xd3, 0x22, 0x43, 0x23, 0x72, 0x01, + 0xda, 0x1f, 0x3c, 0x0f, 0x88, 0x55, 0xd2, 0xfa, 0x17, 0xd9, 0x2c, 0xe5, 0xa8, 0x57, 0x52, 0x5c, + 0xef, 0x54, 0xdf, 0x9d, 0x7f, 0x77, 0x37, 0xf3, 0xfb, 0xe7, 0xbe, 0x0f, 0x5e, 0xfe, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x9c, 0x4b, 0x8c, 0x57, 0xed, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/check_error.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/check_error.pb.go new file mode 100644 index 000000000..470d4cb3c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/check_error.pb.go @@ -0,0 +1,202 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/servicecontrol/v1/check_error.proto + +/* +Package servicecontrol is a generated protocol buffer package. 
+ +It is generated from these files: + google/api/servicecontrol/v1/check_error.proto + google/api/servicecontrol/v1/distribution.proto + google/api/servicecontrol/v1/log_entry.proto + google/api/servicecontrol/v1/metric_value.proto + google/api/servicecontrol/v1/operation.proto + google/api/servicecontrol/v1/service_controller.proto + +It has these top-level messages: + CheckError + Distribution + LogEntry + MetricValue + MetricValueSet + Operation + CheckRequest + CheckResponse + ReportRequest + ReportResponse +*/ +package servicecontrol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Error codes for Check responses. +type CheckError_Code int32 + +const ( + // This is never used in `CheckResponse`. + CheckError_ERROR_CODE_UNSPECIFIED CheckError_Code = 0 + // The consumer's project id was not found. + // Same as [google.rpc.Code.NOT_FOUND][]. + CheckError_NOT_FOUND CheckError_Code = 5 + // The consumer doesn't have access to the specified resource. + // Same as [google.rpc.Code.PERMISSION_DENIED][]. + CheckError_PERMISSION_DENIED CheckError_Code = 7 + // Quota check failed. Same as [google.rpc.Code.RESOURCE_EXHAUSTED][]. + CheckError_RESOURCE_EXHAUSTED CheckError_Code = 8 + // The consumer hasn't activated the service. + CheckError_SERVICE_NOT_ACTIVATED CheckError_Code = 104 + // The consumer cannot access the service because billing is disabled. 
+ CheckError_BILLING_DISABLED CheckError_Code = 107 + // The consumer's project has been marked as deleted (soft deletion). + CheckError_PROJECT_DELETED CheckError_Code = 108 + // The consumer's project number or id does not represent a valid project. + CheckError_PROJECT_INVALID CheckError_Code = 114 + // The IP address of the consumer is invalid for the specific consumer + // project. + CheckError_IP_ADDRESS_BLOCKED CheckError_Code = 109 + // The referer address of the consumer request is invalid for the specific + // consumer project. + CheckError_REFERER_BLOCKED CheckError_Code = 110 + // The client application of the consumer request is invalid for the + // specific consumer project. + CheckError_CLIENT_APP_BLOCKED CheckError_Code = 111 + // The consumer's API key is invalid. + CheckError_API_KEY_INVALID CheckError_Code = 105 + // The consumer's API Key has expired. + CheckError_API_KEY_EXPIRED CheckError_Code = 112 + // The consumer's API Key was not found in config record. + CheckError_API_KEY_NOT_FOUND CheckError_Code = 113 + // The backend server for looking up project id/number is unavailable. + CheckError_NAMESPACE_LOOKUP_UNAVAILABLE CheckError_Code = 300 + // The backend server for checking service status is unavailable. + CheckError_SERVICE_STATUS_UNAVAILABLE CheckError_Code = 301 + // The backend server for checking billing status is unavailable. 
+ CheckError_BILLING_STATUS_UNAVAILABLE CheckError_Code = 302 +) + +var CheckError_Code_name = map[int32]string{ + 0: "ERROR_CODE_UNSPECIFIED", + 5: "NOT_FOUND", + 7: "PERMISSION_DENIED", + 8: "RESOURCE_EXHAUSTED", + 104: "SERVICE_NOT_ACTIVATED", + 107: "BILLING_DISABLED", + 108: "PROJECT_DELETED", + 114: "PROJECT_INVALID", + 109: "IP_ADDRESS_BLOCKED", + 110: "REFERER_BLOCKED", + 111: "CLIENT_APP_BLOCKED", + 105: "API_KEY_INVALID", + 112: "API_KEY_EXPIRED", + 113: "API_KEY_NOT_FOUND", + 300: "NAMESPACE_LOOKUP_UNAVAILABLE", + 301: "SERVICE_STATUS_UNAVAILABLE", + 302: "BILLING_STATUS_UNAVAILABLE", +} +var CheckError_Code_value = map[string]int32{ + "ERROR_CODE_UNSPECIFIED": 0, + "NOT_FOUND": 5, + "PERMISSION_DENIED": 7, + "RESOURCE_EXHAUSTED": 8, + "SERVICE_NOT_ACTIVATED": 104, + "BILLING_DISABLED": 107, + "PROJECT_DELETED": 108, + "PROJECT_INVALID": 114, + "IP_ADDRESS_BLOCKED": 109, + "REFERER_BLOCKED": 110, + "CLIENT_APP_BLOCKED": 111, + "API_KEY_INVALID": 105, + "API_KEY_EXPIRED": 112, + "API_KEY_NOT_FOUND": 113, + "NAMESPACE_LOOKUP_UNAVAILABLE": 300, + "SERVICE_STATUS_UNAVAILABLE": 301, + "BILLING_STATUS_UNAVAILABLE": 302, +} + +func (x CheckError_Code) String() string { + return proto.EnumName(CheckError_Code_name, int32(x)) +} +func (CheckError_Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// Defines the errors to be returned in +// [google.api.servicecontrol.v1.CheckResponse.check_errors][google.api.servicecontrol.v1.CheckResponse.check_errors]. +type CheckError struct { + // The error code. + Code CheckError_Code `protobuf:"varint,1,opt,name=code,enum=google.api.servicecontrol.v1.CheckError_Code" json:"code,omitempty"` + // Free-form text providing details on the error cause of the error. 
+ Detail string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` +} + +func (m *CheckError) Reset() { *m = CheckError{} } +func (m *CheckError) String() string { return proto.CompactTextString(m) } +func (*CheckError) ProtoMessage() {} +func (*CheckError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *CheckError) GetCode() CheckError_Code { + if m != nil { + return m.Code + } + return CheckError_ERROR_CODE_UNSPECIFIED +} + +func (m *CheckError) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func init() { + proto.RegisterType((*CheckError)(nil), "google.api.servicecontrol.v1.CheckError") + proto.RegisterEnum("google.api.servicecontrol.v1.CheckError_Code", CheckError_Code_name, CheckError_Code_value) +} + +func init() { proto.RegisterFile("google/api/servicecontrol/v1/check_error.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 484 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xdd, 0x6e, 0xd3, 0x3e, + 0x18, 0xc6, 0xff, 0xe9, 0xbf, 0x0c, 0x66, 0x09, 0x16, 0x0c, 0xab, 0x46, 0x55, 0x89, 0xb2, 0xa3, + 0x9d, 0x90, 0x68, 0x70, 0xc8, 0x91, 0x6b, 0xbf, 0x05, 0xaf, 0x59, 0x62, 0xd9, 0x49, 0x35, 0x38, + 0xb1, 0x42, 0x1a, 0x65, 0xd1, 0xba, 0xb8, 0xa4, 0x51, 0xaf, 0x80, 0x0b, 0xe0, 0x2a, 0x38, 0x02, + 0xae, 0x8f, 0x43, 0xe4, 0x76, 0xfd, 0x92, 0xa6, 0x1d, 0xfa, 0x79, 0x7f, 0xcf, 0x63, 0xbd, 0x1f, + 0xc8, 0x2b, 0x8c, 0x29, 0xa6, 0xb9, 0x9f, 0xce, 0x4a, 0x7f, 0x9e, 0xd7, 0x8b, 0x32, 0xcb, 0x33, + 0x53, 0x35, 0xb5, 0x99, 0xfa, 0x8b, 0x73, 0x3f, 0xbb, 0xce, 0xb3, 0x1b, 0x9d, 0xd7, 0xb5, 0xa9, + 0xbd, 0x59, 0x6d, 0x1a, 0x83, 0x7b, 0x2b, 0xde, 0x4b, 0x67, 0xa5, 0xb7, 0xcf, 0x7b, 0x8b, 0xf3, + 0x6e, 0x6f, 0x27, 0x2d, 0xad, 0x2a, 0xd3, 0xa4, 0x4d, 0x69, 0xaa, 0xf9, 0xca, 0x7b, 0xfa, 0xa3, + 0x8d, 0x10, 0xb5, 0x89, 0x60, 0x03, 0x31, 0x41, 0xed, 0xcc, 0x4c, 0xf2, 0x13, 0xa7, 0xef, 0x9c, + 0x3d, 0x7b, 0xf7, 0xd6, 0x7b, 0x28, 0xd9, 0xdb, 
0xfa, 0x3c, 0x6a, 0x26, 0xb9, 0x5c, 0x5a, 0x71, + 0x07, 0x1d, 0x4c, 0xf2, 0x26, 0x2d, 0xa7, 0x27, 0xad, 0xbe, 0x73, 0x76, 0x28, 0xef, 0x5e, 0xa7, + 0x3f, 0xff, 0x47, 0x6d, 0x8b, 0xe1, 0x2e, 0xea, 0x80, 0x94, 0x91, 0xd4, 0x34, 0x62, 0xa0, 0x93, + 0x50, 0x09, 0xa0, 0x7c, 0xc8, 0x81, 0xb9, 0xff, 0xe1, 0xa7, 0xe8, 0x30, 0x8c, 0x62, 0x3d, 0x8c, + 0x92, 0x90, 0xb9, 0x8f, 0xf0, 0x31, 0x7a, 0x2e, 0x40, 0x5e, 0x72, 0xa5, 0x78, 0x14, 0x6a, 0x06, + 0xa1, 0xa5, 0x1e, 0xe3, 0x0e, 0xc2, 0x12, 0x54, 0x94, 0x48, 0x0a, 0x1a, 0xae, 0x3e, 0x91, 0x44, + 0xc5, 0xc0, 0xdc, 0x27, 0xf8, 0x15, 0x3a, 0x56, 0x20, 0xc7, 0x9c, 0x82, 0xb6, 0x29, 0x84, 0xc6, + 0x7c, 0x4c, 0x6c, 0xe9, 0x1a, 0xbf, 0x44, 0xee, 0x80, 0x07, 0x01, 0x0f, 0x3f, 0x6a, 0xc6, 0x15, + 0x19, 0x04, 0xc0, 0xdc, 0x1b, 0xfc, 0x02, 0x1d, 0x09, 0x19, 0x5d, 0x00, 0x8d, 0x35, 0x83, 0x00, + 0x2c, 0x3a, 0xdd, 0x15, 0x79, 0x38, 0x26, 0x01, 0x67, 0x6e, 0x6d, 0xbf, 0xe4, 0x42, 0x13, 0xc6, + 0x24, 0x28, 0xa5, 0x07, 0x41, 0x44, 0x47, 0xc0, 0xdc, 0x5b, 0x0b, 0x4b, 0x18, 0x82, 0x04, 0xb9, + 0x11, 0x2b, 0x0b, 0xd3, 0x80, 0x43, 0x18, 0x6b, 0x22, 0xc4, 0x46, 0x37, 0x16, 0x26, 0x82, 0xeb, + 0x11, 0x7c, 0xde, 0x24, 0x97, 0xbb, 0x22, 0x5c, 0x09, 0x2e, 0x81, 0xb9, 0x33, 0xdb, 0xf8, 0x5a, + 0xdc, 0xce, 0xe3, 0x1b, 0x7e, 0x83, 0x7a, 0x21, 0xb9, 0x04, 0x25, 0x08, 0x05, 0x1d, 0x44, 0xd1, + 0x28, 0x11, 0x3a, 0x09, 0xc9, 0x98, 0xf0, 0xc0, 0xb6, 0xe4, 0xfe, 0x6a, 0xe1, 0xd7, 0xa8, 0xbb, + 0x9e, 0x81, 0x8a, 0x49, 0x9c, 0xa8, 0x3d, 0xe0, 0xf7, 0x12, 0x58, 0x4f, 0xe2, 0x1e, 0xe0, 0x4f, + 0x6b, 0xf0, 0xdd, 0x41, 0xfd, 0xcc, 0xdc, 0x3e, 0xb8, 0xfb, 0xc1, 0xd1, 0x76, 0xf9, 0xc2, 0x1e, + 0x92, 0x70, 0xbe, 0x5c, 0xdc, 0x19, 0x0a, 0x33, 0x4d, 0xab, 0xc2, 0x33, 0x75, 0xe1, 0x17, 0x79, + 0xb5, 0x3c, 0x33, 0x7f, 0x55, 0x4a, 0x67, 0xe5, 0xfc, 0xfe, 0xab, 0xfe, 0xb0, 0xaf, 0xfc, 0x75, + 0x9c, 0xaf, 0x07, 0x4b, 0xe7, 0xfb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x65, 0x26, 0xbf, + 0x0e, 0x03, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/distribution.pb.go new file mode 100644 index 000000000..5f0348228 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/distribution.pb.go @@ -0,0 +1,430 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/servicecontrol/v1/distribution.proto + +package servicecontrol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Distribution represents a frequency distribution of double-valued sample +// points. It contains the size of the population of sample points plus +// additional optional information: +// +// - the arithmetic mean of the samples +// - the minimum and maximum of the samples +// - the sum-squared-deviation of the samples, used to compute variance +// - a histogram of the values of the sample points +type Distribution struct { + // The total number of samples in the distribution. Must be >= 0. + Count int64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` + // The arithmetic mean of the samples in the distribution. If `count` is + // zero then this field must be zero. + Mean float64 `protobuf:"fixed64,2,opt,name=mean" json:"mean,omitempty"` + // The minimum of the population of values. Ignored if `count` is zero. + Minimum float64 `protobuf:"fixed64,3,opt,name=minimum" json:"minimum,omitempty"` + // The maximum of the population of values. Ignored if `count` is zero. + Maximum float64 `protobuf:"fixed64,4,opt,name=maximum" json:"maximum,omitempty"` + // The sum of squared deviations from the mean: + // Sum[i=1..count]((x_i - mean)^2) + // where each x_i is a sample values. 
If `count` is zero then this field + // must be zero, otherwise validation of the request fails. + SumOfSquaredDeviation float64 `protobuf:"fixed64,5,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation" json:"sum_of_squared_deviation,omitempty"` + // The number of samples in each histogram bucket. `bucket_counts` are + // optional. If present, they must sum to the `count` value. + // + // The buckets are defined below in `bucket_option`. There are N buckets. + // `bucket_counts[0]` is the number of samples in the underflow bucket. + // `bucket_counts[1]` to `bucket_counts[N-1]` are the numbers of samples + // in each of the finite buckets. And `bucket_counts[N] is the number + // of samples in the overflow bucket. See the comments of `bucket_option` + // below for more details. + // + // Any suffix of trailing zeros may be omitted. + BucketCounts []int64 `protobuf:"varint,6,rep,packed,name=bucket_counts,json=bucketCounts" json:"bucket_counts,omitempty"` + // Defines the buckets in the histogram. `bucket_option` and `bucket_counts` + // must be both set, or both unset. + // + // Buckets are numbered the the range of [0, N], with a total of N+1 buckets. + // There must be at least two buckets (a single-bucket histogram gives + // no information that isn't already provided by `count`). + // + // The first bucket is the underflow bucket which has a lower bound + // of -inf. The last bucket is the overflow bucket which has an + // upper bound of +inf. All other buckets (if any) are called "finite" + // buckets because they have finite lower and upper bounds. As described + // below, there are three ways to define the finite buckets. + // + // (1) Buckets with constant width. + // (2) Buckets with exponentially growing widths. + // (3) Buckets with arbitrary user-provided widths. + // + // In all cases, the buckets cover the entire real number line (-inf, + // +inf). Bucket upper bounds are exclusive and lower bounds are + // inclusive. 
The upper bound of the underflow bucket is equal to the + // lower bound of the smallest finite bucket; the lower bound of the + // overflow bucket is equal to the upper bound of the largest finite + // bucket. + // + // Types that are valid to be assigned to BucketOption: + // *Distribution_LinearBuckets_ + // *Distribution_ExponentialBuckets_ + // *Distribution_ExplicitBuckets_ + BucketOption isDistribution_BucketOption `protobuf_oneof:"bucket_option"` +} + +func (m *Distribution) Reset() { *m = Distribution{} } +func (m *Distribution) String() string { return proto.CompactTextString(m) } +func (*Distribution) ProtoMessage() {} +func (*Distribution) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +type isDistribution_BucketOption interface { + isDistribution_BucketOption() +} + +type Distribution_LinearBuckets_ struct { + LinearBuckets *Distribution_LinearBuckets `protobuf:"bytes,7,opt,name=linear_buckets,json=linearBuckets,oneof"` +} +type Distribution_ExponentialBuckets_ struct { + ExponentialBuckets *Distribution_ExponentialBuckets `protobuf:"bytes,8,opt,name=exponential_buckets,json=exponentialBuckets,oneof"` +} +type Distribution_ExplicitBuckets_ struct { + ExplicitBuckets *Distribution_ExplicitBuckets `protobuf:"bytes,9,opt,name=explicit_buckets,json=explicitBuckets,oneof"` +} + +func (*Distribution_LinearBuckets_) isDistribution_BucketOption() {} +func (*Distribution_ExponentialBuckets_) isDistribution_BucketOption() {} +func (*Distribution_ExplicitBuckets_) isDistribution_BucketOption() {} + +func (m *Distribution) GetBucketOption() isDistribution_BucketOption { + if m != nil { + return m.BucketOption + } + return nil +} + +func (m *Distribution) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *Distribution) GetMean() float64 { + if m != nil { + return m.Mean + } + return 0 +} + +func (m *Distribution) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Distribution) 
GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Distribution) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *Distribution) GetBucketCounts() []int64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *Distribution) GetLinearBuckets() *Distribution_LinearBuckets { + if x, ok := m.GetBucketOption().(*Distribution_LinearBuckets_); ok { + return x.LinearBuckets + } + return nil +} + +func (m *Distribution) GetExponentialBuckets() *Distribution_ExponentialBuckets { + if x, ok := m.GetBucketOption().(*Distribution_ExponentialBuckets_); ok { + return x.ExponentialBuckets + } + return nil +} + +func (m *Distribution) GetExplicitBuckets() *Distribution_ExplicitBuckets { + if x, ok := m.GetBucketOption().(*Distribution_ExplicitBuckets_); ok { + return x.ExplicitBuckets + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Distribution) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Distribution_OneofMarshaler, _Distribution_OneofUnmarshaler, _Distribution_OneofSizer, []interface{}{ + (*Distribution_LinearBuckets_)(nil), + (*Distribution_ExponentialBuckets_)(nil), + (*Distribution_ExplicitBuckets_)(nil), + } +} + +func _Distribution_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Distribution) + // bucket_option + switch x := m.BucketOption.(type) { + case *Distribution_LinearBuckets_: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.LinearBuckets); err != nil { + return err + } + case *Distribution_ExponentialBuckets_: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExponentialBuckets); err != nil { + return err + } + case *Distribution_ExplicitBuckets_: + b.EncodeVarint(9<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.ExplicitBuckets); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Distribution.BucketOption has unexpected type %T", x) + } + return nil +} + +func _Distribution_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Distribution) + switch tag { + case 7: // bucket_option.linear_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_LinearBuckets) + err := b.DecodeMessage(msg) + m.BucketOption = &Distribution_LinearBuckets_{msg} + return true, err + case 8: // bucket_option.exponential_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_ExponentialBuckets) + err := b.DecodeMessage(msg) + m.BucketOption = &Distribution_ExponentialBuckets_{msg} + return true, err + case 9: // bucket_option.explicit_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_ExplicitBuckets) + err := b.DecodeMessage(msg) + m.BucketOption = &Distribution_ExplicitBuckets_{msg} + return true, err + default: + return false, nil + } +} + +func _Distribution_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Distribution) + // bucket_option + switch x := m.BucketOption.(type) { + case *Distribution_LinearBuckets_: + s := proto.Size(x.LinearBuckets) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Distribution_ExponentialBuckets_: + s := proto.Size(x.ExponentialBuckets) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Distribution_ExplicitBuckets_: + s := proto.Size(x.ExplicitBuckets) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Describing buckets with 
constant width. +type Distribution_LinearBuckets struct { + // The number of finite buckets. With the underflow and overflow buckets, + // the total number of buckets is `num_finite_buckets` + 2. + // See comments on `bucket_options` for details. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets" json:"num_finite_buckets,omitempty"` + // The i'th linear bucket covers the interval + // [offset + (i-1) * width, offset + i * width) + // where i ranges from 1 to num_finite_buckets, inclusive. + // Must be strictly positive. + Width float64 `protobuf:"fixed64,2,opt,name=width" json:"width,omitempty"` + // The i'th linear bucket covers the interval + // [offset + (i-1) * width, offset + i * width) + // where i ranges from 1 to num_finite_buckets, inclusive. + Offset float64 `protobuf:"fixed64,3,opt,name=offset" json:"offset,omitempty"` +} + +func (m *Distribution_LinearBuckets) Reset() { *m = Distribution_LinearBuckets{} } +func (m *Distribution_LinearBuckets) String() string { return proto.CompactTextString(m) } +func (*Distribution_LinearBuckets) ProtoMessage() {} +func (*Distribution_LinearBuckets) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +func (m *Distribution_LinearBuckets) GetNumFiniteBuckets() int32 { + if m != nil { + return m.NumFiniteBuckets + } + return 0 +} + +func (m *Distribution_LinearBuckets) GetWidth() float64 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Distribution_LinearBuckets) GetOffset() float64 { + if m != nil { + return m.Offset + } + return 0 +} + +// Describing buckets with exponentially growing width. +type Distribution_ExponentialBuckets struct { + // The number of finite buckets. With the underflow and overflow buckets, + // the total number of buckets is `num_finite_buckets` + 2. + // See comments on `bucket_options` for details. 
+ NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets" json:"num_finite_buckets,omitempty"` + // The i'th exponential bucket covers the interval + // [scale * growth_factor^(i-1), scale * growth_factor^i) + // where i ranges from 1 to num_finite_buckets inclusive. + // Must be larger than 1.0. + GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor" json:"growth_factor,omitempty"` + // The i'th exponential bucket covers the interval + // [scale * growth_factor^(i-1), scale * growth_factor^i) + // where i ranges from 1 to num_finite_buckets inclusive. + // Must be > 0. + Scale float64 `protobuf:"fixed64,3,opt,name=scale" json:"scale,omitempty"` +} + +func (m *Distribution_ExponentialBuckets) Reset() { *m = Distribution_ExponentialBuckets{} } +func (m *Distribution_ExponentialBuckets) String() string { return proto.CompactTextString(m) } +func (*Distribution_ExponentialBuckets) ProtoMessage() {} +func (*Distribution_ExponentialBuckets) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{0, 1} +} + +func (m *Distribution_ExponentialBuckets) GetNumFiniteBuckets() int32 { + if m != nil { + return m.NumFiniteBuckets + } + return 0 +} + +func (m *Distribution_ExponentialBuckets) GetGrowthFactor() float64 { + if m != nil { + return m.GrowthFactor + } + return 0 +} + +func (m *Distribution_ExponentialBuckets) GetScale() float64 { + if m != nil { + return m.Scale + } + return 0 +} + +// Describing buckets with arbitrary user-provided width. +type Distribution_ExplicitBuckets struct { + // 'bound' is a list of strictly increasing boundaries between + // buckets. Note that a list of length N-1 defines N buckets because + // of fenceposting. See comments on `bucket_options` for details. + // + // The i'th finite bucket covers the interval + // [bound[i-1], bound[i]) + // where i ranges from 1 to bound_size() - 1. 
Note that there are no + // finite buckets at all if 'bound' only contains a single element; in + // that special case the single bound defines the boundary between the + // underflow and overflow buckets. + // + // bucket number lower bound upper bound + // i == 0 (underflow) -inf bound[i] + // 0 < i < bound_size() bound[i-1] bound[i] + // i == bound_size() (overflow) bound[i-1] +inf + Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds" json:"bounds,omitempty"` +} + +func (m *Distribution_ExplicitBuckets) Reset() { *m = Distribution_ExplicitBuckets{} } +func (m *Distribution_ExplicitBuckets) String() string { return proto.CompactTextString(m) } +func (*Distribution_ExplicitBuckets) ProtoMessage() {} +func (*Distribution_ExplicitBuckets) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 2} } + +func (m *Distribution_ExplicitBuckets) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +func init() { + proto.RegisterType((*Distribution)(nil), "google.api.servicecontrol.v1.Distribution") + proto.RegisterType((*Distribution_LinearBuckets)(nil), "google.api.servicecontrol.v1.Distribution.LinearBuckets") + proto.RegisterType((*Distribution_ExponentialBuckets)(nil), "google.api.servicecontrol.v1.Distribution.ExponentialBuckets") + proto.RegisterType((*Distribution_ExplicitBuckets)(nil), "google.api.servicecontrol.v1.Distribution.ExplicitBuckets") +} + +func init() { proto.RegisterFile("google/api/servicecontrol/v1/distribution.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 486 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x86, 0x31, 0x6e, 0x52, 0x18, 0x12, 0x52, 0x96, 0x82, 0xac, 0x88, 0x83, 0x45, 0x2f, 0x41, + 0x42, 0xb6, 0x0a, 0x07, 0x10, 0x88, 0x4b, 0x28, 0x15, 0x42, 0x48, 0x54, 0xe6, 0xc6, 0xc5, 0xda, + 0xd8, 0x6b, 0x77, 0x55, 0x7b, 0xc7, 0xec, 0x47, 0x9a, 0x0b, 0x57, 0x7e, 0x0f, 0x3f, 0x8f, 0x23, + 
0xf2, 0xae, 0xf3, 0x45, 0xab, 0x48, 0xb9, 0xf9, 0x7d, 0xc7, 0x33, 0xcf, 0xec, 0x68, 0x06, 0xe2, + 0x12, 0xb1, 0xac, 0x58, 0x4c, 0x1b, 0x1e, 0x2b, 0x26, 0xe7, 0x3c, 0x63, 0x19, 0x0a, 0x2d, 0xb1, + 0x8a, 0xe7, 0xa7, 0x71, 0xce, 0x95, 0x96, 0x7c, 0x66, 0x34, 0x47, 0x11, 0x35, 0x12, 0x35, 0x92, + 0x67, 0x2e, 0x21, 0xa2, 0x0d, 0x8f, 0xb6, 0x13, 0xa2, 0xf9, 0xe9, 0xf3, 0x3f, 0x7d, 0x18, 0x9c, + 0x6d, 0x24, 0x91, 0x63, 0xe8, 0x65, 0x68, 0x84, 0x0e, 0xbc, 0xd0, 0x9b, 0xf8, 0x89, 0x13, 0x84, + 0xc0, 0x41, 0xcd, 0xa8, 0x08, 0xee, 0x86, 0xde, 0xc4, 0x4b, 0xec, 0x37, 0x09, 0xe0, 0xb0, 0xe6, + 0x82, 0xd7, 0xa6, 0x0e, 0x7c, 0x6b, 0x2f, 0xa5, 0x8d, 0xd0, 0x85, 0x8d, 0x1c, 0x74, 0x11, 0x27, + 0xc9, 0x1b, 0x08, 0x94, 0xa9, 0x53, 0x2c, 0x52, 0xf5, 0xd3, 0x50, 0xc9, 0xf2, 0x34, 0x67, 0x73, + 0x4e, 0x5b, 0x72, 0xd0, 0xb3, 0xbf, 0x3e, 0x51, 0xa6, 0xfe, 0x56, 0x7c, 0x77, 0xd1, 0xb3, 0x65, + 0x90, 0x9c, 0xc0, 0x70, 0x66, 0xb2, 0x2b, 0xa6, 0x53, 0xdb, 0x90, 0x0a, 0xfa, 0xa1, 0x3f, 0xf1, + 0x93, 0x81, 0x33, 0x3f, 0x5a, 0x8f, 0x50, 0x78, 0x58, 0x71, 0xc1, 0xa8, 0x4c, 0x9d, 0xad, 0x82, + 0xc3, 0xd0, 0x9b, 0x3c, 0x78, 0xf5, 0x36, 0xda, 0x35, 0x83, 0x68, 0xf3, 0xfd, 0xd1, 0x57, 0x5b, + 0x60, 0xea, 0xf2, 0x3f, 0xdf, 0x49, 0x86, 0xd5, 0xa6, 0x41, 0x1a, 0x78, 0xcc, 0x16, 0x0d, 0x0a, + 0x26, 0x34, 0xa7, 0xd5, 0x8a, 0x73, 0xcf, 0x72, 0x3e, 0xec, 0xc1, 0xf9, 0xb4, 0xae, 0xb2, 0x86, + 0x11, 0x76, 0xc3, 0x25, 0x25, 0x1c, 0xb1, 0x45, 0x53, 0xf1, 0x8c, 0xeb, 0x15, 0xee, 0xbe, 0xc5, + 0xbd, 0xdb, 0x0f, 0x67, 0x4b, 0xac, 0x59, 0x23, 0xb6, 0x6d, 0x8d, 0xaf, 0x60, 0xb8, 0xf5, 0x78, + 0xf2, 0x12, 0x88, 0x30, 0x75, 0x5a, 0x70, 0xc1, 0x35, 0x5b, 0xb1, 0xdb, 0xbd, 0xe8, 0x25, 0x47, + 0xc2, 0xd4, 0xe7, 0x36, 0xb0, 0xfc, 0xfb, 0x18, 0x7a, 0xd7, 0x3c, 0xd7, 0x97, 0xdd, 0x8e, 0x38, + 0x41, 0x9e, 0x42, 0x1f, 0x8b, 0x42, 0x31, 0xdd, 0xed, 0x48, 0xa7, 0xc6, 0xbf, 0x80, 0xdc, 0x9c, + 0xc0, 0x9e, 0xc4, 0x13, 0x18, 0x96, 0x12, 0xaf, 0xf5, 0x65, 0x5a, 0xd0, 0x4c, 0xa3, 0xec, 0xc8, + 0x03, 0x67, 0x9e, 0x5b, 0xaf, 0x6d, 
0x4b, 0x65, 0xb4, 0x62, 0x1d, 0xdf, 0x89, 0xf1, 0x0b, 0x18, + 0xfd, 0x37, 0x91, 0xb6, 0xd3, 0x19, 0x1a, 0x91, 0xb7, 0x3c, 0xbf, 0xed, 0xd4, 0xa9, 0xe9, 0x68, + 0xb5, 0x79, 0xd8, 0xb4, 0xa3, 0x9c, 0xfe, 0xf6, 0x20, 0xcc, 0xb0, 0xde, 0x39, 0xfc, 0xe9, 0xa3, + 0xcd, 0xe9, 0x5f, 0xb4, 0x87, 0x78, 0xe1, 0xfd, 0xf8, 0xd2, 0xa5, 0x94, 0x58, 0x51, 0x51, 0x46, + 0x28, 0xcb, 0xb8, 0x64, 0xc2, 0x9e, 0x69, 0x77, 0xd6, 0xb4, 0xe1, 0xea, 0xf6, 0xd3, 0x7e, 0xbf, + 0xed, 0xfc, 0xf5, 0xbc, 0x59, 0xdf, 0x66, 0xbe, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0x14, 0x52, + 0xaf, 0xef, 0x13, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/log_entry.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/log_entry.pb.go new file mode 100644 index 000000000..727a19b6e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/log_entry.pb.go @@ -0,0 +1,258 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/servicecontrol/v1/log_entry.proto + +package servicecontrol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_logging_type "google.golang.org/genproto/googleapis/logging/type" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" +import google_protobuf2 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// An individual log entry. +type LogEntry struct { + // Required. The log to which this log entry belongs. Examples: `"syslog"`, + // `"book_log"`. + Name string `protobuf:"bytes,10,opt,name=name" json:"name,omitempty"` + // The time the event described by the log entry occurred. If + // omitted, defaults to operation start time. 
+ Timestamp *google_protobuf3.Timestamp `protobuf:"bytes,11,opt,name=timestamp" json:"timestamp,omitempty"` + // The severity of the log entry. The default value is + // `LogSeverity.DEFAULT`. + Severity google_logging_type.LogSeverity `protobuf:"varint,12,opt,name=severity,enum=google.logging.type.LogSeverity" json:"severity,omitempty"` + // A unique ID for the log entry used for deduplication. If omitted, + // the implementation will generate one based on operation_id. + InsertId string `protobuf:"bytes,4,opt,name=insert_id,json=insertId" json:"insert_id,omitempty"` + // A set of user-defined (key, value) data that provides additional + // information about the log entry. + Labels map[string]string `protobuf:"bytes,13,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The log entry payload, which can be one of multiple types. + // + // Types that are valid to be assigned to Payload: + // *LogEntry_ProtoPayload + // *LogEntry_TextPayload + // *LogEntry_StructPayload + Payload isLogEntry_Payload `protobuf_oneof:"payload"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +type isLogEntry_Payload interface { + isLogEntry_Payload() +} + +type LogEntry_ProtoPayload struct { + ProtoPayload *google_protobuf1.Any `protobuf:"bytes,2,opt,name=proto_payload,json=protoPayload,oneof"` +} +type LogEntry_TextPayload struct { + TextPayload string `protobuf:"bytes,3,opt,name=text_payload,json=textPayload,oneof"` +} +type LogEntry_StructPayload struct { + StructPayload *google_protobuf2.Struct `protobuf:"bytes,6,opt,name=struct_payload,json=structPayload,oneof"` +} + +func (*LogEntry_ProtoPayload) isLogEntry_Payload() {} +func (*LogEntry_TextPayload) isLogEntry_Payload() {} +func (*LogEntry_StructPayload) 
isLogEntry_Payload() {} + +func (m *LogEntry) GetPayload() isLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *LogEntry) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LogEntry) GetTimestamp() *google_protobuf3.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *LogEntry) GetSeverity() google_logging_type.LogSeverity { + if m != nil { + return m.Severity + } + return google_logging_type.LogSeverity_DEFAULT +} + +func (m *LogEntry) GetInsertId() string { + if m != nil { + return m.InsertId + } + return "" +} + +func (m *LogEntry) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *LogEntry) GetProtoPayload() *google_protobuf1.Any { + if x, ok := m.GetPayload().(*LogEntry_ProtoPayload); ok { + return x.ProtoPayload + } + return nil +} + +func (m *LogEntry) GetTextPayload() string { + if x, ok := m.GetPayload().(*LogEntry_TextPayload); ok { + return x.TextPayload + } + return "" +} + +func (m *LogEntry) GetStructPayload() *google_protobuf2.Struct { + if x, ok := m.GetPayload().(*LogEntry_StructPayload); ok { + return x.StructPayload + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LogEntry_OneofMarshaler, _LogEntry_OneofUnmarshaler, _LogEntry_OneofSizer, []interface{}{ + (*LogEntry_ProtoPayload)(nil), + (*LogEntry_TextPayload)(nil), + (*LogEntry_StructPayload)(nil), + } +} + +func _LogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LogEntry) + // payload + switch x := m.Payload.(type) { + case *LogEntry_ProtoPayload: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ProtoPayload); err != nil { + return err + } + case *LogEntry_TextPayload: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.TextPayload) + case *LogEntry_StructPayload: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructPayload); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LogEntry.Payload has unexpected type %T", x) + } + return nil +} + +func _LogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LogEntry) + switch tag { + case 2: // payload.proto_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Any) + err := b.DecodeMessage(msg) + m.Payload = &LogEntry_ProtoPayload{msg} + return true, err + case 3: // payload.text_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Payload = &LogEntry_TextPayload{x} + return true, err + case 6: // payload.struct_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Struct) + err := b.DecodeMessage(msg) + m.Payload = &LogEntry_StructPayload{msg} + return true, err + default: + return false, nil + } +} + +func _LogEntry_OneofSizer(msg proto.Message) (n int) { + m 
:= msg.(*LogEntry) + // payload + switch x := m.Payload.(type) { + case *LogEntry_ProtoPayload: + s := proto.Size(x.ProtoPayload) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LogEntry_TextPayload: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.TextPayload))) + n += len(x.TextPayload) + case *LogEntry_StructPayload: + s := proto.Size(x.StructPayload) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*LogEntry)(nil), "google.api.servicecontrol.v1.LogEntry") +} + +func init() { proto.RegisterFile("google/api/servicecontrol/v1/log_entry.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 454 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4f, 0x8f, 0xd3, 0x30, + 0x10, 0xc5, 0x9b, 0xed, 0x52, 0x1a, 0xa7, 0x5d, 0x21, 0x6b, 0x25, 0x42, 0xa8, 0x44, 0x04, 0x12, + 0xea, 0x01, 0x39, 0xda, 0x72, 0x59, 0x58, 0x0e, 0x50, 0x09, 0xa9, 0xac, 0x7a, 0x58, 0x65, 0x39, + 0x71, 0xa9, 0xdc, 0xd6, 0x58, 0x16, 0xae, 0x27, 0x4a, 0xdc, 0x88, 0x9c, 0xf8, 0xbc, 0x7c, 0x0b, + 0x94, 0xb1, 0x53, 0xf6, 0x9f, 0xf6, 0xe6, 0xf1, 0xfb, 0xbd, 0x99, 0x79, 0x8e, 0x42, 0xde, 0x49, + 0x00, 0xa9, 0x45, 0xc6, 0x0b, 0x95, 0x55, 0xa2, 0xac, 0xd5, 0x46, 0x6c, 0xc0, 0xd8, 0x12, 0x74, + 0x56, 0x9f, 0x65, 0x1a, 0xe4, 0x4a, 0x18, 0x5b, 0x36, 0xac, 0x28, 0xc1, 0x02, 0x9d, 0x38, 0x9a, + 0xf1, 0x42, 0xb1, 0xdb, 0x34, 0xab, 0xcf, 0x92, 0xc9, 0x8d, 0x5e, 0xdc, 0x18, 0xb0, 0xdc, 0x2a, + 0x30, 0x95, 0xf3, 0x26, 0x6f, 0xbd, 0xaa, 0x41, 0x4a, 0x65, 0x64, 0x66, 0x9b, 0x02, 0x8b, 0x55, + 0x25, 0x6a, 0x51, 0x2a, 0xeb, 0x67, 0x24, 0x2f, 0x3c, 0x87, 0xd5, 0x7a, 0xff, 0x33, 0xe3, 0xa6, + 0x93, 0x26, 0x77, 0xa5, 0xca, 0x96, 0xfb, 0x8d, 0xf5, 0xea, 0xab, 0xbb, 0xaa, 0x55, 0x3b, 
0x51, + 0x59, 0xbe, 0x2b, 0x1c, 0xf0, 0xfa, 0x6f, 0x9f, 0x0c, 0x97, 0x20, 0xbf, 0xb6, 0x81, 0x28, 0x25, + 0xc7, 0x86, 0xef, 0x44, 0x4c, 0xd2, 0x60, 0x1a, 0xe6, 0x78, 0xa6, 0xe7, 0x24, 0x3c, 0x78, 0xe2, + 0x28, 0x0d, 0xa6, 0xd1, 0x2c, 0x61, 0x3e, 0x72, 0xd7, 0x95, 0x7d, 0xef, 0x88, 0xfc, 0x3f, 0x4c, + 0x3f, 0x91, 0x61, 0x17, 0x23, 0x1e, 0xa5, 0xc1, 0xf4, 0x64, 0x96, 0x76, 0x46, 0x9f, 0x97, 0xb5, + 0x79, 0xd9, 0x12, 0xe4, 0xb5, 0xe7, 0xf2, 0x83, 0x83, 0xbe, 0x24, 0xa1, 0x32, 0x95, 0x28, 0xed, + 0x4a, 0x6d, 0xe3, 0x63, 0x5c, 0x68, 0xe8, 0x2e, 0xbe, 0x6d, 0xe9, 0x25, 0x19, 0x68, 0xbe, 0x16, + 0xba, 0x8a, 0xc7, 0x69, 0x7f, 0x1a, 0xcd, 0x66, 0xec, 0xb1, 0x8f, 0xc0, 0xba, 0x80, 0x6c, 0x89, + 0x26, 0x3c, 0xe7, 0xbe, 0x03, 0xbd, 0x20, 0x63, 0xcc, 0xb1, 0x2a, 0x78, 0xa3, 0x81, 0x6f, 0xe3, + 0x23, 0x0c, 0x79, 0x7a, 0x2f, 0xe4, 0x17, 0xd3, 0x2c, 0x7a, 0xf9, 0x08, 0xeb, 0x2b, 0xc7, 0xd2, + 0x37, 0x64, 0x64, 0xc5, 0x6f, 0x7b, 0xf0, 0xf6, 0xdb, 0x45, 0x17, 0xbd, 0x3c, 0x6a, 0x6f, 0x3b, + 0xe8, 0x33, 0x39, 0x71, 0x1f, 0xe5, 0x80, 0x0d, 0x70, 0xc4, 0xf3, 0x7b, 0x23, 0xae, 0x11, 0x5b, + 0xf4, 0xf2, 0xb1, 0x33, 0xf8, 0x0e, 0xc9, 0x07, 0x12, 0xdd, 0x58, 0x9d, 0x3e, 0x23, 0xfd, 0x5f, + 0xa2, 0x89, 0x03, 0x7c, 0x95, 0xf6, 0x48, 0x4f, 0xc9, 0x93, 0x9a, 0xeb, 0xbd, 0xc0, 0xe5, 0xc3, + 0xdc, 0x15, 0x1f, 0x8f, 0xce, 0x83, 0x79, 0x48, 0x9e, 0xfa, 0xa9, 0xf3, 0x3f, 0x24, 0xdd, 0xc0, + 0xee, 0xd1, 0xa7, 0x9a, 0x8f, 0xbb, 0xb7, 0xba, 0xc2, 0x98, 0xc1, 0x8f, 0x4b, 0x8f, 0x4b, 0xd0, + 0xdc, 0x48, 0x06, 0xa5, 0xcc, 0xa4, 0x30, 0xb8, 0x71, 0xe6, 0x24, 0x5e, 0xa8, 0xea, 0xe1, 0x7f, + 0xe5, 0xe2, 0xf6, 0xcd, 0x7a, 0x80, 0xb6, 0xf7, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x67, 0x50, + 0x6e, 0x13, 0x61, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/metric_value.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/metric_value.pb.go new file mode 100644 index 000000000..ab7ccb1e3 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/metric_value.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/servicecontrol/v1/metric_value.proto + +package servicecontrol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" +import _ "google.golang.org/genproto/googleapis/type/money" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Represents a single metric value. +type MetricValue struct { + // The labels describing the metric value. + // See comments on [google.api.servicecontrol.v1.Operation.labels][google.api.servicecontrol.v1.Operation.labels] for + // the overriding relationship. + Labels map[string]string `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The start of the time period over which this metric value's measurement + // applies. The time period has different semantics for different metric + // types (cumulative, delta, and gauge). See the metric definition + // documentation in the service configuration for details. + StartTime *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The end of the time period over which this metric value's measurement + // applies. + EndTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // The value. The type of value used in the request must + // agree with the metric definition in the service configuration, otherwise + // the MetricValue is rejected. 
+ // + // Types that are valid to be assigned to Value: + // *MetricValue_BoolValue + // *MetricValue_Int64Value + // *MetricValue_DoubleValue + // *MetricValue_StringValue + // *MetricValue_DistributionValue + Value isMetricValue_Value `protobuf_oneof:"value"` +} + +func (m *MetricValue) Reset() { *m = MetricValue{} } +func (m *MetricValue) String() string { return proto.CompactTextString(m) } +func (*MetricValue) ProtoMessage() {} +func (*MetricValue) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +type isMetricValue_Value interface { + isMetricValue_Value() +} + +type MetricValue_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type MetricValue_Int64Value struct { + Int64Value int64 `protobuf:"varint,5,opt,name=int64_value,json=int64Value,oneof"` +} +type MetricValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue,oneof"` +} +type MetricValue_StringValue struct { + StringValue string `protobuf:"bytes,7,opt,name=string_value,json=stringValue,oneof"` +} +type MetricValue_DistributionValue struct { + DistributionValue *Distribution `protobuf:"bytes,8,opt,name=distribution_value,json=distributionValue,oneof"` +} + +func (*MetricValue_BoolValue) isMetricValue_Value() {} +func (*MetricValue_Int64Value) isMetricValue_Value() {} +func (*MetricValue_DoubleValue) isMetricValue_Value() {} +func (*MetricValue_StringValue) isMetricValue_Value() {} +func (*MetricValue_DistributionValue) isMetricValue_Value() {} + +func (m *MetricValue) GetValue() isMetricValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *MetricValue) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *MetricValue) GetStartTime() *google_protobuf3.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *MetricValue) GetEndTime() *google_protobuf3.Timestamp { + if m != nil { + return 
m.EndTime + } + return nil +} + +func (m *MetricValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*MetricValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *MetricValue) GetInt64Value() int64 { + if x, ok := m.GetValue().(*MetricValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *MetricValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*MetricValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *MetricValue) GetStringValue() string { + if x, ok := m.GetValue().(*MetricValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *MetricValue) GetDistributionValue() *Distribution { + if x, ok := m.GetValue().(*MetricValue_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*MetricValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _MetricValue_OneofMarshaler, _MetricValue_OneofUnmarshaler, _MetricValue_OneofSizer, []interface{}{ + (*MetricValue_BoolValue)(nil), + (*MetricValue_Int64Value)(nil), + (*MetricValue_DoubleValue)(nil), + (*MetricValue_StringValue)(nil), + (*MetricValue_DistributionValue)(nil), + } +} + +func _MetricValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*MetricValue) + // value + switch x := m.Value.(type) { + case *MetricValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *MetricValue_Int64Value: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Int64Value)) + case *MetricValue_DoubleValue: + b.EncodeVarint(6<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *MetricValue_StringValue: + b.EncodeVarint(7<<3 | proto.WireBytes) + 
b.EncodeStringBytes(x.StringValue) + case *MetricValue_DistributionValue: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DistributionValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("MetricValue.Value has unexpected type %T", x) + } + return nil +} + +func _MetricValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*MetricValue) + switch tag { + case 4: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &MetricValue_BoolValue{x != 0} + return true, err + case 5: // value.int64_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &MetricValue_Int64Value{int64(x)} + return true, err + case 6: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &MetricValue_DoubleValue{math.Float64frombits(x)} + return true, err + case 7: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &MetricValue_StringValue{x} + return true, err + case 8: // value.distribution_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution) + err := b.DecodeMessage(msg) + m.Value = &MetricValue_DistributionValue{msg} + return true, err + default: + return false, nil + } +} + +func _MetricValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*MetricValue) + // value + switch x := m.Value.(type) { + case *MetricValue_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *MetricValue_Int64Value: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Int64Value)) + case *MetricValue_DoubleValue: + n += proto.SizeVarint(6<<3 | proto.WireFixed64) + n += 
8 + case *MetricValue_StringValue: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *MetricValue_DistributionValue: + s := proto.Size(x.DistributionValue) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents a set of metric values in the same metric. +// Each metric value in the set should have a unique combination of start time, +// end time, and label values. +type MetricValueSet struct { + // The metric name defined in the service configuration. + MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName" json:"metric_name,omitempty"` + // The values in this metric. + MetricValues []*MetricValue `protobuf:"bytes,2,rep,name=metric_values,json=metricValues" json:"metric_values,omitempty"` +} + +func (m *MetricValueSet) Reset() { *m = MetricValueSet{} } +func (m *MetricValueSet) String() string { return proto.CompactTextString(m) } +func (*MetricValueSet) ProtoMessage() {} +func (*MetricValueSet) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *MetricValueSet) GetMetricName() string { + if m != nil { + return m.MetricName + } + return "" +} + +func (m *MetricValueSet) GetMetricValues() []*MetricValue { + if m != nil { + return m.MetricValues + } + return nil +} + +func init() { + proto.RegisterType((*MetricValue)(nil), "google.api.servicecontrol.v1.MetricValue") + proto.RegisterType((*MetricValueSet)(nil), "google.api.servicecontrol.v1.MetricValueSet") +} + +func init() { proto.RegisterFile("google/api/servicecontrol/v1/metric_value.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 482 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcf, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0x8d, 0xdb, 0x1f, 0x2f, 
0xab, 0x68, 0x14, 0x0c, 0x65, 0xa1, 0x71, 0xbd, 0x44, + 0x0f, 0x13, 0x76, 0x75, 0xc5, 0xd5, 0x5b, 0x51, 0x28, 0xe2, 0x2e, 0x4b, 0x14, 0x0f, 0x7a, 0x58, + 0x26, 0xed, 0x33, 0x0c, 0x26, 0x33, 0x21, 0x33, 0x2d, 0xf4, 0xe8, 0xcd, 0x3f, 0xd9, 0x8b, 0x20, + 0xf3, 0xa3, 0x9a, 0x82, 0xd4, 0xbd, 0xe5, 0x7d, 0xf3, 0xfd, 0xbc, 0x7c, 0x27, 0xef, 0x0d, 0x64, + 0xa5, 0x94, 0x65, 0x85, 0x19, 0x6b, 0x78, 0xa6, 0xb0, 0x5d, 0xf3, 0x05, 0x2e, 0xa4, 0xd0, 0xad, + 0xac, 0xb2, 0xf5, 0x49, 0x56, 0xa3, 0x6e, 0xf9, 0xe2, 0x7a, 0xcd, 0xaa, 0x15, 0xd2, 0xa6, 0x95, + 0x5a, 0x46, 0x47, 0x0e, 0xa0, 0xac, 0xe1, 0x74, 0x17, 0xa0, 0xeb, 0x93, 0xc9, 0x51, 0xa7, 0x1d, + 0x13, 0x42, 0x6a, 0xa6, 0xb9, 0x14, 0xca, 0xb1, 0x93, 0xfd, 0x1f, 0x5b, 0x72, 0xa5, 0x5b, 0x5e, + 0xac, 0x0c, 0xe1, 0x81, 0xa9, 0x07, 0x6c, 0x55, 0xac, 0xbe, 0x66, 0x9a, 0xd7, 0xa8, 0x34, 0xab, + 0x1b, 0x6f, 0x78, 0xe8, 0x0d, 0x7a, 0xd3, 0x60, 0x56, 0x4b, 0x81, 0x1b, 0xf7, 0xe2, 0xf8, 0x57, + 0x00, 0xe1, 0x85, 0x4d, 0xff, 0xc9, 0x84, 0x8f, 0x2e, 0x60, 0x50, 0xb1, 0x02, 0x2b, 0x15, 0x93, + 0x24, 0x48, 0xc3, 0xd3, 0x33, 0xba, 0xef, 0x1c, 0xb4, 0x83, 0xd2, 0xf7, 0x96, 0x7b, 0x2b, 0x74, + 0xbb, 0xc9, 0x7d, 0x93, 0xe8, 0x1c, 0x40, 0x69, 0xd6, 0xea, 0x6b, 0x13, 0x28, 0xee, 0x27, 0x24, + 0x0d, 0x4f, 0x27, 0xdb, 0x96, 0xdb, 0xb4, 0xf4, 0xe3, 0x36, 0x6d, 0x3e, 0xb6, 0x6e, 0x53, 0x47, + 0x67, 0x30, 0x42, 0xb1, 0x74, 0x60, 0xf0, 0x5f, 0x70, 0x88, 0x62, 0x69, 0xb1, 0x29, 0x40, 0x21, + 0x65, 0xe5, 0x66, 0x11, 0xdf, 0x4a, 0x48, 0x3a, 0x9a, 0xf7, 0xf2, 0xb1, 0xd1, 0xdc, 0x09, 0x1f, + 0x41, 0xc8, 0x85, 0x7e, 0xf1, 0xdc, 0x3b, 0x0e, 0x12, 0x92, 0x06, 0xf3, 0x5e, 0x0e, 0x56, 0x74, + 0x96, 0xc7, 0x70, 0xb8, 0x94, 0xab, 0xa2, 0x42, 0xef, 0x19, 0x24, 0x24, 0x25, 0xf3, 0x5e, 0x1e, + 0x3a, 0xf5, 0x8f, 0xc9, 0xcc, 0x41, 0x94, 0xde, 0x34, 0x4c, 0x48, 0x3a, 0x36, 0x26, 0xa7, 0x3a, + 0xd3, 0x17, 0x88, 0xba, 0xe3, 0xf2, 0xd6, 0x91, 0x3d, 0xce, 0xd3, 0xfd, 0xbf, 0xf6, 0x4d, 0x87, + 0x9b, 0xf7, 0xf2, 0x7b, 0xdd, 0x3e, 0xb6, 0xf9, 0xe4, 0x1c, 0xc2, 0xce, 0x3f, 
0x8f, 0xee, 0x42, + 0xf0, 0x0d, 0x37, 0x31, 0x31, 0x39, 0x72, 0xf3, 0x18, 0x3d, 0x80, 0x03, 0xf7, 0xc1, 0xbe, 0xd5, + 0x5c, 0xf1, 0xaa, 0xff, 0x92, 0xcc, 0x86, 0xfe, 0xcd, 0xf1, 0x77, 0x02, 0x77, 0x3a, 0x43, 0xfc, + 0x80, 0x3a, 0x9a, 0x42, 0xe8, 0xf7, 0x59, 0xb0, 0x1a, 0x7d, 0x3f, 0x70, 0xd2, 0x25, 0xab, 0x31, + 0xba, 0x84, 0xdb, 0xdd, 0x85, 0x57, 0x71, 0xdf, 0xae, 0xca, 0x93, 0x1b, 0xaf, 0x4a, 0x7e, 0x58, + 0xff, 0x2d, 0xd4, 0xec, 0x07, 0x81, 0x64, 0x21, 0xeb, 0xbd, 0xf8, 0xec, 0xfe, 0x6e, 0xca, 0x2b, + 0xb3, 0x02, 0x57, 0xe4, 0xf3, 0x3b, 0x0f, 0x95, 0xb2, 0x62, 0xa2, 0xa4, 0xb2, 0x2d, 0xb3, 0x12, + 0x85, 0x5d, 0x10, 0x7f, 0x8b, 0x58, 0xc3, 0xd5, 0xbf, 0x6f, 0xd2, 0xeb, 0x5d, 0xe5, 0x27, 0x21, + 0xc5, 0xc0, 0x92, 0xcf, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x00, 0x1a, 0xde, 0xef, 0x03, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/operation.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/operation.pb.go new file mode 100644 index 000000000..f94c2e7b1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/operation.pb.go @@ -0,0 +1,215 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/servicecontrol/v1/operation.proto + +package servicecontrol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Defines the importance of the data contained in the operation. +type Operation_Importance int32 + +const ( + // The API implementation may cache and aggregate the data. + // The data may be lost when rare and unexpected system failures occur. 
+ Operation_LOW Operation_Importance = 0 + // The API implementation doesn't cache and aggregate the data. + // If the method returns successfully, it's guaranteed that the data has + // been persisted in durable storage. + Operation_HIGH Operation_Importance = 1 +) + +var Operation_Importance_name = map[int32]string{ + 0: "LOW", + 1: "HIGH", +} +var Operation_Importance_value = map[string]int32{ + "LOW": 0, + "HIGH": 1, +} + +func (x Operation_Importance) String() string { + return proto.EnumName(Operation_Importance_name, int32(x)) +} +func (Operation_Importance) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0, 0} } + +// Represents information regarding an operation. +type Operation struct { + // Identity of the operation. This must be unique within the scope of the + // service that generated the operation. If the service calls + // Check() and Report() on the same operation, the two calls should carry + // the same id. + // + // UUID version 4 is recommended, though not required. + // In scenarios where an operation is computed from existing information + // and an idempotent id is desirable for deduplication purpose, UUID version 5 + // is recommended. See RFC 4122 for details. + OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // Fully qualified name of the operation. Reserved for future use. + OperationName string `protobuf:"bytes,2,opt,name=operation_name,json=operationName" json:"operation_name,omitempty"` + // Identity of the consumer who is using the service. + // This field should be filled in for the operations initiated by a + // consumer, but not for service-initiated operations that are + // not related to a specific consumer. + // + // This can be in one of the following formats: + // project:, + // project_number:, + // api_key:. + ConsumerId string `protobuf:"bytes,3,opt,name=consumer_id,json=consumerId" json:"consumer_id,omitempty"` + // Required. 
Start time of the operation. + StartTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // End time of the operation. + // Required when the operation is used in [ServiceController.Report][google.api.servicecontrol.v1.ServiceController.Report], + // but optional when the operation is used in [ServiceController.Check][google.api.servicecontrol.v1.ServiceController.Check]. + EndTime *google_protobuf3.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Labels describing the operation. Only the following labels are allowed: + // + // - Labels describing monitored resources as defined in + // the service configuration. + // - Default labels of metric values. When specified, labels defined in the + // metric value override these default. + // - The following labels defined by Google Cloud Platform: + // - `cloud.googleapis.com/location` describing the location where the + // operation happened, + // - `servicecontrol.googleapis.com/user_agent` describing the user agent + // of the API request, + // - `servicecontrol.googleapis.com/service_agent` describing the service + // used to handle the API request (e.g. ESP), + // - `servicecontrol.googleapis.com/platform` describing the platform + // where the API is served (e.g. GAE, GCE, GKE). + Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Represents information about this operation. Each MetricValueSet + // corresponds to a metric defined in the service configuration. + // The data type used in the MetricValueSet must agree with + // the data type specified in the metric definition. + // + // Within a single operation, it is not allowed to have more than one + // MetricValue instances that have the same metric names and identical + // label value combinations. 
If a request has such duplicated MetricValue + // instances, the entire request is rejected with + // an invalid argument error. + MetricValueSets []*MetricValueSet `protobuf:"bytes,7,rep,name=metric_value_sets,json=metricValueSets" json:"metric_value_sets,omitempty"` + // Represents information to be logged. + LogEntries []*LogEntry `protobuf:"bytes,8,rep,name=log_entries,json=logEntries" json:"log_entries,omitempty"` + // DO NOT USE. This is an experimental field. + Importance Operation_Importance `protobuf:"varint,11,opt,name=importance,enum=google.api.servicecontrol.v1.Operation_Importance" json:"importance,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetOperationName() string { + if m != nil { + return m.OperationName + } + return "" +} + +func (m *Operation) GetConsumerId() string { + if m != nil { + return m.ConsumerId + } + return "" +} + +func (m *Operation) GetStartTime() *google_protobuf3.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Operation) GetEndTime() *google_protobuf3.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Operation) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Operation) GetMetricValueSets() []*MetricValueSet { + if m != nil { + return m.MetricValueSets + } + return nil +} + +func (m *Operation) GetLogEntries() []*LogEntry { + if m != nil { + return m.LogEntries + } + return nil +} + +func (m *Operation) GetImportance() Operation_Importance { + if m != nil { + return m.Importance + } + return Operation_LOW +} + +func init() { + proto.RegisterType((*Operation)(nil), 
"google.api.servicecontrol.v1.Operation") + proto.RegisterEnum("google.api.servicecontrol.v1.Operation_Importance", Operation_Importance_name, Operation_Importance_value) +} + +func init() { proto.RegisterFile("google/api/servicecontrol/v1/operation.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xc7, 0x9d, 0xa6, 0xf9, 0xf5, 0x56, 0x63, 0x1c, 0x3c, 0x2c, 0xa1, 0x90, 0x58, 0x50, 0x72, + 0x28, 0xb3, 0x34, 0x45, 0xb0, 0x7a, 0x2b, 0x48, 0x1b, 0x8d, 0xb6, 0xac, 0xa2, 0xe2, 0x25, 0x4c, + 0x36, 0xcf, 0x65, 0x70, 0x77, 0x66, 0x99, 0x99, 0x04, 0x7a, 0xf6, 0xe2, 0x9f, 0xec, 0x51, 0x76, + 0xf6, 0x47, 0x13, 0x90, 0xb5, 0xb7, 0x7d, 0x8f, 0xef, 0xe7, 0xbb, 0xdf, 0x79, 0x6f, 0x06, 0x4e, + 0x62, 0xa5, 0xe2, 0x04, 0x03, 0x9e, 0x89, 0xc0, 0xa0, 0xde, 0x8a, 0x08, 0x23, 0x25, 0xad, 0x56, + 0x49, 0xb0, 0x3d, 0x0d, 0x54, 0x86, 0x9a, 0x5b, 0xa1, 0x24, 0xcb, 0xb4, 0xb2, 0x8a, 0x1e, 0x15, + 0x6a, 0xc6, 0x33, 0xc1, 0xf6, 0xd5, 0x6c, 0x7b, 0x3a, 0x3a, 0xda, 0xf1, 0xe2, 0x52, 0x2a, 0xeb, + 0x50, 0x53, 0xb0, 0xa3, 0xe6, 0x3f, 0x25, 0x2a, 0x5e, 0xa2, 0xb4, 0xfa, 0xb6, 0x54, 0x07, 0x8d, + 0xea, 0x14, 0xad, 0x16, 0xd1, 0x72, 0xcb, 0x93, 0x0d, 0x96, 0xc0, 0xb8, 0x04, 0x5c, 0xb5, 0xda, + 0xfc, 0x08, 0xac, 0x48, 0xd1, 0x58, 0x9e, 0x66, 0x85, 0xe0, 0xf8, 0x77, 0x1b, 0xfa, 0xd7, 0xd5, + 0x79, 0xe8, 0x33, 0x78, 0x58, 0x1f, 0x6e, 0x29, 0xd6, 0x3e, 0x99, 0x90, 0x69, 0x3f, 0xf4, 0xea, + 0xde, 0x7c, 0x4d, 0x9f, 0xc3, 0xe0, 0x4e, 0x22, 0x79, 0x8a, 0xfe, 0x81, 0x13, 0x3d, 0xaa, 0xbb, + 0x1f, 0x79, 0x8a, 0x74, 0x0c, 0x5e, 0xa4, 0xa4, 0xd9, 0xa4, 0xa8, 0x73, 0xa3, 0x96, 0xd3, 0x40, + 0xd5, 0x9a, 0xaf, 0xe9, 0x39, 0x80, 0xb1, 0x5c, 0xdb, 0x65, 0x9e, 0xc8, 0x3f, 0x9c, 0x90, 0xa9, + 0x37, 0x1b, 0xb1, 0x72, 0x92, 0x55, 0x5c, 0xf6, 0xb9, 0x8a, 0x1b, 0xf6, 0x9d, 0x3a, 0xaf, 0xe9, + 0x4b, 0xe8, 0xa1, 0x5c, 0x17, 0x60, 0xfb, 0xbf, 0x60, 0x17, 0xe5, 0xda, 
0x61, 0xef, 0xa1, 0x93, + 0xf0, 0x15, 0x26, 0xc6, 0xef, 0x4c, 0x5a, 0x53, 0x6f, 0x76, 0xc6, 0x9a, 0xf6, 0xc6, 0xea, 0xa9, + 0xb0, 0x85, 0xa3, 0xde, 0xe6, 0x7b, 0x08, 0x4b, 0x0b, 0xfa, 0x0d, 0x9e, 0xec, 0x8e, 0x7b, 0x69, + 0xd0, 0x1a, 0xbf, 0xeb, 0x7c, 0x4f, 0x9a, 0x7d, 0x3f, 0x38, 0xec, 0x4b, 0x4e, 0x7d, 0x42, 0x1b, + 0x3e, 0x4e, 0xf7, 0x6a, 0x43, 0x2f, 0xc1, 0xab, 0xd6, 0x2e, 0xd0, 0xf8, 0x3d, 0xe7, 0xf9, 0xa2, + 0xd9, 0x73, 0xa1, 0xe2, 0x22, 0x1e, 0x24, 0xc5, 0x97, 0x40, 0x43, 0x43, 0x00, 0x91, 0x66, 0x4a, + 0x5b, 0x2e, 0x23, 0xf4, 0xbd, 0x09, 0x99, 0x0e, 0x66, 0xb3, 0xfb, 0x9e, 0x79, 0x5e, 0x93, 0xe1, + 0x8e, 0xcb, 0xe8, 0x1c, 0xbc, 0x9d, 0x69, 0xd0, 0x21, 0xb4, 0x7e, 0xe2, 0x6d, 0x79, 0x4d, 0xf2, + 0x4f, 0xfa, 0x14, 0xda, 0x6e, 0x20, 0xe5, 0xad, 0x28, 0x8a, 0xd7, 0x07, 0xaf, 0xc8, 0xf1, 0x18, + 0xe0, 0xce, 0x94, 0x76, 0xa1, 0xb5, 0xb8, 0xfe, 0x3a, 0x7c, 0x40, 0x7b, 0x70, 0x78, 0x35, 0xbf, + 0xbc, 0x1a, 0x92, 0x8b, 0x5f, 0x04, 0x26, 0x91, 0x4a, 0x1b, 0x13, 0x5e, 0x0c, 0xea, 0x88, 0x37, + 0xf9, 0xaa, 0x6f, 0xc8, 0xf7, 0x77, 0xa5, 0x3e, 0x56, 0x09, 0x97, 0x31, 0x53, 0x3a, 0x0e, 0x62, + 0x94, 0xee, 0x22, 0x94, 0xcf, 0x85, 0x67, 0xc2, 0xfc, 0xfb, 0xc9, 0xbc, 0xd9, 0xef, 0xfc, 0x21, + 0x64, 0xd5, 0x71, 0xe4, 0xd9, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x9c, 0xc2, 0x5d, 0x03, + 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/service_controller.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/service_controller.pb.go new file mode 100644 index 000000000..3f3abc7c3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1/service_controller.pb.go @@ -0,0 +1,484 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/api/servicecontrol/v1/service_controller.proto + +package servicecontrol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request message for the Check method. +type CheckRequest struct { + // The service name as specified in its service configuration. For example, + // `"pubsub.googleapis.com"`. + // + // See [google.api.Service][google.api.Service] for the definition of a service name. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The operation to be checked. + Operation *Operation `protobuf:"bytes,2,opt,name=operation" json:"operation,omitempty"` + // Specifies which version of service configuration should be used to process + // the request. + // + // If unspecified or no matching version can be found, the + // latest one will be used. 
+ ServiceConfigId string `protobuf:"bytes,4,opt,name=service_config_id,json=serviceConfigId" json:"service_config_id,omitempty"` +} + +func (m *CheckRequest) Reset() { *m = CheckRequest{} } +func (m *CheckRequest) String() string { return proto.CompactTextString(m) } +func (*CheckRequest) ProtoMessage() {} +func (*CheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +func (m *CheckRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *CheckRequest) GetOperation() *Operation { + if m != nil { + return m.Operation + } + return nil +} + +func (m *CheckRequest) GetServiceConfigId() string { + if m != nil { + return m.ServiceConfigId + } + return "" +} + +// Response message for the Check method. +type CheckResponse struct { + // The same operation_id value used in the CheckRequest. + // Used for logging and diagnostics purposes. + OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // Indicate the decision of the check. + // + // If no check errors are present, the service should process the operation. + // Otherwise the service should use the list of errors to determine the + // appropriate action. + CheckErrors []*CheckError `protobuf:"bytes,2,rep,name=check_errors,json=checkErrors" json:"check_errors,omitempty"` + // The actual config id used to process the request. + ServiceConfigId string `protobuf:"bytes,5,opt,name=service_config_id,json=serviceConfigId" json:"service_config_id,omitempty"` + // Feedback data returned from the server during processing a Check request. 
+ CheckInfo *CheckResponse_CheckInfo `protobuf:"bytes,6,opt,name=check_info,json=checkInfo" json:"check_info,omitempty"` +} + +func (m *CheckResponse) Reset() { *m = CheckResponse{} } +func (m *CheckResponse) String() string { return proto.CompactTextString(m) } +func (*CheckResponse) ProtoMessage() {} +func (*CheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } + +func (m *CheckResponse) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *CheckResponse) GetCheckErrors() []*CheckError { + if m != nil { + return m.CheckErrors + } + return nil +} + +func (m *CheckResponse) GetServiceConfigId() string { + if m != nil { + return m.ServiceConfigId + } + return "" +} + +func (m *CheckResponse) GetCheckInfo() *CheckResponse_CheckInfo { + if m != nil { + return m.CheckInfo + } + return nil +} + +type CheckResponse_CheckInfo struct { + // Consumer info of this check. + ConsumerInfo *CheckResponse_ConsumerInfo `protobuf:"bytes,2,opt,name=consumer_info,json=consumerInfo" json:"consumer_info,omitempty"` +} + +func (m *CheckResponse_CheckInfo) Reset() { *m = CheckResponse_CheckInfo{} } +func (m *CheckResponse_CheckInfo) String() string { return proto.CompactTextString(m) } +func (*CheckResponse_CheckInfo) ProtoMessage() {} +func (*CheckResponse_CheckInfo) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1, 0} } + +func (m *CheckResponse_CheckInfo) GetConsumerInfo() *CheckResponse_ConsumerInfo { + if m != nil { + return m.ConsumerInfo + } + return nil +} + +// `ConsumerInfo` provides information about the consumer project. +type CheckResponse_ConsumerInfo struct { + // The Google cloud project number, e.g. 1234567890. A value of 0 indicates + // no project number is found. 
+ ProjectNumber int64 `protobuf:"varint,1,opt,name=project_number,json=projectNumber" json:"project_number,omitempty"` +} + +func (m *CheckResponse_ConsumerInfo) Reset() { *m = CheckResponse_ConsumerInfo{} } +func (m *CheckResponse_ConsumerInfo) String() string { return proto.CompactTextString(m) } +func (*CheckResponse_ConsumerInfo) ProtoMessage() {} +func (*CheckResponse_ConsumerInfo) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1, 1} } + +func (m *CheckResponse_ConsumerInfo) GetProjectNumber() int64 { + if m != nil { + return m.ProjectNumber + } + return 0 +} + +// Request message for the Report method. +type ReportRequest struct { + // The service name as specified in its service configuration. For example, + // `"pubsub.googleapis.com"`. + // + // See [google.api.Service][google.api.Service] for the definition of a service name. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // Operations to be reported. + // + // Typically the service should report one operation per request. + // Putting multiple operations into a single request is allowed, but should + // be used only when multiple operations are natually available at the time + // of the report. + // + // If multiple operations are in a single request, the total request size + // should be no larger than 1MB. See [ReportResponse.report_errors][google.api.servicecontrol.v1.ReportResponse.report_errors] for + // partial failure behavior. + Operations []*Operation `protobuf:"bytes,2,rep,name=operations" json:"operations,omitempty"` + // Specifies which version of service config should be used to process the + // request. + // + // If unspecified or no matching version can be found, the + // latest one will be used. 
+ ServiceConfigId string `protobuf:"bytes,3,opt,name=service_config_id,json=serviceConfigId" json:"service_config_id,omitempty"` +} + +func (m *ReportRequest) Reset() { *m = ReportRequest{} } +func (m *ReportRequest) String() string { return proto.CompactTextString(m) } +func (*ReportRequest) ProtoMessage() {} +func (*ReportRequest) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{2} } + +func (m *ReportRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *ReportRequest) GetOperations() []*Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ReportRequest) GetServiceConfigId() string { + if m != nil { + return m.ServiceConfigId + } + return "" +} + +// Response message for the Report method. +type ReportResponse struct { + // Partial failures, one for each `Operation` in the request that failed + // processing. There are three possible combinations of the RPC status: + // + // 1. The combination of a successful RPC status and an empty `report_errors` + // list indicates a complete success where all `Operations` in the + // request are processed successfully. + // 2. The combination of a successful RPC status and a non-empty + // `report_errors` list indicates a partial success where some + // `Operations` in the request succeeded. Each + // `Operation` that failed processing has a corresponding item + // in this list. + // 3. A failed RPC status indicates a general non-deterministic failure. + // When this happens, it's impossible to know which of the + // 'Operations' in the request succeeded or failed. + ReportErrors []*ReportResponse_ReportError `protobuf:"bytes,1,rep,name=report_errors,json=reportErrors" json:"report_errors,omitempty"` + // The actual config id used to process the request. 
+ ServiceConfigId string `protobuf:"bytes,2,opt,name=service_config_id,json=serviceConfigId" json:"service_config_id,omitempty"` +} + +func (m *ReportResponse) Reset() { *m = ReportResponse{} } +func (m *ReportResponse) String() string { return proto.CompactTextString(m) } +func (*ReportResponse) ProtoMessage() {} +func (*ReportResponse) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{3} } + +func (m *ReportResponse) GetReportErrors() []*ReportResponse_ReportError { + if m != nil { + return m.ReportErrors + } + return nil +} + +func (m *ReportResponse) GetServiceConfigId() string { + if m != nil { + return m.ServiceConfigId + } + return "" +} + +// Represents the processing error of one `Operation` in the request. +type ReportResponse_ReportError struct { + // The [Operation.operation_id][google.api.servicecontrol.v1.Operation.operation_id] value from the request. + OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // Details of the error when processing the `Operation`. 
+ Status *google_rpc.Status `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` +} + +func (m *ReportResponse_ReportError) Reset() { *m = ReportResponse_ReportError{} } +func (m *ReportResponse_ReportError) String() string { return proto.CompactTextString(m) } +func (*ReportResponse_ReportError) ProtoMessage() {} +func (*ReportResponse_ReportError) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{3, 0} } + +func (m *ReportResponse_ReportError) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *ReportResponse_ReportError) GetStatus() *google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterType((*CheckRequest)(nil), "google.api.servicecontrol.v1.CheckRequest") + proto.RegisterType((*CheckResponse)(nil), "google.api.servicecontrol.v1.CheckResponse") + proto.RegisterType((*CheckResponse_CheckInfo)(nil), "google.api.servicecontrol.v1.CheckResponse.CheckInfo") + proto.RegisterType((*CheckResponse_ConsumerInfo)(nil), "google.api.servicecontrol.v1.CheckResponse.ConsumerInfo") + proto.RegisterType((*ReportRequest)(nil), "google.api.servicecontrol.v1.ReportRequest") + proto.RegisterType((*ReportResponse)(nil), "google.api.servicecontrol.v1.ReportResponse") + proto.RegisterType((*ReportResponse_ReportError)(nil), "google.api.servicecontrol.v1.ReportResponse.ReportError") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ServiceController service + +type ServiceControllerClient interface { + // Checks an operation with Google Service Control to decide whether + // the given operation should proceed. It should be called before the + // operation is executed. 
+ // + // If feasible, the client should cache the check results and reuse them for + // 60 seconds. In case of server errors, the client can rely on the cached + // results for longer time. + // + // NOTE: the `CheckRequest` has the size limit of 64KB. + // + // This method requires the `servicemanagement.services.check` permission + // on the specified service. For more information, see + // [Google Cloud IAM](https://cloud.google.com/iam). + Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) + // Reports operation results to Google Service Control, such as logs and + // metrics. It should be called after an operation is completed. + // + // If feasible, the client should aggregate reporting data for up to 5 + // seconds to reduce API traffic. Limiting aggregation to 5 seconds is to + // reduce data loss during client crashes. Clients should carefully choose + // the aggregation time window to avoid data loss risk more than 0.01% + // for business and compliance reasons. + // + // NOTE: the `ReportRequest` has the size limit of 1MB. + // + // This method requires the `servicemanagement.services.report` permission + // on the specified service. For more information, see + // [Google Cloud IAM](https://cloud.google.com/iam). + Report(ctx context.Context, in *ReportRequest, opts ...grpc.CallOption) (*ReportResponse, error) +} + +type serviceControllerClient struct { + cc *grpc.ClientConn +} + +func NewServiceControllerClient(cc *grpc.ClientConn) ServiceControllerClient { + return &serviceControllerClient{cc} +} + +func (c *serviceControllerClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) { + out := new(CheckResponse) + err := grpc.Invoke(ctx, "/google.api.servicecontrol.v1.ServiceController/Check", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceControllerClient) Report(ctx context.Context, in *ReportRequest, opts ...grpc.CallOption) (*ReportResponse, error) { + out := new(ReportResponse) + err := grpc.Invoke(ctx, "/google.api.servicecontrol.v1.ServiceController/Report", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ServiceController service + +type ServiceControllerServer interface { + // Checks an operation with Google Service Control to decide whether + // the given operation should proceed. It should be called before the + // operation is executed. + // + // If feasible, the client should cache the check results and reuse them for + // 60 seconds. In case of server errors, the client can rely on the cached + // results for longer time. + // + // NOTE: the `CheckRequest` has the size limit of 64KB. + // + // This method requires the `servicemanagement.services.check` permission + // on the specified service. For more information, see + // [Google Cloud IAM](https://cloud.google.com/iam). + Check(context.Context, *CheckRequest) (*CheckResponse, error) + // Reports operation results to Google Service Control, such as logs and + // metrics. It should be called after an operation is completed. + // + // If feasible, the client should aggregate reporting data for up to 5 + // seconds to reduce API traffic. Limiting aggregation to 5 seconds is to + // reduce data loss during client crashes. Clients should carefully choose + // the aggregation time window to avoid data loss risk more than 0.01% + // for business and compliance reasons. + // + // NOTE: the `ReportRequest` has the size limit of 1MB. + // + // This method requires the `servicemanagement.services.report` permission + // on the specified service. For more information, see + // [Google Cloud IAM](https://cloud.google.com/iam). 
+ Report(context.Context, *ReportRequest) (*ReportResponse, error) +} + +func RegisterServiceControllerServer(s *grpc.Server, srv ServiceControllerServer) { + s.RegisterService(&_ServiceController_serviceDesc, srv) +} + +func _ServiceController_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceControllerServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicecontrol.v1.ServiceController/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceControllerServer).Check(ctx, req.(*CheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceController_Report_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReportRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceControllerServer).Report(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicecontrol.v1.ServiceController/Report", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceControllerServer).Report(ctx, req.(*ReportRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ServiceController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.api.servicecontrol.v1.ServiceController", + HandlerType: (*ServiceControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _ServiceController_Check_Handler, + }, + { + MethodName: "Report", + Handler: _ServiceController_Report_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"google/api/servicecontrol/v1/service_controller.proto", +} + +func init() { + proto.RegisterFile("google/api/servicecontrol/v1/service_controller.proto", fileDescriptor5) +} + +var fileDescriptor5 = []byte{ + // 619 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xd6, 0x3a, 0x6d, 0xa4, 0x4c, 0x9c, 0xfe, 0xea, 0x1e, 0x7e, 0x22, 0xab, 0x87, 0xd4, 0x12, + 0x34, 0x4a, 0x8b, 0xad, 0x16, 0x55, 0x42, 0xe1, 0x44, 0xa3, 0xaa, 0x0a, 0x48, 0xa5, 0x72, 0x38, + 0x21, 0xaa, 0xc8, 0xdd, 0x6c, 0x8c, 0x4b, 0xb2, 0x6b, 0xd6, 0x4e, 0x2e, 0x88, 0x0b, 0x0f, 0xc0, + 0xa1, 0xbc, 0x01, 0xaa, 0xc4, 0x33, 0xf0, 0x1c, 0xbc, 0x02, 0x0f, 0x01, 0x37, 0x94, 0xdd, 0xb5, + 0xeb, 0x08, 0x63, 0x92, 0x9b, 0xf7, 0xdb, 0x99, 0xf9, 0xbe, 0x9d, 0xf9, 0x3c, 0x70, 0x1c, 0x70, + 0x1e, 0x4c, 0xa8, 0xeb, 0x47, 0xa1, 0x1b, 0x53, 0x31, 0x0f, 0x09, 0x25, 0x9c, 0x25, 0x82, 0x4f, + 0xdc, 0xf9, 0x61, 0x8a, 0x0c, 0x35, 0x34, 0xa1, 0xc2, 0x89, 0x04, 0x4f, 0x38, 0xde, 0x51, 0x69, + 0x8e, 0x1f, 0x85, 0xce, 0x72, 0x9a, 0x33, 0x3f, 0xb4, 0x76, 0x72, 0x45, 0x7d, 0xc6, 0x78, 0xe2, + 0x27, 0x21, 0x67, 0xb1, 0xca, 0xb5, 0x9c, 0x52, 0x4a, 0xf2, 0x86, 0x92, 0xb7, 0x43, 0x2a, 0x04, + 0xd7, 0x5c, 0xd6, 0x41, 0x69, 0x3c, 0x8f, 0xa8, 0x90, 0xe5, 0x75, 0xf4, 0x3d, 0x1d, 0x2d, 0x22, + 0xe2, 0xc6, 0x89, 0x9f, 0xcc, 0x34, 0xad, 0x7d, 0x8b, 0xc0, 0xec, 0x2d, 0x8a, 0x7b, 0xf4, 0xdd, + 0x8c, 0xc6, 0x09, 0xde, 0x05, 0x33, 0x7d, 0x1f, 0xf3, 0xa7, 0xb4, 0x89, 0x5a, 0xa8, 0x5d, 0xf3, + 0xea, 0x1a, 0x3b, 0xf7, 0xa7, 0x14, 0x9f, 0x42, 0x2d, 0xab, 0xdf, 0x34, 0x5a, 0xa8, 0x5d, 0x3f, + 0xda, 0x73, 0xca, 0x9e, 0xee, 0xbc, 0x48, 0xc3, 0xbd, 0xbb, 0x4c, 0xdc, 0x81, 0xed, 0x5c, 0x27, + 0xc7, 0x61, 0x30, 0x0c, 0x47, 0xcd, 0x0d, 0x49, 0xf7, 0x9f, 0xbe, 0xe8, 0x49, 0xbc, 0x3f, 0xb2, + 0x6f, 0x2b, 0xd0, 0xd0, 0x32, 0xe3, 0x88, 0xb3, 0x98, 0x2e, 0x74, 0x66, 0xa5, 0x16, 0x89, 0x5a, + 0x67, 0x86, 0xf5, 0x47, 0xf8, 0x39, 0x98, 0xb9, 0xbe, 0xc5, 0x4d, 0xa3, 
0x55, 0x69, 0xd7, 0x8f, + 0xda, 0xe5, 0x52, 0x25, 0xcb, 0xe9, 0x22, 0xc1, 0xab, 0x93, 0xec, 0x3b, 0x2e, 0x56, 0xbb, 0x59, + 0xa8, 0x16, 0xbf, 0x04, 0x50, 0xc4, 0x21, 0x1b, 0xf3, 0x66, 0x55, 0x76, 0xe8, 0x78, 0x05, 0xda, + 0xf4, 0x71, 0xea, 0xd4, 0x67, 0x63, 0xee, 0xd5, 0x48, 0xfa, 0x69, 0x5d, 0x43, 0x2d, 0xc3, 0xf1, + 0x25, 0x34, 0x08, 0x67, 0xf1, 0x6c, 0x4a, 0x85, 0x62, 0x51, 0x73, 0x78, 0xbc, 0x16, 0x8b, 0x2e, + 0x20, 0x89, 0x4c, 0x92, 0x3b, 0x59, 0xc7, 0x60, 0xe6, 0x6f, 0xf1, 0x7d, 0xd8, 0x8a, 0x04, 0xbf, + 0xa6, 0x24, 0x19, 0xb2, 0xd9, 0xf4, 0x8a, 0x0a, 0xd9, 0xef, 0x8a, 0xd7, 0xd0, 0xe8, 0xb9, 0x04, + 0xed, 0xaf, 0x08, 0x1a, 0x1e, 0x8d, 0xb8, 0x48, 0xd6, 0xb0, 0xd3, 0x19, 0x40, 0x36, 0xb5, 0x74, + 0x48, 0x2b, 0xfb, 0x29, 0x97, 0x5a, 0x3c, 0xa2, 0x4a, 0xb1, 0xa1, 0x7e, 0x21, 0xd8, 0x4a, 0x95, + 0x6a, 0x47, 0x5d, 0x42, 0x43, 0x48, 0x24, 0xf5, 0x0b, 0x92, 0x52, 0xfe, 0xd1, 0xd2, 0xe5, 0x22, + 0xfa, 0xa8, 0xfc, 0x63, 0x8a, 0xbb, 0xc3, 0x5f, 0xd4, 0x19, 0x85, 0xea, 0xac, 0xd7, 0x50, 0xcf, + 0x15, 0x5a, 0xc5, 0xeb, 0x1d, 0xa8, 0xaa, 0xff, 0x5a, 0x1b, 0x01, 0xa7, 0xaa, 0x45, 0x44, 0x9c, + 0x81, 0xbc, 0xf1, 0x74, 0xc4, 0xd1, 0x37, 0x03, 0xb6, 0x07, 0x19, 0xa3, 0x5e, 0x61, 0xf8, 0x13, + 0x82, 0x4d, 0xe9, 0x0f, 0xdc, 0x59, 0xc9, 0x44, 0x72, 0xbe, 0xd6, 0xfe, 0x1a, 0x86, 0xb3, 0x0f, + 0x3e, 0x7e, 0xff, 0xf1, 0xd9, 0x78, 0x60, 0xef, 0xe6, 0xb6, 0x68, 0xec, 0xbe, 0xcf, 0x1b, 0xe4, + 0x43, 0x57, 0x1a, 0xbe, 0x8b, 0x3a, 0xf8, 0x06, 0x41, 0x55, 0x75, 0x01, 0xef, 0xaf, 0x36, 0x03, + 0x25, 0xe9, 0x60, 0x9d, 0x81, 0xd9, 0x0f, 0xa5, 0xa6, 0x3d, 0xdb, 0x2e, 0xd3, 0xa4, 0x06, 0xd9, + 0x45, 0x9d, 0x93, 0x1b, 0x04, 0x2d, 0xc2, 0xa7, 0xa5, 0x14, 0x27, 0xff, 0xff, 0xd1, 0xdd, 0x8b, + 0xc5, 0xb2, 0xbd, 0x40, 0xaf, 0x9e, 0xe9, 0xbc, 0x80, 0x4f, 0x7c, 0x16, 0x38, 0x5c, 0x04, 0x6e, + 0x40, 0x99, 0x5c, 0xc5, 0xae, 0xba, 0xf2, 0xa3, 0x30, 0x2e, 0x5e, 0xea, 0x4f, 0x96, 0x91, 0x9f, + 0x08, 0x7d, 0x31, 0x36, 0xce, 0x9e, 0x0e, 0x7a, 0x57, 0x55, 0x59, 0xe0, 0xd1, 0xef, 0x00, 0x00, + 0x00, 0xff, 
0xff, 0x5e, 0x28, 0x7b, 0xe6, 0xb7, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/resources.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/resources.pb.go new file mode 100644 index 000000000..949064763 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/resources.pb.go @@ -0,0 +1,802 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/servicemanagement/v1/resources.proto + +/* +Package servicemanagement is a generated protocol buffer package. + +It is generated from these files: + google/api/servicemanagement/v1/resources.proto + google/api/servicemanagement/v1/servicemanager.proto + +It has these top-level messages: + ManagedService + OperationMetadata + Diagnostic + ConfigSource + ConfigFile + ConfigRef + ChangeReport + Rollout + ListServicesRequest + ListServicesResponse + GetServiceRequest + CreateServiceRequest + DeleteServiceRequest + UndeleteServiceRequest + UndeleteServiceResponse + GetServiceConfigRequest + ListServiceConfigsRequest + ListServiceConfigsResponse + CreateServiceConfigRequest + SubmitConfigSourceRequest + SubmitConfigSourceResponse + CreateServiceRolloutRequest + ListServiceRolloutsRequest + ListServiceRolloutsResponse + GetServiceRolloutRequest + EnableServiceRequest + DisableServiceRequest + GenerateConfigReportRequest + GenerateConfigReportResponse +*/ +package servicemanagement + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api2 "google.golang.org/genproto/googleapis/api/configchange" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import _ "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/any" +import _ "google.golang.org/genproto/protobuf/field_mask" +import _ 
"github.com/golang/protobuf/ptypes/struct" +import google_protobuf9 "github.com/golang/protobuf/ptypes/timestamp" +import _ "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Code describes the status of one operation step. +type OperationMetadata_Status int32 + +const ( + // Unspecifed code. + OperationMetadata_STATUS_UNSPECIFIED OperationMetadata_Status = 0 + // The step has completed without errors. + OperationMetadata_DONE OperationMetadata_Status = 1 + // The step has not started yet. + OperationMetadata_NOT_STARTED OperationMetadata_Status = 2 + // The step is in progress. + OperationMetadata_IN_PROGRESS OperationMetadata_Status = 3 + // The step has completed with errors. + OperationMetadata_FAILED OperationMetadata_Status = 4 + // The step has completed with cancellation. + OperationMetadata_CANCELLED OperationMetadata_Status = 5 +) + +var OperationMetadata_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "DONE", + 2: "NOT_STARTED", + 3: "IN_PROGRESS", + 4: "FAILED", + 5: "CANCELLED", +} +var OperationMetadata_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "DONE": 1, + "NOT_STARTED": 2, + "IN_PROGRESS": 3, + "FAILED": 4, + "CANCELLED": 5, +} + +func (x OperationMetadata_Status) String() string { + return proto.EnumName(OperationMetadata_Status_name, int32(x)) +} +func (OperationMetadata_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// The kind of diagnostic information possible. 
+type Diagnostic_Kind int32 + +const ( + // Warnings and errors + Diagnostic_WARNING Diagnostic_Kind = 0 + // Only errors + Diagnostic_ERROR Diagnostic_Kind = 1 +) + +var Diagnostic_Kind_name = map[int32]string{ + 0: "WARNING", + 1: "ERROR", +} +var Diagnostic_Kind_value = map[string]int32{ + "WARNING": 0, + "ERROR": 1, +} + +func (x Diagnostic_Kind) String() string { + return proto.EnumName(Diagnostic_Kind_name, int32(x)) +} +func (Diagnostic_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +type ConfigFile_FileType int32 + +const ( + // Unknown file type. + ConfigFile_FILE_TYPE_UNSPECIFIED ConfigFile_FileType = 0 + // YAML-specification of service. + ConfigFile_SERVICE_CONFIG_YAML ConfigFile_FileType = 1 + // OpenAPI specification, serialized in JSON. + ConfigFile_OPEN_API_JSON ConfigFile_FileType = 2 + // OpenAPI specification, serialized in YAML. + ConfigFile_OPEN_API_YAML ConfigFile_FileType = 3 + // FileDescriptorSet, generated by protoc. + // + // To generate, use protoc with imports and source info included. + // For an example test.proto file, the following command would put the value + // in a new file named out.pb. + // + // $protoc --include_imports --include_source_info test.proto -o out.pb + ConfigFile_FILE_DESCRIPTOR_SET_PROTO ConfigFile_FileType = 4 +) + +var ConfigFile_FileType_name = map[int32]string{ + 0: "FILE_TYPE_UNSPECIFIED", + 1: "SERVICE_CONFIG_YAML", + 2: "OPEN_API_JSON", + 3: "OPEN_API_YAML", + 4: "FILE_DESCRIPTOR_SET_PROTO", +} +var ConfigFile_FileType_value = map[string]int32{ + "FILE_TYPE_UNSPECIFIED": 0, + "SERVICE_CONFIG_YAML": 1, + "OPEN_API_JSON": 2, + "OPEN_API_YAML": 3, + "FILE_DESCRIPTOR_SET_PROTO": 4, +} + +func (x ConfigFile_FileType) String() string { + return proto.EnumName(ConfigFile_FileType_name, int32(x)) +} +func (ConfigFile_FileType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +// Status of a Rollout. 
+type Rollout_RolloutStatus int32 + +const ( + // No status specified. + Rollout_ROLLOUT_STATUS_UNSPECIFIED Rollout_RolloutStatus = 0 + // The Rollout is in progress. + Rollout_IN_PROGRESS Rollout_RolloutStatus = 1 + // The Rollout has completed successfully. + Rollout_SUCCESS Rollout_RolloutStatus = 2 + // The Rollout has been cancelled. This can happen if you have overlapping + // Rollout pushes, and the previous ones will be cancelled. + Rollout_CANCELLED Rollout_RolloutStatus = 3 + // The Rollout has failed. It is typically caused by configuration errors. + Rollout_FAILED Rollout_RolloutStatus = 4 + // The Rollout has not started yet and is pending for execution. + Rollout_PENDING Rollout_RolloutStatus = 5 +) + +var Rollout_RolloutStatus_name = map[int32]string{ + 0: "ROLLOUT_STATUS_UNSPECIFIED", + 1: "IN_PROGRESS", + 2: "SUCCESS", + 3: "CANCELLED", + 4: "FAILED", + 5: "PENDING", +} +var Rollout_RolloutStatus_value = map[string]int32{ + "ROLLOUT_STATUS_UNSPECIFIED": 0, + "IN_PROGRESS": 1, + "SUCCESS": 2, + "CANCELLED": 3, + "FAILED": 4, + "PENDING": 5, +} + +func (x Rollout_RolloutStatus) String() string { + return proto.EnumName(Rollout_RolloutStatus_name, int32(x)) +} +func (Rollout_RolloutStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// The full representation of a Service that is managed by +// Google Service Management. +type ManagedService struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // ID of the project that produces and owns this service. 
+ ProducerProjectId string `protobuf:"bytes,3,opt,name=producer_project_id,json=producerProjectId" json:"producer_project_id,omitempty"` +} + +func (m *ManagedService) Reset() { *m = ManagedService{} } +func (m *ManagedService) String() string { return proto.CompactTextString(m) } +func (*ManagedService) ProtoMessage() {} +func (*ManagedService) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ManagedService) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *ManagedService) GetProducerProjectId() string { + if m != nil { + return m.ProducerProjectId + } + return "" +} + +// The metadata associated with a long running operation resource. +type OperationMetadata struct { + // The full name of the resources that this operation is directly + // associated with. + ResourceNames []string `protobuf:"bytes,1,rep,name=resource_names,json=resourceNames" json:"resource_names,omitempty"` + // Detailed status information for each step. The order is undetermined. + Steps []*OperationMetadata_Step `protobuf:"bytes,2,rep,name=steps" json:"steps,omitempty"` + // Percentage of completion of this operation, ranging from 0 to 100. + ProgressPercentage int32 `protobuf:"varint,3,opt,name=progress_percentage,json=progressPercentage" json:"progress_percentage,omitempty"` + // The start time of the operation. 
+ StartTime *google_protobuf9.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime" json:"start_time,omitempty"` +} + +func (m *OperationMetadata) Reset() { *m = OperationMetadata{} } +func (m *OperationMetadata) String() string { return proto.CompactTextString(m) } +func (*OperationMetadata) ProtoMessage() {} +func (*OperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *OperationMetadata) GetResourceNames() []string { + if m != nil { + return m.ResourceNames + } + return nil +} + +func (m *OperationMetadata) GetSteps() []*OperationMetadata_Step { + if m != nil { + return m.Steps + } + return nil +} + +func (m *OperationMetadata) GetProgressPercentage() int32 { + if m != nil { + return m.ProgressPercentage + } + return 0 +} + +func (m *OperationMetadata) GetStartTime() *google_protobuf9.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +// Represents the status of one operation step. +type OperationMetadata_Step struct { + // The short description of the step. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // The status code. 
+ Status OperationMetadata_Status `protobuf:"varint,4,opt,name=status,enum=google.api.servicemanagement.v1.OperationMetadata_Status" json:"status,omitempty"` +} + +func (m *OperationMetadata_Step) Reset() { *m = OperationMetadata_Step{} } +func (m *OperationMetadata_Step) String() string { return proto.CompactTextString(m) } +func (*OperationMetadata_Step) ProtoMessage() {} +func (*OperationMetadata_Step) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +func (m *OperationMetadata_Step) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *OperationMetadata_Step) GetStatus() OperationMetadata_Status { + if m != nil { + return m.Status + } + return OperationMetadata_STATUS_UNSPECIFIED +} + +// Represents a diagnostic message (error or warning) +type Diagnostic struct { + // File name and line number of the error or warning. + Location string `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` + // The kind of diagnostic information provided. + Kind Diagnostic_Kind `protobuf:"varint,2,opt,name=kind,enum=google.api.servicemanagement.v1.Diagnostic_Kind" json:"kind,omitempty"` + // Message describing the error or warning. + Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` +} + +func (m *Diagnostic) Reset() { *m = Diagnostic{} } +func (m *Diagnostic) String() string { return proto.CompactTextString(m) } +func (*Diagnostic) ProtoMessage() {} +func (*Diagnostic) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Diagnostic) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *Diagnostic) GetKind() Diagnostic_Kind { + if m != nil { + return m.Kind + } + return Diagnostic_WARNING +} + +func (m *Diagnostic) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// Represents a source file which is used to generate the service configuration +// defined by `google.api.Service`. 
+type ConfigSource struct { + // A unique ID for a specific instance of this message, typically assigned + // by the client for tracking purpose. If empty, the server may choose to + // generate one instead. + Id string `protobuf:"bytes,5,opt,name=id" json:"id,omitempty"` + // Set of source configuration files that are used to generate a service + // configuration (`google.api.Service`). + Files []*ConfigFile `protobuf:"bytes,2,rep,name=files" json:"files,omitempty"` +} + +func (m *ConfigSource) Reset() { *m = ConfigSource{} } +func (m *ConfigSource) String() string { return proto.CompactTextString(m) } +func (*ConfigSource) ProtoMessage() {} +func (*ConfigSource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ConfigSource) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ConfigSource) GetFiles() []*ConfigFile { + if m != nil { + return m.Files + } + return nil +} + +// Generic specification of a source configuration file +type ConfigFile struct { + // The file name of the configuration file (full or relative path). + FilePath string `protobuf:"bytes,1,opt,name=file_path,json=filePath" json:"file_path,omitempty"` + // The bytes that constitute the file. + FileContents []byte `protobuf:"bytes,3,opt,name=file_contents,json=fileContents,proto3" json:"file_contents,omitempty"` + // The type of configuration file this represents. 
+ FileType ConfigFile_FileType `protobuf:"varint,4,opt,name=file_type,json=fileType,enum=google.api.servicemanagement.v1.ConfigFile_FileType" json:"file_type,omitempty"` +} + +func (m *ConfigFile) Reset() { *m = ConfigFile{} } +func (m *ConfigFile) String() string { return proto.CompactTextString(m) } +func (*ConfigFile) ProtoMessage() {} +func (*ConfigFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ConfigFile) GetFilePath() string { + if m != nil { + return m.FilePath + } + return "" +} + +func (m *ConfigFile) GetFileContents() []byte { + if m != nil { + return m.FileContents + } + return nil +} + +func (m *ConfigFile) GetFileType() ConfigFile_FileType { + if m != nil { + return m.FileType + } + return ConfigFile_FILE_TYPE_UNSPECIFIED +} + +// Represents a service configuration with its name and id. +type ConfigRef struct { + // Resource name of a service config. It must have the following + // format: "services/{service name}/configs/{config id}". + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ConfigRef) Reset() { *m = ConfigRef{} } +func (m *ConfigRef) String() string { return proto.CompactTextString(m) } +func (*ConfigRef) ProtoMessage() {} +func (*ConfigRef) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ConfigRef) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Change report associated with a particular service configuration. +// +// It contains a list of ConfigChanges based on the comparison between +// two service configurations. +type ChangeReport struct { + // List of changes between two service configurations. + // The changes will be alphabetically sorted based on the identifier + // of each change. + // A ConfigChange identifier is a dot separated path to the configuration. 
+ // Example: visibility.rules[selector='LibraryService.CreateBook'].restriction + ConfigChanges []*google_api2.ConfigChange `protobuf:"bytes,1,rep,name=config_changes,json=configChanges" json:"config_changes,omitempty"` +} + +func (m *ChangeReport) Reset() { *m = ChangeReport{} } +func (m *ChangeReport) String() string { return proto.CompactTextString(m) } +func (*ChangeReport) ProtoMessage() {} +func (*ChangeReport) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ChangeReport) GetConfigChanges() []*google_api2.ConfigChange { + if m != nil { + return m.ConfigChanges + } + return nil +} + +// A rollout resource that defines how service configuration versions are pushed +// to control plane systems. Typically, you create a new version of the +// service config, and then create a Rollout to push the service config. +type Rollout struct { + // Optional unique identifier of this Rollout. Only lower case letters, digits + // and '-' are allowed. + // + // If not specified by client, the server will generate one. The generated id + // will have the form of , where "date" is the create + // date in ISO 8601 format. "revision number" is a monotonically increasing + // positive number that is reset every day for each service. + // An example of the generated rollout_id is '2016-02-16r1' + RolloutId string `protobuf:"bytes,1,opt,name=rollout_id,json=rolloutId" json:"rollout_id,omitempty"` + // Creation time of the rollout. Readonly. + CreateTime *google_protobuf9.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The user who created the Rollout. Readonly. + CreatedBy string `protobuf:"bytes,3,opt,name=created_by,json=createdBy" json:"created_by,omitempty"` + // The status of this rollout. Readonly. In case of a failed rollout, + // the system will automatically rollback to the current Rollout + // version. Readonly. 
+ Status Rollout_RolloutStatus `protobuf:"varint,4,opt,name=status,enum=google.api.servicemanagement.v1.Rollout_RolloutStatus" json:"status,omitempty"` + // Strategy that defines which versions of service configurations should be + // pushed + // and how they should be used at runtime. + // + // Types that are valid to be assigned to Strategy: + // *Rollout_TrafficPercentStrategy_ + // *Rollout_DeleteServiceStrategy_ + Strategy isRollout_Strategy `protobuf_oneof:"strategy"` + // The name of the service associated with this Rollout. + ServiceName string `protobuf:"bytes,8,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` +} + +func (m *Rollout) Reset() { *m = Rollout{} } +func (m *Rollout) String() string { return proto.CompactTextString(m) } +func (*Rollout) ProtoMessage() {} +func (*Rollout) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isRollout_Strategy interface { + isRollout_Strategy() +} + +type Rollout_TrafficPercentStrategy_ struct { + TrafficPercentStrategy *Rollout_TrafficPercentStrategy `protobuf:"bytes,5,opt,name=traffic_percent_strategy,json=trafficPercentStrategy,oneof"` +} +type Rollout_DeleteServiceStrategy_ struct { + DeleteServiceStrategy *Rollout_DeleteServiceStrategy `protobuf:"bytes,200,opt,name=delete_service_strategy,json=deleteServiceStrategy,oneof"` +} + +func (*Rollout_TrafficPercentStrategy_) isRollout_Strategy() {} +func (*Rollout_DeleteServiceStrategy_) isRollout_Strategy() {} + +func (m *Rollout) GetStrategy() isRollout_Strategy { + if m != nil { + return m.Strategy + } + return nil +} + +func (m *Rollout) GetRolloutId() string { + if m != nil { + return m.RolloutId + } + return "" +} + +func (m *Rollout) GetCreateTime() *google_protobuf9.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Rollout) GetCreatedBy() string { + if m != nil { + return m.CreatedBy + } + return "" +} + +func (m *Rollout) GetStatus() Rollout_RolloutStatus { + if m != nil { + return 
m.Status + } + return Rollout_ROLLOUT_STATUS_UNSPECIFIED +} + +func (m *Rollout) GetTrafficPercentStrategy() *Rollout_TrafficPercentStrategy { + if x, ok := m.GetStrategy().(*Rollout_TrafficPercentStrategy_); ok { + return x.TrafficPercentStrategy + } + return nil +} + +func (m *Rollout) GetDeleteServiceStrategy() *Rollout_DeleteServiceStrategy { + if x, ok := m.GetStrategy().(*Rollout_DeleteServiceStrategy_); ok { + return x.DeleteServiceStrategy + } + return nil +} + +func (m *Rollout) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Rollout) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Rollout_OneofMarshaler, _Rollout_OneofUnmarshaler, _Rollout_OneofSizer, []interface{}{ + (*Rollout_TrafficPercentStrategy_)(nil), + (*Rollout_DeleteServiceStrategy_)(nil), + } +} + +func _Rollout_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Rollout) + // strategy + switch x := m.Strategy.(type) { + case *Rollout_TrafficPercentStrategy_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TrafficPercentStrategy); err != nil { + return err + } + case *Rollout_DeleteServiceStrategy_: + b.EncodeVarint(200<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteServiceStrategy); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Rollout.Strategy has unexpected type %T", x) + } + return nil +} + +func _Rollout_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Rollout) + switch tag { + case 5: // strategy.traffic_percent_strategy + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Rollout_TrafficPercentStrategy) + err := b.DecodeMessage(msg) + m.Strategy = 
&Rollout_TrafficPercentStrategy_{msg} + return true, err + case 200: // strategy.delete_service_strategy + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Rollout_DeleteServiceStrategy) + err := b.DecodeMessage(msg) + m.Strategy = &Rollout_DeleteServiceStrategy_{msg} + return true, err + default: + return false, nil + } +} + +func _Rollout_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Rollout) + // strategy + switch x := m.Strategy.(type) { + case *Rollout_TrafficPercentStrategy_: + s := proto.Size(x.TrafficPercentStrategy) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Rollout_DeleteServiceStrategy_: + s := proto.Size(x.DeleteServiceStrategy) + n += proto.SizeVarint(200<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Strategy that specifies how Google Service Control should select +// different +// versions of service configurations based on traffic percentage. +// +// One example of how to gradually rollout a new service configuration using +// this +// strategy: +// Day 1 +// +// Rollout { +// id: "example.googleapis.com/rollout_20160206" +// traffic_percent_strategy { +// percentages: { +// "example.googleapis.com/20160201": 70.00 +// "example.googleapis.com/20160206": 30.00 +// } +// } +// } +// +// Day 2 +// +// Rollout { +// id: "example.googleapis.com/rollout_20160207" +// traffic_percent_strategy: { +// percentages: { +// "example.googleapis.com/20160206": 100.00 +// } +// } +// } +type Rollout_TrafficPercentStrategy struct { + // Maps service configuration IDs to their corresponding traffic percentage. + // Key is the service configuration ID, Value is the traffic percentage + // which must be greater than 0.0 and the sum must equal to 100.0. 
+ Percentages map[string]float64 `protobuf:"bytes,1,rep,name=percentages" json:"percentages,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` +} + +func (m *Rollout_TrafficPercentStrategy) Reset() { *m = Rollout_TrafficPercentStrategy{} } +func (m *Rollout_TrafficPercentStrategy) String() string { return proto.CompactTextString(m) } +func (*Rollout_TrafficPercentStrategy) ProtoMessage() {} +func (*Rollout_TrafficPercentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{7, 0} +} + +func (m *Rollout_TrafficPercentStrategy) GetPercentages() map[string]float64 { + if m != nil { + return m.Percentages + } + return nil +} + +// Strategy used to delete a service. This strategy is a placeholder only +// used by the system generated rollout to delete a service. +type Rollout_DeleteServiceStrategy struct { +} + +func (m *Rollout_DeleteServiceStrategy) Reset() { *m = Rollout_DeleteServiceStrategy{} } +func (m *Rollout_DeleteServiceStrategy) String() string { return proto.CompactTextString(m) } +func (*Rollout_DeleteServiceStrategy) ProtoMessage() {} +func (*Rollout_DeleteServiceStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{7, 1} +} + +func init() { + proto.RegisterType((*ManagedService)(nil), "google.api.servicemanagement.v1.ManagedService") + proto.RegisterType((*OperationMetadata)(nil), "google.api.servicemanagement.v1.OperationMetadata") + proto.RegisterType((*OperationMetadata_Step)(nil), "google.api.servicemanagement.v1.OperationMetadata.Step") + proto.RegisterType((*Diagnostic)(nil), "google.api.servicemanagement.v1.Diagnostic") + proto.RegisterType((*ConfigSource)(nil), "google.api.servicemanagement.v1.ConfigSource") + proto.RegisterType((*ConfigFile)(nil), "google.api.servicemanagement.v1.ConfigFile") + proto.RegisterType((*ConfigRef)(nil), "google.api.servicemanagement.v1.ConfigRef") + proto.RegisterType((*ChangeReport)(nil), "google.api.servicemanagement.v1.ChangeReport") + 
proto.RegisterType((*Rollout)(nil), "google.api.servicemanagement.v1.Rollout") + proto.RegisterType((*Rollout_TrafficPercentStrategy)(nil), "google.api.servicemanagement.v1.Rollout.TrafficPercentStrategy") + proto.RegisterType((*Rollout_DeleteServiceStrategy)(nil), "google.api.servicemanagement.v1.Rollout.DeleteServiceStrategy") + proto.RegisterEnum("google.api.servicemanagement.v1.OperationMetadata_Status", OperationMetadata_Status_name, OperationMetadata_Status_value) + proto.RegisterEnum("google.api.servicemanagement.v1.Diagnostic_Kind", Diagnostic_Kind_name, Diagnostic_Kind_value) + proto.RegisterEnum("google.api.servicemanagement.v1.ConfigFile_FileType", ConfigFile_FileType_name, ConfigFile_FileType_value) + proto.RegisterEnum("google.api.servicemanagement.v1.Rollout_RolloutStatus", Rollout_RolloutStatus_name, Rollout_RolloutStatus_value) +} + +func init() { proto.RegisterFile("google/api/servicemanagement/v1/resources.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1166 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x8e, 0xdb, 0x44, + 0x10, 0xaf, 0xf3, 0xe7, 0xee, 0x32, 0xb9, 0x0b, 0xee, 0x96, 0xf6, 0xd2, 0xd0, 0x3f, 0xc1, 0x15, + 0xd2, 0x49, 0x48, 0x0e, 0x0d, 0x08, 0x28, 0x95, 0x5a, 0xe5, 0x12, 0xdf, 0x61, 0xc8, 0xd9, 0xee, + 0xda, 0x07, 0x2a, 0x5f, 0xac, 0xad, 0xbd, 0x71, 0x4d, 0x13, 0xdb, 0xb2, 0x37, 0x27, 0x45, 0xfd, + 0xc8, 0x0b, 0xf0, 0x0c, 0x7c, 0x81, 0x47, 0xe1, 0x03, 0x4f, 0x00, 0x2f, 0x83, 0xbc, 0x5e, 0xdf, + 0xe5, 0xcf, 0xa1, 0x14, 0xf8, 0x92, 0xec, 0xfc, 0x66, 0xf6, 0x37, 0xb3, 0xb3, 0x33, 0xb3, 0x86, + 0x5e, 0x10, 0xc7, 0xc1, 0x94, 0xf6, 0x48, 0x12, 0xf6, 0x32, 0x9a, 0x5e, 0x84, 0x1e, 0x9d, 0x91, + 0x88, 0x04, 0x74, 0x46, 0x23, 0xd6, 0xbb, 0x78, 0xdc, 0x4b, 0x69, 0x16, 0xcf, 0x53, 0x8f, 0x66, + 0x6a, 0x92, 0xc6, 0x2c, 0x46, 0x0f, 0x8b, 0x0d, 0x2a, 0x49, 0x42, 0x75, 0x63, 0x83, 0x7a, 0xf1, + 0xb8, 0x73, 0x6f, 0x89, 0x91, 0x44, 0x51, 0xcc, 0x08, 0x0b, 0xe3, 
0x48, 0x6c, 0xef, 0x3c, 0x58, + 0xd2, 0x7a, 0x71, 0x34, 0x09, 0x03, 0xd7, 0x7b, 0x4d, 0xa2, 0x80, 0x0a, 0x7d, 0x7b, 0x33, 0x1e, + 0xa1, 0x79, 0x24, 0x34, 0xd3, 0x38, 0x0a, 0xd2, 0x79, 0x14, 0x85, 0x51, 0xd0, 0x8b, 0x13, 0x9a, + 0xae, 0xd0, 0xdf, 0x15, 0x46, 0x5c, 0x7a, 0x35, 0x9f, 0xf4, 0x48, 0xb4, 0x10, 0xaa, 0xee, 0xba, + 0x6a, 0x12, 0xd2, 0xa9, 0xef, 0xce, 0x48, 0xf6, 0x46, 0x58, 0xdc, 0x5b, 0xb7, 0xc8, 0x58, 0x3a, + 0xf7, 0x98, 0xd0, 0x3e, 0x5c, 0xd7, 0xb2, 0x70, 0x46, 0x33, 0x46, 0x66, 0x89, 0x30, 0x38, 0x14, + 0x06, 0x69, 0xe2, 0xf5, 0x32, 0x46, 0xd8, 0x5c, 0x04, 0xa5, 0x78, 0xd0, 0x3a, 0xe3, 0x29, 0xf2, + 0xed, 0xe2, 0x44, 0xe8, 0x43, 0xd8, 0x17, 0x87, 0x73, 0x23, 0x32, 0xa3, 0xed, 0x4a, 0x57, 0x3a, + 0x6a, 0xe0, 0xa6, 0xc0, 0x0c, 0x32, 0xa3, 0x48, 0x85, 0x5b, 0x49, 0x1a, 0xfb, 0x73, 0x8f, 0xa6, + 0x6e, 0x92, 0xc6, 0x3f, 0x52, 0x8f, 0xb9, 0xa1, 0xdf, 0xae, 0x72, 0xcb, 0x9b, 0xa5, 0xca, 0x2a, + 0x34, 0xba, 0xaf, 0xfc, 0x55, 0x85, 0x9b, 0x66, 0x99, 0x8e, 0x33, 0xca, 0x88, 0x4f, 0x18, 0x41, + 0x1f, 0x41, 0xab, 0xbc, 0x40, 0xee, 0x29, 0x6b, 0x4b, 0xdd, 0xea, 0x51, 0x03, 0x1f, 0x94, 0x68, + 0xee, 0x2b, 0x43, 0x67, 0x50, 0xcf, 0x18, 0x4d, 0xb2, 0x76, 0xa5, 0x5b, 0x3d, 0x6a, 0xf6, 0xbf, + 0x50, 0xb7, 0x5c, 0xb2, 0xba, 0xe1, 0x49, 0xb5, 0x19, 0x4d, 0x70, 0xc1, 0x82, 0x7a, 0x3c, 0xf6, + 0x20, 0xa5, 0x59, 0xe6, 0x26, 0x34, 0xf5, 0x68, 0xc4, 0x48, 0x40, 0x79, 0xec, 0x75, 0x8c, 0x4a, + 0x95, 0x75, 0xa9, 0x41, 0x4f, 0x00, 0x32, 0x46, 0x52, 0xe6, 0xe6, 0x39, 0x6d, 0xd7, 0xba, 0xd2, + 0x51, 0xb3, 0xdf, 0x29, 0x83, 0x28, 0x13, 0xae, 0x3a, 0x65, 0xc2, 0x71, 0x83, 0x5b, 0xe7, 0x72, + 0xe7, 0x2d, 0xd4, 0x72, 0xd7, 0xa8, 0x0b, 0x4d, 0x9f, 0x66, 0x5e, 0x1a, 0x26, 0x79, 0x58, 0x65, + 0x46, 0x97, 0x20, 0xf4, 0x02, 0x76, 0x8a, 0x6b, 0xe1, 0x0e, 0x5a, 0xfd, 0x27, 0xff, 0xe9, 0x94, + 0x39, 0x01, 0x16, 0x44, 0x4a, 0x00, 0x3b, 0x05, 0x82, 0xee, 0x00, 0xb2, 0x9d, 0x81, 0x73, 0x6e, + 0xbb, 0xe7, 0x86, 0x6d, 0x69, 0x43, 0xfd, 0x44, 0xd7, 0x46, 0xf2, 0x0d, 0xb4, 0x07, 0xb5, 0x91, + 0x69, 
0x68, 0xb2, 0x84, 0xde, 0x83, 0xa6, 0x61, 0x3a, 0xae, 0xed, 0x0c, 0xb0, 0xa3, 0x8d, 0xe4, + 0x4a, 0x0e, 0xe8, 0x86, 0x6b, 0x61, 0xf3, 0x14, 0x6b, 0xb6, 0x2d, 0x57, 0x11, 0xc0, 0xce, 0xc9, + 0x40, 0x1f, 0x6b, 0x23, 0xb9, 0x86, 0x0e, 0xa0, 0x31, 0x1c, 0x18, 0x43, 0x6d, 0x9c, 0x8b, 0x75, + 0xe5, 0x37, 0x09, 0x60, 0x14, 0x92, 0x20, 0x8a, 0x33, 0x16, 0x7a, 0xa8, 0x03, 0x7b, 0xd3, 0xd8, + 0xe3, 0xa1, 0xb5, 0x25, 0x7e, 0xd2, 0x4b, 0x19, 0x8d, 0xa0, 0xf6, 0x26, 0x8c, 0x7c, 0x9e, 0x81, + 0x56, 0xff, 0x93, 0xad, 0x87, 0xbc, 0xa2, 0x55, 0xbf, 0x0d, 0x23, 0x1f, 0xf3, 0xdd, 0xa8, 0x0d, + 0xbb, 0x33, 0x9a, 0x65, 0xe5, 0xb5, 0x35, 0x70, 0x29, 0x2a, 0x0f, 0xa0, 0x96, 0xdb, 0xa1, 0x26, + 0xec, 0x7e, 0x3f, 0xc0, 0x86, 0x6e, 0x9c, 0xca, 0x37, 0x50, 0x03, 0xea, 0x1a, 0xc6, 0x26, 0x96, + 0x25, 0x85, 0xc0, 0xfe, 0x90, 0x37, 0xb6, 0xcd, 0x0b, 0x0c, 0xb5, 0xa0, 0x12, 0xfa, 0xed, 0x3a, + 0x27, 0xa9, 0x84, 0x3e, 0x1a, 0x40, 0x7d, 0x12, 0x4e, 0x69, 0x59, 0x6b, 0x1f, 0x6f, 0x0d, 0xb0, + 0x60, 0x3b, 0x09, 0xa7, 0x14, 0x17, 0x3b, 0x95, 0x5f, 0x2b, 0x00, 0x57, 0x28, 0xfa, 0x00, 0x1a, + 0x39, 0xee, 0x26, 0x84, 0xbd, 0x2e, 0xd3, 0x91, 0x03, 0x16, 0x61, 0xaf, 0xd1, 0x23, 0x38, 0xe0, + 0x4a, 0x2f, 0x8e, 0x18, 0x8d, 0x58, 0xc6, 0x8f, 0xb3, 0x8f, 0xf7, 0x73, 0x70, 0x28, 0x30, 0xf4, + 0x42, 0x30, 0xb0, 0x45, 0x42, 0x45, 0x75, 0x7c, 0xf6, 0x2f, 0xe2, 0x52, 0xf3, 0x1f, 0x67, 0x91, + 0xd0, 0xc2, 0x6f, 0xbe, 0x52, 0x7e, 0x92, 0x60, 0xaf, 0x84, 0xd1, 0x5d, 0xb8, 0x7d, 0xa2, 0x8f, + 0x35, 0xd7, 0x79, 0x69, 0x69, 0x6b, 0x05, 0x72, 0x08, 0xb7, 0x6c, 0x0d, 0x7f, 0xa7, 0x0f, 0x35, + 0x77, 0x68, 0x1a, 0x27, 0xfa, 0xa9, 0xfb, 0x72, 0x70, 0x36, 0x96, 0x25, 0x74, 0x13, 0x0e, 0x4c, + 0x4b, 0x33, 0xdc, 0x81, 0xa5, 0xbb, 0xdf, 0xd8, 0xa6, 0x21, 0x57, 0x56, 0x20, 0x6e, 0x55, 0x45, + 0xf7, 0xe1, 0x2e, 0x67, 0x1e, 0x69, 0xf6, 0x10, 0xeb, 0x96, 0x63, 0x62, 0xd7, 0xd6, 0x9c, 0xbc, + 0xaa, 0x1c, 0x53, 0xae, 0x29, 0x0f, 0xa1, 0x51, 0x84, 0x89, 0xe9, 0x04, 0x21, 0xa8, 0xf1, 0x69, + 0x53, 0xa4, 0x88, 0xaf, 0x15, 0x13, 0xf6, 
0x87, 0x7c, 0xfe, 0x62, 0x9a, 0xc4, 0x29, 0x43, 0xcf, + 0xa1, 0xb5, 0x32, 0x96, 0x8b, 0x81, 0xd1, 0xec, 0xb7, 0x97, 0xd3, 0x51, 0x50, 0x8a, 0x7d, 0x07, + 0xde, 0x92, 0x94, 0x29, 0x7f, 0xee, 0xc0, 0x2e, 0x8e, 0xa7, 0xd3, 0x78, 0xce, 0xd0, 0x7d, 0x80, + 0xb4, 0x58, 0xe6, 0xa3, 0xab, 0x70, 0xdb, 0x10, 0x88, 0xee, 0xa3, 0xa7, 0xd0, 0xf4, 0x52, 0x4a, + 0x18, 0x2d, 0xda, 0xbe, 0xb2, 0xb5, 0xed, 0xa1, 0x30, 0xcf, 0x81, 0x9c, 0xbb, 0x90, 0x7c, 0xf7, + 0xd5, 0x42, 0xd4, 0x68, 0x43, 0x20, 0xc7, 0x0b, 0x64, 0xac, 0x35, 0xfb, 0xe7, 0x5b, 0xaf, 0x53, + 0x04, 0x5d, 0xfe, 0xaf, 0x76, 0x3a, 0x7a, 0x0b, 0x6d, 0x96, 0x92, 0xc9, 0x24, 0xf4, 0xca, 0x89, + 0xe6, 0x66, 0x2c, 0x25, 0x8c, 0x06, 0x0b, 0x5e, 0xdb, 0xcd, 0xfe, 0xf3, 0x77, 0xf6, 0xe0, 0x14, + 0x44, 0x62, 0xfe, 0xd9, 0x82, 0xe6, 0xeb, 0x1b, 0xf8, 0x0e, 0xbb, 0x56, 0x83, 0x16, 0x70, 0xe8, + 0xd3, 0x29, 0x65, 0xd4, 0x2d, 0x5f, 0x8d, 0x4b, 0xdf, 0xbf, 0x4b, 0xdc, 0xf9, 0xb3, 0x77, 0x76, + 0x3e, 0xe2, 0x44, 0xe2, 0x21, 0x5a, 0xf2, 0x7d, 0xdb, 0xbf, 0x4e, 0xb1, 0xf1, 0x52, 0xed, 0x6d, + 0xbc, 0x54, 0x9d, 0x3f, 0x24, 0xb8, 0x73, 0xfd, 0x91, 0x50, 0x0a, 0xcd, 0xab, 0xf9, 0x5f, 0x96, + 0x92, 0xf5, 0x3f, 0x13, 0xa5, 0x5e, 0x3d, 0x1c, 0x99, 0x16, 0xb1, 0x74, 0x81, 0x97, 0x9d, 0x74, + 0x9e, 0x81, 0xbc, 0x6e, 0x80, 0x64, 0xa8, 0xbe, 0xa1, 0x0b, 0x51, 0x81, 0xf9, 0x12, 0xbd, 0x0f, + 0xf5, 0x0b, 0x32, 0x9d, 0x17, 0x55, 0x27, 0xe1, 0x42, 0xf8, 0xaa, 0xf2, 0xa5, 0xd4, 0x39, 0x84, + 0xdb, 0xd7, 0xe6, 0x48, 0x99, 0xc3, 0xc1, 0x4a, 0x6d, 0xa0, 0x07, 0xd0, 0xc1, 0xe6, 0x78, 0x6c, + 0x9e, 0xf3, 0xa9, 0xbe, 0x39, 0xfb, 0xd7, 0x06, 0xbc, 0x94, 0x8f, 0x4c, 0xfb, 0x7c, 0x38, 0xcc, + 0x85, 0xca, 0xea, 0x84, 0x5f, 0x1d, 0xfe, 0x4d, 0xd8, 0xb5, 0x34, 0x63, 0x94, 0x8f, 0xd6, 0xfa, + 0x31, 0xc0, 0x5e, 0x79, 0xdb, 0xc7, 0x3f, 0x4b, 0xf0, 0xc8, 0x8b, 0x67, 0xdb, 0x12, 0x78, 0xdc, + 0xc2, 0xe5, 0x57, 0x9b, 0x95, 0x77, 0x91, 0x25, 0xfd, 0x60, 0x89, 0x2d, 0x41, 0x3c, 0x25, 0x51, + 0xa0, 0xc6, 0x69, 0xd0, 0x0b, 0x68, 0xc4, 0x7b, 0x4c, 0x7c, 0x02, 0x92, 0x24, 
0xcc, 0xfe, 0xf1, + 0x33, 0xf0, 0xe9, 0x06, 0xf8, 0x4b, 0xa5, 0x76, 0x3a, 0xb0, 0xcf, 0x5e, 0xed, 0x70, 0x8e, 0x4f, + 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x64, 0x4d, 0x1e, 0x49, 0x0a, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/servicemanager.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/servicemanager.pb.go new file mode 100644 index 000000000..e274f75c1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/servicemanager.pb.go @@ -0,0 +1,1514 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/servicemanagement/v1/servicemanager.proto + +package servicemanagement + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_api21 "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" +import _ "google.golang.org/genproto/protobuf/field_mask" +import _ "github.com/golang/protobuf/ptypes/struct" +import _ "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type GetServiceConfigRequest_ConfigView int32 + +const ( + // Server response includes all fields except SourceInfo. + GetServiceConfigRequest_BASIC GetServiceConfigRequest_ConfigView = 0 + // Server response includes all fields including SourceInfo. + // SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' + // and are only available for configs created using the + // SubmitConfigSource method. 
+ GetServiceConfigRequest_FULL GetServiceConfigRequest_ConfigView = 1 +) + +var GetServiceConfigRequest_ConfigView_name = map[int32]string{ + 0: "BASIC", + 1: "FULL", +} +var GetServiceConfigRequest_ConfigView_value = map[string]int32{ + "BASIC": 0, + "FULL": 1, +} + +func (x GetServiceConfigRequest_ConfigView) String() string { + return proto.EnumName(GetServiceConfigRequest_ConfigView_name, int32(x)) +} +func (GetServiceConfigRequest_ConfigView) EnumDescriptor() ([]byte, []int) { + return fileDescriptor1, []int{7, 0} +} + +// Request message for `ListServices` method. +type ListServicesRequest struct { + // Include services produced by the specified project. + ProducerProjectId string `protobuf:"bytes,1,opt,name=producer_project_id,json=producerProjectId" json:"producer_project_id,omitempty"` + // Requested size of the next page of data. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Token identifying which result to start with; returned by a previous list + // call. + PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Include services consumed by the specified consumer. 
+ // + // The Google Service Management implementation accepts the following + // forms: + // - project: + ConsumerId string `protobuf:"bytes,7,opt,name=consumer_id,json=consumerId" json:"consumer_id,omitempty"` +} + +func (m *ListServicesRequest) Reset() { *m = ListServicesRequest{} } +func (m *ListServicesRequest) String() string { return proto.CompactTextString(m) } +func (*ListServicesRequest) ProtoMessage() {} +func (*ListServicesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *ListServicesRequest) GetProducerProjectId() string { + if m != nil { + return m.ProducerProjectId + } + return "" +} + +func (m *ListServicesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListServicesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListServicesRequest) GetConsumerId() string { + if m != nil { + return m.ConsumerId + } + return "" +} + +// Response message for `ListServices` method. +type ListServicesResponse struct { + // The returned services will only have the name field set. + Services []*ManagedService `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"` + // Token that can be passed to `ListServices` to resume a paginated query. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListServicesResponse) Reset() { *m = ListServicesResponse{} } +func (m *ListServicesResponse) String() string { return proto.CompactTextString(m) } +func (*ListServicesResponse) ProtoMessage() {} +func (*ListServicesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *ListServicesResponse) GetServices() []*ManagedService { + if m != nil { + return m.Services + } + return nil +} + +func (m *ListServicesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for `GetService` method. +type GetServiceRequest struct { + // The name of the service. See the `ServiceManager` overview for naming + // requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` +} + +func (m *GetServiceRequest) Reset() { *m = GetServiceRequest{} } +func (m *GetServiceRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceRequest) ProtoMessage() {} +func (*GetServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *GetServiceRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +// Request message for CreateService method. +type CreateServiceRequest struct { + // Initial values for the service resource. 
+ Service *ManagedService `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *CreateServiceRequest) Reset() { *m = CreateServiceRequest{} } +func (m *CreateServiceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateServiceRequest) ProtoMessage() {} +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *CreateServiceRequest) GetService() *ManagedService { + if m != nil { + return m.Service + } + return nil +} + +// Request message for DeleteService method. +type DeleteServiceRequest struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` +} + +func (m *DeleteServiceRequest) Reset() { *m = DeleteServiceRequest{} } +func (m *DeleteServiceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteServiceRequest) ProtoMessage() {} +func (*DeleteServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *DeleteServiceRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +// Request message for UndeleteService method. +type UndeleteServiceRequest struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. 
+ ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` +} + +func (m *UndeleteServiceRequest) Reset() { *m = UndeleteServiceRequest{} } +func (m *UndeleteServiceRequest) String() string { return proto.CompactTextString(m) } +func (*UndeleteServiceRequest) ProtoMessage() {} +func (*UndeleteServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *UndeleteServiceRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +// Response message for UndeleteService method. +type UndeleteServiceResponse struct { + // Revived service resource. + Service *ManagedService `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *UndeleteServiceResponse) Reset() { *m = UndeleteServiceResponse{} } +func (m *UndeleteServiceResponse) String() string { return proto.CompactTextString(m) } +func (*UndeleteServiceResponse) ProtoMessage() {} +func (*UndeleteServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *UndeleteServiceResponse) GetService() *ManagedService { + if m != nil { + return m.Service + } + return nil +} + +// Request message for GetServiceConfig method. +type GetServiceConfigRequest struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The id of the service configuration resource. + ConfigId string `protobuf:"bytes,2,opt,name=config_id,json=configId" json:"config_id,omitempty"` + // Specifies which parts of the Service Config should be returned in the + // response. 
+ View GetServiceConfigRequest_ConfigView `protobuf:"varint,3,opt,name=view,enum=google.api.servicemanagement.v1.GetServiceConfigRequest_ConfigView" json:"view,omitempty"` +} + +func (m *GetServiceConfigRequest) Reset() { *m = GetServiceConfigRequest{} } +func (m *GetServiceConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceConfigRequest) ProtoMessage() {} +func (*GetServiceConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *GetServiceConfigRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *GetServiceConfigRequest) GetConfigId() string { + if m != nil { + return m.ConfigId + } + return "" +} + +func (m *GetServiceConfigRequest) GetView() GetServiceConfigRequest_ConfigView { + if m != nil { + return m.View + } + return GetServiceConfigRequest_BASIC +} + +// Request message for ListServiceConfigs method. +type ListServiceConfigsRequest struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The token of the page to retrieve. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The max number of items to include in the response list. 
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListServiceConfigsRequest) Reset() { *m = ListServiceConfigsRequest{} } +func (m *ListServiceConfigsRequest) String() string { return proto.CompactTextString(m) } +func (*ListServiceConfigsRequest) ProtoMessage() {} +func (*ListServiceConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *ListServiceConfigsRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *ListServiceConfigsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListServiceConfigsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for ListServiceConfigs method. +type ListServiceConfigsResponse struct { + // The list of service configuration resources. + ServiceConfigs []*google_api21.Service `protobuf:"bytes,1,rep,name=service_configs,json=serviceConfigs" json:"service_configs,omitempty"` + // The token of the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListServiceConfigsResponse) Reset() { *m = ListServiceConfigsResponse{} } +func (m *ListServiceConfigsResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceConfigsResponse) ProtoMessage() {} +func (*ListServiceConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *ListServiceConfigsResponse) GetServiceConfigs() []*google_api21.Service { + if m != nil { + return m.ServiceConfigs + } + return nil +} + +func (m *ListServiceConfigsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for CreateServiceConfig method. +type CreateServiceConfigRequest struct { + // The name of the service. 
See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The service configuration resource. + ServiceConfig *google_api21.Service `protobuf:"bytes,2,opt,name=service_config,json=serviceConfig" json:"service_config,omitempty"` +} + +func (m *CreateServiceConfigRequest) Reset() { *m = CreateServiceConfigRequest{} } +func (m *CreateServiceConfigRequest) String() string { return proto.CompactTextString(m) } +func (*CreateServiceConfigRequest) ProtoMessage() {} +func (*CreateServiceConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *CreateServiceConfigRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *CreateServiceConfigRequest) GetServiceConfig() *google_api21.Service { + if m != nil { + return m.ServiceConfig + } + return nil +} + +// Request message for SubmitConfigSource method. +type SubmitConfigSourceRequest struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The source configuration for the service. + ConfigSource *ConfigSource `protobuf:"bytes,2,opt,name=config_source,json=configSource" json:"config_source,omitempty"` + // Optional. If set, this will result in the generation of a + // `google.api.Service` configuration based on the `ConfigSource` provided, + // but the generated config and the sources will NOT be persisted. 
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly" json:"validate_only,omitempty"` +} + +func (m *SubmitConfigSourceRequest) Reset() { *m = SubmitConfigSourceRequest{} } +func (m *SubmitConfigSourceRequest) String() string { return proto.CompactTextString(m) } +func (*SubmitConfigSourceRequest) ProtoMessage() {} +func (*SubmitConfigSourceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *SubmitConfigSourceRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *SubmitConfigSourceRequest) GetConfigSource() *ConfigSource { + if m != nil { + return m.ConfigSource + } + return nil +} + +func (m *SubmitConfigSourceRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// Response message for SubmitConfigSource method. +type SubmitConfigSourceResponse struct { + // The generated service configuration. + ServiceConfig *google_api21.Service `protobuf:"bytes,1,opt,name=service_config,json=serviceConfig" json:"service_config,omitempty"` +} + +func (m *SubmitConfigSourceResponse) Reset() { *m = SubmitConfigSourceResponse{} } +func (m *SubmitConfigSourceResponse) String() string { return proto.CompactTextString(m) } +func (*SubmitConfigSourceResponse) ProtoMessage() {} +func (*SubmitConfigSourceResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *SubmitConfigSourceResponse) GetServiceConfig() *google_api21.Service { + if m != nil { + return m.ServiceConfig + } + return nil +} + +// Request message for 'CreateServiceRollout' +type CreateServiceRolloutRequest struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The rollout resource. 
The `service_name` field is output only. + Rollout *Rollout `protobuf:"bytes,2,opt,name=rollout" json:"rollout,omitempty"` +} + +func (m *CreateServiceRolloutRequest) Reset() { *m = CreateServiceRolloutRequest{} } +func (m *CreateServiceRolloutRequest) String() string { return proto.CompactTextString(m) } +func (*CreateServiceRolloutRequest) ProtoMessage() {} +func (*CreateServiceRolloutRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *CreateServiceRolloutRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *CreateServiceRolloutRequest) GetRollout() *Rollout { + if m != nil { + return m.Rollout + } + return nil +} + +// Request message for 'ListServiceRollouts' +type ListServiceRolloutsRequest struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The token of the page to retrieve. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The max number of items to include in the response list. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Use `filter` to return subset of rollouts. 
+ // The following filters are supported: + // -- To limit the results to only those in + // [status](google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS', + // use filter='status=SUCCESS' + // -- To limit the results to those in + // [status](google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED' + // or 'FAILED', use filter='status=CANCELLED OR status=FAILED' + Filter string `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListServiceRolloutsRequest) Reset() { *m = ListServiceRolloutsRequest{} } +func (m *ListServiceRolloutsRequest) String() string { return proto.CompactTextString(m) } +func (*ListServiceRolloutsRequest) ProtoMessage() {} +func (*ListServiceRolloutsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +func (m *ListServiceRolloutsRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *ListServiceRolloutsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListServiceRolloutsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListServiceRolloutsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// Response message for ListServiceRollouts method. +type ListServiceRolloutsResponse struct { + // The list of rollout resources. + Rollouts []*Rollout `protobuf:"bytes,1,rep,name=rollouts" json:"rollouts,omitempty"` + // The token of the next page of results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListServiceRolloutsResponse) Reset() { *m = ListServiceRolloutsResponse{} } +func (m *ListServiceRolloutsResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceRolloutsResponse) ProtoMessage() {} +func (*ListServiceRolloutsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } + +func (m *ListServiceRolloutsResponse) GetRollouts() []*Rollout { + if m != nil { + return m.Rollouts + } + return nil +} + +func (m *ListServiceRolloutsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for GetServiceRollout method. +type GetServiceRolloutRequest struct { + // The name of the service. See the [overview](/service-management/overview) + // for naming requirements. For example: `example.googleapis.com`. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The id of the rollout resource. + RolloutId string `protobuf:"bytes,2,opt,name=rollout_id,json=rolloutId" json:"rollout_id,omitempty"` +} + +func (m *GetServiceRolloutRequest) Reset() { *m = GetServiceRolloutRequest{} } +func (m *GetServiceRolloutRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceRolloutRequest) ProtoMessage() {} +func (*GetServiceRolloutRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } + +func (m *GetServiceRolloutRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *GetServiceRolloutRequest) GetRolloutId() string { + if m != nil { + return m.RolloutId + } + return "" +} + +// Request message for EnableService method. +type EnableServiceRequest struct { + // Name of the service to enable. Specifying an unknown service name will + // cause the request to fail. 
+ ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The identity of consumer resource which service enablement will be + // applied to. + // + // The Google Service Management implementation accepts the following + // forms: + // - "project:" + // + // Note: this is made compatible with + // google.api.servicecontrol.v1.Operation.consumer_id. + ConsumerId string `protobuf:"bytes,2,opt,name=consumer_id,json=consumerId" json:"consumer_id,omitempty"` +} + +func (m *EnableServiceRequest) Reset() { *m = EnableServiceRequest{} } +func (m *EnableServiceRequest) String() string { return proto.CompactTextString(m) } +func (*EnableServiceRequest) ProtoMessage() {} +func (*EnableServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } + +func (m *EnableServiceRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *EnableServiceRequest) GetConsumerId() string { + if m != nil { + return m.ConsumerId + } + return "" +} + +// Request message for DisableService method. +type DisableServiceRequest struct { + // Name of the service to disable. Specifying an unknown service name + // will cause the request to fail. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The identity of consumer resource which service disablement will be + // applied to. + // + // The Google Service Management implementation accepts the following + // forms: + // - "project:" + // + // Note: this is made compatible with + // google.api.servicecontrol.v1.Operation.consumer_id. 
+ ConsumerId string `protobuf:"bytes,2,opt,name=consumer_id,json=consumerId" json:"consumer_id,omitempty"` +} + +func (m *DisableServiceRequest) Reset() { *m = DisableServiceRequest{} } +func (m *DisableServiceRequest) String() string { return proto.CompactTextString(m) } +func (*DisableServiceRequest) ProtoMessage() {} +func (*DisableServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} } + +func (m *DisableServiceRequest) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *DisableServiceRequest) GetConsumerId() string { + if m != nil { + return m.ConsumerId + } + return "" +} + +// Request message for GenerateConfigReport method. +type GenerateConfigReportRequest struct { + // Service configuration for which we want to generate the report. + // For this version of API, the supported types are + // [google.api.servicemanagement.v1.ConfigRef][google.api.servicemanagement.v1.ConfigRef], + // [google.api.servicemanagement.v1.ConfigSource][google.api.servicemanagement.v1.ConfigSource], + // and [google.api.Service][google.api.Service] + NewConfig *google_protobuf1.Any `protobuf:"bytes,1,opt,name=new_config,json=newConfig" json:"new_config,omitempty"` + // Service configuration against which the comparison will be done. 
+ // For this version of API, the supported types are + // [google.api.servicemanagement.v1.ConfigRef][google.api.servicemanagement.v1.ConfigRef], + // [google.api.servicemanagement.v1.ConfigSource][google.api.servicemanagement.v1.ConfigSource], + // and [google.api.Service][google.api.Service] + OldConfig *google_protobuf1.Any `protobuf:"bytes,2,opt,name=old_config,json=oldConfig" json:"old_config,omitempty"` +} + +func (m *GenerateConfigReportRequest) Reset() { *m = GenerateConfigReportRequest{} } +func (m *GenerateConfigReportRequest) String() string { return proto.CompactTextString(m) } +func (*GenerateConfigReportRequest) ProtoMessage() {} +func (*GenerateConfigReportRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} } + +func (m *GenerateConfigReportRequest) GetNewConfig() *google_protobuf1.Any { + if m != nil { + return m.NewConfig + } + return nil +} + +func (m *GenerateConfigReportRequest) GetOldConfig() *google_protobuf1.Any { + if m != nil { + return m.OldConfig + } + return nil +} + +// Response message for GenerateConfigReport method. +type GenerateConfigReportResponse struct { + // Name of the service this report belongs to. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // ID of the service configuration this report belongs to. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // list of ChangeReport, each corresponding to comparison between two + // service configurations. + ChangeReports []*ChangeReport `protobuf:"bytes,3,rep,name=change_reports,json=changeReports" json:"change_reports,omitempty"` + // Errors / Linter warnings associated with the service definition this + // report + // belongs to. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics" json:"diagnostics,omitempty"` +} + +func (m *GenerateConfigReportResponse) Reset() { *m = GenerateConfigReportResponse{} } +func (m *GenerateConfigReportResponse) String() string { return proto.CompactTextString(m) } +func (*GenerateConfigReportResponse) ProtoMessage() {} +func (*GenerateConfigReportResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{20} } + +func (m *GenerateConfigReportResponse) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *GenerateConfigReportResponse) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *GenerateConfigReportResponse) GetChangeReports() []*ChangeReport { + if m != nil { + return m.ChangeReports + } + return nil +} + +func (m *GenerateConfigReportResponse) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func init() { + proto.RegisterType((*ListServicesRequest)(nil), "google.api.servicemanagement.v1.ListServicesRequest") + proto.RegisterType((*ListServicesResponse)(nil), "google.api.servicemanagement.v1.ListServicesResponse") + proto.RegisterType((*GetServiceRequest)(nil), "google.api.servicemanagement.v1.GetServiceRequest") + proto.RegisterType((*CreateServiceRequest)(nil), "google.api.servicemanagement.v1.CreateServiceRequest") + proto.RegisterType((*DeleteServiceRequest)(nil), "google.api.servicemanagement.v1.DeleteServiceRequest") + proto.RegisterType((*UndeleteServiceRequest)(nil), "google.api.servicemanagement.v1.UndeleteServiceRequest") + proto.RegisterType((*UndeleteServiceResponse)(nil), "google.api.servicemanagement.v1.UndeleteServiceResponse") + proto.RegisterType((*GetServiceConfigRequest)(nil), "google.api.servicemanagement.v1.GetServiceConfigRequest") + proto.RegisterType((*ListServiceConfigsRequest)(nil), "google.api.servicemanagement.v1.ListServiceConfigsRequest") + 
proto.RegisterType((*ListServiceConfigsResponse)(nil), "google.api.servicemanagement.v1.ListServiceConfigsResponse") + proto.RegisterType((*CreateServiceConfigRequest)(nil), "google.api.servicemanagement.v1.CreateServiceConfigRequest") + proto.RegisterType((*SubmitConfigSourceRequest)(nil), "google.api.servicemanagement.v1.SubmitConfigSourceRequest") + proto.RegisterType((*SubmitConfigSourceResponse)(nil), "google.api.servicemanagement.v1.SubmitConfigSourceResponse") + proto.RegisterType((*CreateServiceRolloutRequest)(nil), "google.api.servicemanagement.v1.CreateServiceRolloutRequest") + proto.RegisterType((*ListServiceRolloutsRequest)(nil), "google.api.servicemanagement.v1.ListServiceRolloutsRequest") + proto.RegisterType((*ListServiceRolloutsResponse)(nil), "google.api.servicemanagement.v1.ListServiceRolloutsResponse") + proto.RegisterType((*GetServiceRolloutRequest)(nil), "google.api.servicemanagement.v1.GetServiceRolloutRequest") + proto.RegisterType((*EnableServiceRequest)(nil), "google.api.servicemanagement.v1.EnableServiceRequest") + proto.RegisterType((*DisableServiceRequest)(nil), "google.api.servicemanagement.v1.DisableServiceRequest") + proto.RegisterType((*GenerateConfigReportRequest)(nil), "google.api.servicemanagement.v1.GenerateConfigReportRequest") + proto.RegisterType((*GenerateConfigReportResponse)(nil), "google.api.servicemanagement.v1.GenerateConfigReportResponse") + proto.RegisterEnum("google.api.servicemanagement.v1.GetServiceConfigRequest_ConfigView", GetServiceConfigRequest_ConfigView_name, GetServiceConfigRequest_ConfigView_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for ServiceManager service + +type ServiceManagerClient interface { + // Lists managed services. + // + // Returns all public services. For authenticated users, also returns all + // services the calling user has "servicemanagement.services.get" permission + // for. + // + // **BETA:** If the caller specifies the `consumer_id`, it returns only the + // services enabled on the consumer. The `consumer_id` must have the format + // of "project:{PROJECT-ID}". + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Gets a managed service. Authentication is required unless the service is + // public. + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*ManagedService, error) + // Creates a new managed service. + // Please note one producer project can own no more than 20 services. + // + // Operation + CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes a managed service. This method will change the service to the + // `Soft-Delete` state for 30 days. Within this period, service producers may + // call [UndeleteService][google.api.servicemanagement.v1.ServiceManager.UndeleteService] to restore the service. + // After 30 days, the service will be permanently deleted. + // + // Operation + DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Revives a previously deleted managed service. The method restores the + // service using the configuration at the time the service was deleted. + // The target service must exist and must have been deleted within the + // last 30 days. 
+ // + // Operation + UndeleteService(ctx context.Context, in *UndeleteServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Lists the history of the service configuration for a managed service, + // from the newest to the oldest. + ListServiceConfigs(ctx context.Context, in *ListServiceConfigsRequest, opts ...grpc.CallOption) (*ListServiceConfigsResponse, error) + // Gets a service configuration (version) for a managed service. + GetServiceConfig(ctx context.Context, in *GetServiceConfigRequest, opts ...grpc.CallOption) (*google_api21.Service, error) + // Creates a new service configuration (version) for a managed service. + // This method only stores the service configuration. To roll out the service + // configuration to backend systems please call + // [CreateServiceRollout][google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout]. + CreateServiceConfig(ctx context.Context, in *CreateServiceConfigRequest, opts ...grpc.CallOption) (*google_api21.Service, error) + // Creates a new service configuration (version) for a managed service based + // on + // user-supplied configuration source files (for example: OpenAPI + // Specification). This method stores the source configurations as well as the + // generated service configuration. To rollout the service configuration to + // other services, + // please call [CreateServiceRollout][google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout]. + // + // Operation + SubmitConfigSource(ctx context.Context, in *SubmitConfigSourceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Lists the history of the service configuration rollouts for a managed + // service, from the newest to the oldest. + ListServiceRollouts(ctx context.Context, in *ListServiceRolloutsRequest, opts ...grpc.CallOption) (*ListServiceRolloutsResponse, error) + // Gets a service configuration [rollout][google.api.servicemanagement.v1.Rollout]. 
+ GetServiceRollout(ctx context.Context, in *GetServiceRolloutRequest, opts ...grpc.CallOption) (*Rollout, error) + // Creates a new service configuration rollout. Based on rollout, the + // Google Service Management will roll out the service configurations to + // different backend services. For example, the logging configuration will be + // pushed to Google Cloud Logging. + // + // Please note that any previous pending and running Rollouts and associated + // Operations will be automatically cancelled so that the latest Rollout will + // not be blocked by previous Rollouts. + // + // Operation + CreateServiceRollout(ctx context.Context, in *CreateServiceRolloutRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Generates and returns a report (errors, warnings and changes from + // existing configurations) associated with + // GenerateConfigReportRequest.new_value + // + // If GenerateConfigReportRequest.old_value is specified, + // GenerateConfigReportRequest will contain a single ChangeReport based on the + // comparison between GenerateConfigReportRequest.new_value and + // GenerateConfigReportRequest.old_value. + // If GenerateConfigReportRequest.old_value is not specified, this method + // will compare GenerateConfigReportRequest.new_value with the last pushed + // service configuration. + GenerateConfigReport(ctx context.Context, in *GenerateConfigReportRequest, opts ...grpc.CallOption) (*GenerateConfigReportResponse, error) + // Enables a [service][google.api.servicemanagement.v1.ManagedService] for a project, so it can be used + // for the project. See + // [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for + // more information. 
+ // + // Operation + EnableService(ctx context.Context, in *EnableServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Disables a [service][google.api.servicemanagement.v1.ManagedService] for a project, so it can no longer be + // be used for the project. It prevents accidental usage that may cause + // unexpected billing charges or security leaks. + // + // Operation + DisableService(ctx context.Context, in *DisableServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type serviceManagerClient struct { + cc *grpc.ClientConn +} + +func NewServiceManagerClient(cc *grpc.ClientConn) ServiceManagerClient { + return &serviceManagerClient{cc} +} + +func (c *serviceManagerClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/ListServices", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*ManagedService, error) { + out := new(ManagedService) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/GetService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/CreateService", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/DeleteService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) UndeleteService(ctx context.Context, in *UndeleteServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/UndeleteService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) ListServiceConfigs(ctx context.Context, in *ListServiceConfigsRequest, opts ...grpc.CallOption) (*ListServiceConfigsResponse, error) { + out := new(ListServiceConfigsResponse) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/ListServiceConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) GetServiceConfig(ctx context.Context, in *GetServiceConfigRequest, opts ...grpc.CallOption) (*google_api21.Service, error) { + out := new(google_api21.Service) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/GetServiceConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) CreateServiceConfig(ctx context.Context, in *CreateServiceConfigRequest, opts ...grpc.CallOption) (*google_api21.Service, error) { + out := new(google_api21.Service) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/CreateServiceConfig", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) SubmitConfigSource(ctx context.Context, in *SubmitConfigSourceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/SubmitConfigSource", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) ListServiceRollouts(ctx context.Context, in *ListServiceRolloutsRequest, opts ...grpc.CallOption) (*ListServiceRolloutsResponse, error) { + out := new(ListServiceRolloutsResponse) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/ListServiceRollouts", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) GetServiceRollout(ctx context.Context, in *GetServiceRolloutRequest, opts ...grpc.CallOption) (*Rollout, error) { + out := new(Rollout) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/GetServiceRollout", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) CreateServiceRollout(ctx context.Context, in *CreateServiceRolloutRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/CreateServiceRollout", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) GenerateConfigReport(ctx context.Context, in *GenerateConfigReportRequest, opts ...grpc.CallOption) (*GenerateConfigReportResponse, error) { + out := new(GenerateConfigReportResponse) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/GenerateConfigReport", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) EnableService(ctx context.Context, in *EnableServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/EnableService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceManagerClient) DisableService(ctx context.Context, in *DisableServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.api.servicemanagement.v1.ServiceManager/DisableService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ServiceManager service + +type ServiceManagerServer interface { + // Lists managed services. + // + // Returns all public services. For authenticated users, also returns all + // services the calling user has "servicemanagement.services.get" permission + // for. + // + // **BETA:** If the caller specifies the `consumer_id`, it returns only the + // services enabled on the consumer. The `consumer_id` must have the format + // of "project:{PROJECT-ID}". + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Gets a managed service. Authentication is required unless the service is + // public. + GetService(context.Context, *GetServiceRequest) (*ManagedService, error) + // Creates a new managed service. + // Please note one producer project can own no more than 20 services. + // + // Operation + CreateService(context.Context, *CreateServiceRequest) (*google_longrunning.Operation, error) + // Deletes a managed service. This method will change the service to the + // `Soft-Delete` state for 30 days. 
Within this period, service producers may + // call [UndeleteService][google.api.servicemanagement.v1.ServiceManager.UndeleteService] to restore the service. + // After 30 days, the service will be permanently deleted. + // + // Operation + DeleteService(context.Context, *DeleteServiceRequest) (*google_longrunning.Operation, error) + // Revives a previously deleted managed service. The method restores the + // service using the configuration at the time the service was deleted. + // The target service must exist and must have been deleted within the + // last 30 days. + // + // Operation + UndeleteService(context.Context, *UndeleteServiceRequest) (*google_longrunning.Operation, error) + // Lists the history of the service configuration for a managed service, + // from the newest to the oldest. + ListServiceConfigs(context.Context, *ListServiceConfigsRequest) (*ListServiceConfigsResponse, error) + // Gets a service configuration (version) for a managed service. + GetServiceConfig(context.Context, *GetServiceConfigRequest) (*google_api21.Service, error) + // Creates a new service configuration (version) for a managed service. + // This method only stores the service configuration. To roll out the service + // configuration to backend systems please call + // [CreateServiceRollout][google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout]. + CreateServiceConfig(context.Context, *CreateServiceConfigRequest) (*google_api21.Service, error) + // Creates a new service configuration (version) for a managed service based + // on + // user-supplied configuration source files (for example: OpenAPI + // Specification). This method stores the source configurations as well as the + // generated service configuration. To rollout the service configuration to + // other services, + // please call [CreateServiceRollout][google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout]. 
+ // + // Operation + SubmitConfigSource(context.Context, *SubmitConfigSourceRequest) (*google_longrunning.Operation, error) + // Lists the history of the service configuration rollouts for a managed + // service, from the newest to the oldest. + ListServiceRollouts(context.Context, *ListServiceRolloutsRequest) (*ListServiceRolloutsResponse, error) + // Gets a service configuration [rollout][google.api.servicemanagement.v1.Rollout]. + GetServiceRollout(context.Context, *GetServiceRolloutRequest) (*Rollout, error) + // Creates a new service configuration rollout. Based on rollout, the + // Google Service Management will roll out the service configurations to + // different backend services. For example, the logging configuration will be + // pushed to Google Cloud Logging. + // + // Please note that any previous pending and running Rollouts and associated + // Operations will be automatically cancelled so that the latest Rollout will + // not be blocked by previous Rollouts. + // + // Operation + CreateServiceRollout(context.Context, *CreateServiceRolloutRequest) (*google_longrunning.Operation, error) + // Generates and returns a report (errors, warnings and changes from + // existing configurations) associated with + // GenerateConfigReportRequest.new_value + // + // If GenerateConfigReportRequest.old_value is specified, + // GenerateConfigReportRequest will contain a single ChangeReport based on the + // comparison between GenerateConfigReportRequest.new_value and + // GenerateConfigReportRequest.old_value. + // If GenerateConfigReportRequest.old_value is not specified, this method + // will compare GenerateConfigReportRequest.new_value with the last pushed + // service configuration. + GenerateConfigReport(context.Context, *GenerateConfigReportRequest) (*GenerateConfigReportResponse, error) + // Enables a [service][google.api.servicemanagement.v1.ManagedService] for a project, so it can be used + // for the project. 
See + // [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for + // more information. + // + // Operation + EnableService(context.Context, *EnableServiceRequest) (*google_longrunning.Operation, error) + // Disables a [service][google.api.servicemanagement.v1.ManagedService] for a project, so it can no longer be + // be used for the project. It prevents accidental usage that may cause + // unexpected billing charges or security leaks. + // + // Operation + DisableService(context.Context, *DisableServiceRequest) (*google_longrunning.Operation, error) +} + +func RegisterServiceManagerServer(s *grpc.Server, srv ServiceManagerServer) { + s.RegisterService(&_ServiceManager_serviceDesc, srv) +} + +func _ServiceManager_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ServiceManagerServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).CreateService(ctx, req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).DeleteService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/DeleteService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).DeleteService(ctx, req.(*DeleteServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_UndeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UndeleteServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).UndeleteService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.api.servicemanagement.v1.ServiceManager/UndeleteService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).UndeleteService(ctx, req.(*UndeleteServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_ListServiceConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).ListServiceConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/ListServiceConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).ListServiceConfigs(ctx, req.(*ListServiceConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_GetServiceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).GetServiceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/GetServiceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).GetServiceConfig(ctx, req.(*GetServiceConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_CreateServiceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceConfigRequest) + if err := dec(in); err != nil { 
+ return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).CreateServiceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/CreateServiceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).CreateServiceConfig(ctx, req.(*CreateServiceConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_SubmitConfigSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SubmitConfigSourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).SubmitConfigSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/SubmitConfigSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).SubmitConfigSource(ctx, req.(*SubmitConfigSourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_ListServiceRollouts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceRolloutsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).ListServiceRollouts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/ListServiceRollouts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).ListServiceRollouts(ctx, req.(*ListServiceRolloutsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ServiceManager_GetServiceRollout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRolloutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).GetServiceRollout(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/GetServiceRollout", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).GetServiceRollout(ctx, req.(*GetServiceRolloutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_CreateServiceRollout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRolloutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).CreateServiceRollout(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/CreateServiceRollout", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).CreateServiceRollout(ctx, req.(*CreateServiceRolloutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_GenerateConfigReport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GenerateConfigReportRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).GenerateConfigReport(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/GenerateConfigReport", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).GenerateConfigReport(ctx, req.(*GenerateConfigReportRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_EnableService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EnableServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).EnableService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/EnableService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).EnableService(ctx, req.(*EnableServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceManager_DisableService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DisableServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceManagerServer).DisableService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.api.servicemanagement.v1.ServiceManager/DisableService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceManagerServer).DisableService(ctx, req.(*DisableServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ServiceManager_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.api.servicemanagement.v1.ServiceManager", + HandlerType: (*ServiceManagerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListServices", + Handler: _ServiceManager_ListServices_Handler, + }, + { + MethodName: "GetService", + Handler: _ServiceManager_GetService_Handler, + }, + { + 
MethodName: "CreateService", + Handler: _ServiceManager_CreateService_Handler, + }, + { + MethodName: "DeleteService", + Handler: _ServiceManager_DeleteService_Handler, + }, + { + MethodName: "UndeleteService", + Handler: _ServiceManager_UndeleteService_Handler, + }, + { + MethodName: "ListServiceConfigs", + Handler: _ServiceManager_ListServiceConfigs_Handler, + }, + { + MethodName: "GetServiceConfig", + Handler: _ServiceManager_GetServiceConfig_Handler, + }, + { + MethodName: "CreateServiceConfig", + Handler: _ServiceManager_CreateServiceConfig_Handler, + }, + { + MethodName: "SubmitConfigSource", + Handler: _ServiceManager_SubmitConfigSource_Handler, + }, + { + MethodName: "ListServiceRollouts", + Handler: _ServiceManager_ListServiceRollouts_Handler, + }, + { + MethodName: "GetServiceRollout", + Handler: _ServiceManager_GetServiceRollout_Handler, + }, + { + MethodName: "CreateServiceRollout", + Handler: _ServiceManager_CreateServiceRollout_Handler, + }, + { + MethodName: "GenerateConfigReport", + Handler: _ServiceManager_GenerateConfigReport_Handler, + }, + { + MethodName: "EnableService", + Handler: _ServiceManager_EnableService_Handler, + }, + { + MethodName: "DisableService", + Handler: _ServiceManager_DisableService_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/api/servicemanagement/v1/servicemanager.proto", +} + +func init() { + proto.RegisterFile("google/api/servicemanagement/v1/servicemanager.proto", fileDescriptor1) +} + +var fileDescriptor1 = []byte{ + // 1420 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0x67, 0x9c, 0xb4, 0x4d, 0x5e, 0x62, 0xb7, 0x9d, 0xb8, 0x8d, 0xeb, 0x34, 0x6a, 0xba, 0x81, + 0x12, 0xa5, 0xd4, 0xab, 0xa4, 0xff, 0xa8, 0x53, 0x0e, 0x4d, 0x02, 0x55, 0x44, 0x4b, 0x23, 0xa7, + 0x05, 0x54, 0x90, 0xac, 0xcd, 0xee, 0x64, 0xbb, 0x74, 0x3d, 0x63, 0x76, 0xd6, 0x09, 0x69, 0x94, + 0x4b, 0x55, 0x09, 0x09, 0x4e, 0xa8, 0x40, 
0x25, 0x8e, 0x15, 0xe2, 0xc0, 0x81, 0x0b, 0x07, 0x24, + 0x0e, 0x48, 0x7c, 0x06, 0xc4, 0x37, 0xe0, 0x33, 0x70, 0x46, 0x9e, 0x9d, 0x75, 0x76, 0xec, 0x8d, + 0x77, 0xd7, 0x02, 0x8e, 0xfb, 0x66, 0x7e, 0xef, 0xfd, 0xe6, 0xcd, 0x7b, 0x33, 0xbf, 0x59, 0xb8, + 0x62, 0x33, 0x66, 0xbb, 0x44, 0x37, 0x9a, 0x8e, 0xce, 0x89, 0xb7, 0xed, 0x98, 0xa4, 0x61, 0x50, + 0xc3, 0x26, 0x0d, 0x42, 0x7d, 0x7d, 0x7b, 0x41, 0x35, 0x7a, 0x95, 0xa6, 0xc7, 0x7c, 0x86, 0xcf, + 0x05, 0xa8, 0x8a, 0xd1, 0x74, 0x2a, 0x3d, 0xa8, 0xca, 0xf6, 0x42, 0xf9, 0x6c, 0xc4, 0xad, 0x41, + 0x29, 0xf3, 0x0d, 0xdf, 0x61, 0x94, 0x07, 0xf0, 0xf2, 0xa9, 0xe8, 0x68, 0xcb, 0x7f, 0x24, 0xcd, + 0xa5, 0x5e, 0x2e, 0x72, 0x44, 0x4f, 0x62, 0xe9, 0x11, 0xce, 0x5a, 0x9e, 0x49, 0xc2, 0x08, 0xb3, + 0x12, 0xe0, 0x32, 0x6a, 0x7b, 0x2d, 0x4a, 0x1d, 0x6a, 0xeb, 0xac, 0x49, 0x3c, 0x85, 0xc6, 0x19, + 0x39, 0x49, 0x7c, 0x6d, 0xb6, 0xb6, 0x74, 0x83, 0xee, 0xca, 0xa1, 0x99, 0xee, 0xa1, 0x2d, 0x87, + 0xb8, 0x56, 0xbd, 0x61, 0xf0, 0xc7, 0x72, 0xc6, 0xd9, 0xee, 0x19, 0xdc, 0xf7, 0x5a, 0xa6, 0x2f, + 0x47, 0x27, 0xe5, 0xa8, 0xd7, 0x34, 0x75, 0xee, 0x1b, 0x7e, 0x4b, 0xc6, 0xd4, 0x5e, 0x22, 0x98, + 0xb8, 0xe3, 0x70, 0x7f, 0x23, 0x58, 0x05, 0xaf, 0x91, 0x4f, 0x5b, 0x84, 0xfb, 0xb8, 0x02, 0x13, + 0x4d, 0x8f, 0x59, 0x2d, 0x93, 0x78, 0xf5, 0xa6, 0xc7, 0x3e, 0x21, 0xa6, 0x5f, 0x77, 0xac, 0x12, + 0x9a, 0x41, 0x73, 0xa3, 0xb5, 0x93, 0xe1, 0xd0, 0x7a, 0x30, 0xb2, 0x66, 0xe1, 0x29, 0x18, 0x6d, + 0x1a, 0x36, 0xa9, 0x73, 0xe7, 0x09, 0x29, 0x1d, 0x99, 0x41, 0x73, 0x47, 0x6a, 0x23, 0x6d, 0xc3, + 0x86, 0xf3, 0x84, 0xe0, 0x69, 0x00, 0x31, 0xe8, 0xb3, 0xc7, 0x84, 0x96, 0x8e, 0x0a, 0x1f, 0x62, + 0xfa, 0xfd, 0xb6, 0x01, 0x9f, 0x83, 0x31, 0x93, 0x51, 0xde, 0x6a, 0x10, 0xaf, 0x1d, 0xe3, 0x98, + 0x18, 0x87, 0xd0, 0xb4, 0x66, 0x69, 0x5f, 0x22, 0x28, 0xaa, 0x24, 0x79, 0x93, 0x51, 0x4e, 0xf0, + 0xbb, 0x30, 0x22, 0xd3, 0xcf, 0x4b, 0x68, 0x66, 0x68, 0x6e, 0x6c, 0x51, 0xaf, 0x24, 0x94, 0x42, + 0xe5, 0xae, 0xf8, 0xb2, 0xa4, 0xaf, 0x5a, 0xc7, 0x01, 0xbe, 0x00, 0xc7, 0x29, 
0xf9, 0xcc, 0xaf, + 0x47, 0xa8, 0xe6, 0x04, 0x95, 0x7c, 0xdb, 0xbc, 0x1e, 0xd2, 0xd5, 0xae, 0xc1, 0xc9, 0xdb, 0x24, + 0xe4, 0x12, 0xe6, 0xeb, 0x3c, 0x8c, 0x4b, 0x47, 0x75, 0x6a, 0x34, 0x88, 0x4c, 0xd4, 0x98, 0xb4, + 0xbd, 0x67, 0x34, 0x88, 0x66, 0x40, 0x71, 0xc5, 0x23, 0x86, 0x4f, 0xba, 0xa0, 0x6b, 0x70, 0x4c, + 0x4e, 0x13, 0xa8, 0x01, 0xd6, 0x10, 0xe2, 0xb5, 0x1b, 0x50, 0x5c, 0x25, 0x2e, 0xe9, 0x09, 0x91, + 0x82, 0xdd, 0x12, 0x9c, 0x7e, 0x40, 0xad, 0x01, 0xc1, 0x16, 0x4c, 0xf6, 0x80, 0xe5, 0x16, 0xfd, + 0x8b, 0xab, 0xfb, 0x13, 0xc1, 0xe4, 0x41, 0xe6, 0x57, 0x18, 0xdd, 0x72, 0xec, 0xf4, 0x24, 0xdb, + 0x25, 0x6a, 0x0a, 0x4c, 0xbb, 0xc8, 0x82, 0x9d, 0x1d, 0x09, 0x0c, 0x6b, 0x16, 0xfe, 0x00, 0x86, + 0xb7, 0x1d, 0xb2, 0x53, 0x1a, 0x9a, 0x41, 0x73, 0x85, 0xc5, 0x95, 0x44, 0x8e, 0x87, 0xf0, 0xa8, + 0x04, 0x5f, 0xef, 0x3b, 0x64, 0xa7, 0x26, 0x1c, 0x6a, 0xe7, 0x01, 0x0e, 0x6c, 0x78, 0x14, 0x8e, + 0x2c, 0xdf, 0xda, 0x58, 0x5b, 0x39, 0xf1, 0x0a, 0x1e, 0x81, 0xe1, 0x77, 0x1e, 0xdc, 0xb9, 0x73, + 0x02, 0x69, 0x4f, 0xe0, 0x4c, 0xa4, 0xba, 0x83, 0xd9, 0x3c, 0xc3, 0xc2, 0xd4, 0xf6, 0xca, 0x75, + 0xb7, 0x97, 0xd2, 0x9a, 0x43, 0x6a, 0x6b, 0x6a, 0x4f, 0x11, 0x94, 0xe3, 0x82, 0xcb, 0xdd, 0xbb, + 0x09, 0xc7, 0xc3, 0xe8, 0x41, 0xaa, 0xc2, 0x3e, 0x9b, 0x88, 0x66, 0x28, 0xdc, 0xa9, 0x02, 0x57, + 0xbc, 0xa4, 0xee, 0xa8, 0x3d, 0x28, 0x2b, 0x9d, 0x91, 0x79, 0x6b, 0xab, 0x50, 0x50, 0x69, 0x8a, + 0x38, 0x87, 0xb0, 0xcc, 0x2b, 0x2c, 0xb5, 0x5f, 0x10, 0x9c, 0xd9, 0x68, 0x6d, 0x36, 0x1c, 0x3f, + 0x30, 0x6c, 0x88, 0x83, 0x3b, 0x43, 0xf0, 0x1a, 0xe4, 0x65, 0x5d, 0x05, 0x67, 0xbe, 0x8c, 0x7d, + 0x29, 0xb1, 0x86, 0x94, 0x78, 0xe3, 0x66, 0xe4, 0x0b, 0xcf, 0x42, 0x7e, 0xdb, 0x70, 0x1d, 0xcb, + 0xf0, 0x49, 0x9d, 0x51, 0x77, 0x57, 0xec, 0xdb, 0x48, 0x6d, 0x3c, 0x34, 0xde, 0xa3, 0xee, 0xae, + 0xf6, 0x21, 0x94, 0xe3, 0x88, 0xcb, 0xad, 0xeb, 0xcd, 0x09, 0x4a, 0x9d, 0x93, 0x67, 0x08, 0xa6, + 0xd4, 0xb3, 0x8a, 0xb9, 0x2e, 0x6b, 0xf9, 0x19, 0xb2, 0xb2, 0x0c, 0xc7, 0xbc, 0x00, 0x24, 0xf3, + 0x31, 0x97, 0x98, 
0x8f, 0x30, 0x48, 0x08, 0xd4, 0x9e, 0xab, 0xc5, 0x29, 0xc7, 0xff, 0xa7, 0xd6, + 0xc0, 0xa7, 0xe1, 0xe8, 0x96, 0xe3, 0xfa, 0xc4, 0x2b, 0x0d, 0x0b, 0x9c, 0xfc, 0x6a, 0xdf, 0x46, + 0x53, 0xb1, 0xac, 0x64, 0xe2, 0x57, 0x61, 0x44, 0x2e, 0x20, 0x6c, 0x96, 0xf4, 0x4b, 0xef, 0x20, + 0x53, 0xf7, 0xce, 0xc7, 0x50, 0x8a, 0xdc, 0x46, 0x99, 0xb7, 0x69, 0x1a, 0x40, 0x86, 0x3c, 0x38, + 0x15, 0x47, 0xa5, 0x65, 0xcd, 0xd2, 0x1e, 0x42, 0xf1, 0x6d, 0x6a, 0x6c, 0xba, 0xd9, 0xef, 0x84, + 0xee, 0x5b, 0x3d, 0xd7, 0x73, 0xab, 0x7f, 0x04, 0xa7, 0x56, 0x1d, 0xfe, 0x1f, 0x39, 0xff, 0x1c, + 0xc1, 0xd4, 0x6d, 0x42, 0xdb, 0x0a, 0xab, 0x73, 0x9c, 0x34, 0x99, 0xd7, 0x49, 0xcd, 0x65, 0x00, + 0x4a, 0x76, 0xd4, 0xce, 0x28, 0x86, 0xdb, 0x14, 0x6a, 0xa8, 0xca, 0x2d, 0xba, 0x5b, 0x1b, 0xa5, + 0x64, 0x27, 0xf0, 0xd0, 0x06, 0x31, 0xd7, 0x52, 0x8f, 0x98, 0x43, 0x40, 0xcc, 0xb5, 0x64, 0x2f, + 0xfd, 0x8d, 0xe0, 0x6c, 0x3c, 0x13, 0x59, 0x2f, 0x29, 0x96, 0x5b, 0x80, 0x5c, 0x67, 0x95, 0x39, + 0xc7, 0xc2, 0xf7, 0xa1, 0x60, 0x3e, 0x32, 0xa8, 0x4d, 0xea, 0x9e, 0xf0, 0xc5, 0x4b, 0x43, 0xa2, + 0xd0, 0x52, 0x9c, 0x39, 0x02, 0x26, 0x19, 0xe4, 0xcd, 0xc8, 0x17, 0xc7, 0x77, 0x61, 0xcc, 0x72, + 0x0c, 0x9b, 0x32, 0xee, 0x3b, 0x26, 0x2f, 0x0d, 0x0b, 0x97, 0x17, 0x13, 0x5d, 0xae, 0x76, 0x30, + 0xb5, 0x28, 0x7e, 0xf1, 0xa7, 0x09, 0x28, 0xc8, 0x9d, 0x0d, 0x6e, 0x74, 0x0f, 0x7f, 0x85, 0x60, + 0x3c, 0x2a, 0xe4, 0xf0, 0x95, 0x44, 0xef, 0x31, 0xe2, 0xb4, 0x7c, 0x35, 0x23, 0x2a, 0x48, 0xb4, + 0x56, 0x7c, 0xfa, 0xc7, 0x5f, 0xcf, 0x73, 0x05, 0x3c, 0x1e, 0x79, 0x47, 0x70, 0xfc, 0x2d, 0x02, + 0x38, 0xe8, 0x20, 0xbc, 0x98, 0xe1, 0xea, 0x0f, 0xf9, 0x64, 0x95, 0x34, 0xda, 0xac, 0x60, 0x32, + 0x8d, 0xa7, 0xa2, 0x4c, 0xf4, 0xbd, 0x68, 0x19, 0xec, 0xe3, 0x67, 0x08, 0xf2, 0xca, 0x21, 0x8c, + 0x93, 0xd7, 0x1d, 0x27, 0x30, 0xcb, 0xd3, 0x21, 0x2c, 0xf2, 0xfa, 0xa8, 0xdc, 0x0b, 0x5f, 0x1f, + 0xda, 0xb4, 0x20, 0x33, 0xa9, 0x29, 0x69, 0xa9, 0x86, 0xaa, 0x0b, 0x7f, 0x81, 0x20, 0xaf, 0x88, + 0xca, 0x14, 0x34, 0xe2, 0x44, 0x68, 0x12, 0x0d, 0x99, 
0x93, 0xf9, 0xbe, 0x39, 0x79, 0x81, 0xe0, + 0x78, 0x97, 0xd2, 0xc4, 0xd7, 0x13, 0xe9, 0xc4, 0x0b, 0xdb, 0x24, 0x42, 0x6f, 0x08, 0x42, 0x17, + 0xb4, 0x57, 0xfb, 0x10, 0xaa, 0xb6, 0xa4, 0x6b, 0xfc, 0x2b, 0x02, 0xdc, 0x2b, 0xa4, 0x70, 0x35, + 0x4b, 0xa9, 0xaa, 0xd2, 0xaf, 0xbc, 0x34, 0x10, 0x56, 0x16, 0xfb, 0x45, 0xc1, 0xfe, 0x35, 0x3c, + 0xdb, 0x87, 0xbd, 0x2e, 0x35, 0x1d, 0xfe, 0x0e, 0xc1, 0x89, 0x6e, 0x45, 0x8b, 0xdf, 0x1c, 0x54, + 0x04, 0x97, 0xe3, 0x24, 0x86, 0x76, 0x5d, 0x10, 0x5a, 0xc0, 0x7a, 0x0a, 0x42, 0xfa, 0x5e, 0x47, + 0xa9, 0xef, 0xe3, 0xef, 0x11, 0x4c, 0xc4, 0xc8, 0x43, 0xbc, 0x94, 0xad, 0x1b, 0x52, 0x50, 0x5c, + 0x12, 0x14, 0xaf, 0x6a, 0x69, 0x72, 0x56, 0xed, 0x52, 0x57, 0xf8, 0x07, 0x04, 0xb8, 0x57, 0x8e, + 0xa5, 0x28, 0x80, 0x43, 0xc5, 0x67, 0x52, 0x81, 0x5e, 0x15, 0x74, 0x75, 0x6d, 0x3e, 0x0d, 0x5d, + 0x2e, 0xa2, 0x54, 0xd1, 0x3c, 0xfe, 0x4d, 0x7d, 0xf2, 0x87, 0xfa, 0x05, 0x67, 0x2a, 0xb7, 0x2e, + 0x2d, 0x56, 0xbe, 0x39, 0x18, 0x58, 0x16, 0xab, 0x6c, 0x35, 0xdc, 0xaf, 0xd5, 0xf4, 0x8e, 0x34, + 0xfa, 0x19, 0x29, 0x2f, 0xf0, 0xc0, 0x8c, 0x6f, 0x64, 0x39, 0xb8, 0x15, 0x9d, 0x54, 0x4e, 0xad, + 0xcf, 0xb4, 0x1b, 0x82, 0xe8, 0x65, 0xbc, 0x90, 0x86, 0xa8, 0xbe, 0x77, 0x20, 0xad, 0xf6, 0xf1, + 0x8f, 0xa8, 0xfb, 0xfd, 0x2f, 0x89, 0xdf, 0xcc, 0x78, 0xaa, 0xab, 0xdc, 0x53, 0xd6, 0x48, 0xaa, + 0xcc, 0x56, 0x43, 0xe5, 0x8d, 0x7f, 0x47, 0x50, 0x8c, 0x13, 0x2d, 0x29, 0xc8, 0xf6, 0x51, 0x5d, + 0xe5, 0xb7, 0x06, 0x44, 0xab, 0x65, 0xa2, 0x9d, 0x57, 0x6e, 0x2a, 0x3b, 0x06, 0xd2, 0xae, 0xf3, + 0xaf, 0x11, 0xe4, 0x15, 0xf1, 0x9a, 0xe2, 0xe2, 0x8a, 0x13, 0xbb, 0x49, 0x29, 0xbe, 0x24, 0x58, + 0xbd, 0xae, 0x69, 0xfd, 0xee, 0x09, 0x22, 0x1c, 0xb7, 0x69, 0xbd, 0x40, 0x50, 0x50, 0x75, 0x2f, + 0xbe, 0x96, 0x42, 0x63, 0xf1, 0xec, 0xc4, 0x2a, 0x82, 0xd8, 0x5c, 0xdf, 0xe3, 0xac, 0x6a, 0x05, + 0x9e, 0xab, 0x68, 0x7e, 0xf9, 0x1b, 0x04, 0xb3, 0x26, 0x6b, 0x24, 0x91, 0x59, 0x9e, 0x50, 0x55, + 0xdd, 0x7a, 0x5b, 0xf8, 0xae, 0xa3, 0x87, 0xeb, 0x12, 0x67, 0x33, 0xd7, 0xa0, 0x76, 0x85, 
0x79, + 0xb6, 0x6e, 0x13, 0x2a, 0x64, 0xb1, 0xfc, 0x5f, 0x6a, 0x34, 0x1d, 0x7e, 0xe8, 0x3f, 0xd3, 0xa5, + 0x1e, 0xe3, 0xcb, 0xdc, 0xf0, 0xed, 0x5b, 0x1b, 0x77, 0x37, 0x8f, 0x0a, 0x1f, 0x97, 0xff, 0x09, + 0x00, 0x00, 0xff, 0xff, 0x50, 0x43, 0x0b, 0xb9, 0x1c, 0x16, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/legacy/audit_data.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/legacy/audit_data.pb.go new file mode 100644 index 000000000..48c409a99 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/legacy/audit_data.pb.go @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/legacy/audit_data.proto + +/* +Package legacy is a generated protocol buffer package. + +It is generated from these files: + google/appengine/legacy/audit_data.proto + +It has these top-level messages: + AuditData +*/ +package legacy + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Admin Console legacy audit log. +type AuditData struct { + // Text description of the admin event. + // This is the "Event" column in Admin Console's Admin Logs. + EventMessage string `protobuf:"bytes,1,opt,name=event_message,json=eventMessage" json:"event_message,omitempty"` + // Arbitrary event data. + // This is the "Result" column in Admin Console's Admin Logs. 
+ EventData map[string]string `protobuf:"bytes,2,rep,name=event_data,json=eventData" json:"event_data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *AuditData) Reset() { *m = AuditData{} } +func (m *AuditData) String() string { return proto.CompactTextString(m) } +func (*AuditData) ProtoMessage() {} +func (*AuditData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AuditData) GetEventMessage() string { + if m != nil { + return m.EventMessage + } + return "" +} + +func (m *AuditData) GetEventData() map[string]string { + if m != nil { + return m.EventData + } + return nil +} + +func init() { + proto.RegisterType((*AuditData)(nil), "google.appengine.legacy.AuditData") +} + +func init() { proto.RegisterFile("google/appengine/legacy/audit_data.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0x4f, 0x4b, 0x03, 0x31, + 0x10, 0xc5, 0xc9, 0x16, 0x85, 0x1d, 0xb5, 0x48, 0x10, 0x5c, 0xf4, 0x52, 0xf4, 0xb2, 0xa7, 0x04, + 0xf5, 0x22, 0xfe, 0x39, 0x58, 0xec, 0x51, 0x58, 0x7a, 0xf4, 0x52, 0xc6, 0x76, 0x18, 0x16, 0xb7, + 0x49, 0xd8, 0x4d, 0x0b, 0xfb, 0xed, 0xfc, 0x68, 0x92, 0xa4, 0x2e, 0x48, 0xe9, 0x29, 0x33, 0x6f, + 0x7e, 0x99, 0x79, 0x3c, 0x28, 0xd9, 0x5a, 0x6e, 0x48, 0xa3, 0x73, 0x64, 0xb8, 0x36, 0xa4, 0x1b, + 0x62, 0x5c, 0xf6, 0x1a, 0x37, 0xab, 0xda, 0x2f, 0x56, 0xe8, 0x51, 0xb9, 0xd6, 0x7a, 0x2b, 0x2f, + 0x13, 0xa9, 0x06, 0x52, 0x25, 0xf2, 0xe6, 0x47, 0x40, 0xfe, 0x16, 0xe8, 0x77, 0xf4, 0x28, 0x6f, + 0xe1, 0x8c, 0xb6, 0x64, 0xfc, 0x62, 0x4d, 0x5d, 0x87, 0x4c, 0x85, 0x98, 0x88, 0x32, 0x9f, 0x9f, + 0x46, 0xf1, 0x23, 0x69, 0xb2, 0x02, 0x48, 0x50, 0xd8, 0x5f, 0x64, 0x93, 0x51, 0x79, 0x72, 0x7f, + 0xa7, 0x0e, 0x1c, 0x50, 0xc3, 0x72, 0x35, 0x0b, 0x9f, 0x42, 0x35, 0x33, 0xbe, 0xed, 0xe7, 0x39, + 0xfd, 0xf5, 0x57, 0x2f, 0x30, 0xfe, 0x3f, 0x94, 0xe7, 0x30, 0xfa, 0xa6, 
0x7e, 0x77, 0x3e, 0x94, + 0xf2, 0x02, 0x8e, 0xb6, 0xd8, 0x6c, 0xa8, 0xc8, 0xa2, 0x96, 0x9a, 0xa7, 0xec, 0x51, 0x4c, 0x0d, + 0x5c, 0x2f, 0xed, 0xfa, 0x90, 0x81, 0xe9, 0x78, 0x70, 0x50, 0x85, 0x28, 0x2a, 0xf1, 0xf9, 0xba, + 0x43, 0xd9, 0x36, 0x68, 0x58, 0xd9, 0x96, 0x35, 0x93, 0x89, 0x41, 0xe9, 0x34, 0x42, 0x57, 0x77, + 0x7b, 0xa9, 0x3e, 0xa7, 0xe7, 0xeb, 0x38, 0x92, 0x0f, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4e, + 0x5d, 0x14, 0xaa, 0x7e, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/logging/v1/request_log.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/logging/v1/request_log.pb.go new file mode 100644 index 000000000..7fb8990eb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/logging/v1/request_log.pb.go @@ -0,0 +1,538 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/logging/v1/request_log.proto + +/* +Package logging is a generated protocol buffer package. + +It is generated from these files: + google/appengine/logging/v1/request_log.proto + +It has these top-level messages: + LogLine + SourceLocation + SourceReference + RequestLog +*/ +package logging + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_logging_type "google.golang.org/genproto/googleapis/logging/type" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Application log line emitted while processing a request. +type LogLine struct { + // Approximate time when this log entry was made. + Time *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=time" json:"time,omitempty"` + // Severity of this log entry. + Severity google_logging_type.LogSeverity `protobuf:"varint,2,opt,name=severity,enum=google.logging.type.LogSeverity" json:"severity,omitempty"` + // App-provided log message. + LogMessage string `protobuf:"bytes,3,opt,name=log_message,json=logMessage" json:"log_message,omitempty"` + // Where in the source code this log message was written. + SourceLocation *SourceLocation `protobuf:"bytes,4,opt,name=source_location,json=sourceLocation" json:"source_location,omitempty"` +} + +func (m *LogLine) Reset() { *m = LogLine{} } +func (m *LogLine) String() string { return proto.CompactTextString(m) } +func (*LogLine) ProtoMessage() {} +func (*LogLine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *LogLine) GetTime() *google_protobuf2.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +func (m *LogLine) GetSeverity() google_logging_type.LogSeverity { + if m != nil { + return m.Severity + } + return google_logging_type.LogSeverity_DEFAULT +} + +func (m *LogLine) GetLogMessage() string { + if m != nil { + return m.LogMessage + } + return "" +} + +func (m *LogLine) GetSourceLocation() *SourceLocation { + if m != nil { + return m.SourceLocation + } + return nil +} + +// Specifies a location in a source code file. +type SourceLocation struct { + // Source file name. Depending on the runtime environment, this might be a + // simple name or a fully-qualified name. + File string `protobuf:"bytes,1,opt,name=file" json:"file,omitempty"` + // Line within the source file. 
+ Line int64 `protobuf:"varint,2,opt,name=line" json:"line,omitempty"` + // Human-readable name of the function or method being invoked, with optional + // context such as the class or package name. This information is used in + // contexts such as the logs viewer, where a file and line number are less + // meaningful. The format can vary by language. For example: + // `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function` + // (Python). + FunctionName string `protobuf:"bytes,3,opt,name=function_name,json=functionName" json:"function_name,omitempty"` +} + +func (m *SourceLocation) Reset() { *m = SourceLocation{} } +func (m *SourceLocation) String() string { return proto.CompactTextString(m) } +func (*SourceLocation) ProtoMessage() {} +func (*SourceLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *SourceLocation) GetFile() string { + if m != nil { + return m.File + } + return "" +} + +func (m *SourceLocation) GetLine() int64 { + if m != nil { + return m.Line + } + return 0 +} + +func (m *SourceLocation) GetFunctionName() string { + if m != nil { + return m.FunctionName + } + return "" +} + +// A reference to a particular snapshot of the source tree used to build and +// deploy an application. +type SourceReference struct { + // Optional. A URI string identifying the repository. + // Example: "https://github.com/GoogleCloudPlatform/kubernetes.git" + Repository string `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` + // The canonical and persistent identifier of the deployed revision. 
+ // Example (git): "0035781c50ec7aa23385dc841529ce8a4b70db1b" + RevisionId string `protobuf:"bytes,2,opt,name=revision_id,json=revisionId" json:"revision_id,omitempty"` +} + +func (m *SourceReference) Reset() { *m = SourceReference{} } +func (m *SourceReference) String() string { return proto.CompactTextString(m) } +func (*SourceReference) ProtoMessage() {} +func (*SourceReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *SourceReference) GetRepository() string { + if m != nil { + return m.Repository + } + return "" +} + +func (m *SourceReference) GetRevisionId() string { + if m != nil { + return m.RevisionId + } + return "" +} + +// Complete log information about a single HTTP request to an App Engine +// application. +type RequestLog struct { + // Application that handled this request. + AppId string `protobuf:"bytes,1,opt,name=app_id,json=appId" json:"app_id,omitempty"` + // Module of the application that handled this request. + ModuleId string `protobuf:"bytes,37,opt,name=module_id,json=moduleId" json:"module_id,omitempty"` + // Version of the application that handled this request. + VersionId string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"` + // Globally unique identifier for a request, which is based on the request + // start time. Request IDs for requests which started later will compare + // greater as strings than those for requests which started earlier. + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId" json:"request_id,omitempty"` + // Origin IP address. + Ip string `protobuf:"bytes,4,opt,name=ip" json:"ip,omitempty"` + // Time when the request started. + StartTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time when the request finished. 
+ EndTime *google_protobuf2.Timestamp `protobuf:"bytes,7,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Latency of the request. + Latency *google_protobuf1.Duration `protobuf:"bytes,8,opt,name=latency" json:"latency,omitempty"` + // Number of CPU megacycles used to process request. + MegaCycles int64 `protobuf:"varint,9,opt,name=mega_cycles,json=megaCycles" json:"mega_cycles,omitempty"` + // Request method. Example: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`, `"DELETE"`. + Method string `protobuf:"bytes,10,opt,name=method" json:"method,omitempty"` + // Contains the path and query portion of the URL that was requested. For + // example, if the URL was "http://example.com/app?name=val", the resource + // would be "/app?name=val". The fragment identifier, which is identified by + // the `#` character, is not included. + Resource string `protobuf:"bytes,11,opt,name=resource" json:"resource,omitempty"` + // HTTP version of request. Example: `"HTTP/1.1"`. + HttpVersion string `protobuf:"bytes,12,opt,name=http_version,json=httpVersion" json:"http_version,omitempty"` + // HTTP response status code. Example: 200, 404. + Status int32 `protobuf:"varint,13,opt,name=status" json:"status,omitempty"` + // Size in bytes sent back to client by request. + ResponseSize int64 `protobuf:"varint,14,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // Referrer URL of request. + Referrer string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` + // User agent that made the request. + UserAgent string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` + // The logged-in user who made the request. + // + // Most likely, this is the part of the user's email before the `@` sign. The + // field value is the same for different requests from the same user, but + // different users can have similar names. This information is also + // available to the application via the App Engine Users API. 
+ // + // This field will be populated starting with App Engine 1.9.21. + Nickname string `protobuf:"bytes,40,opt,name=nickname" json:"nickname,omitempty"` + // File or class that handled the request. + UrlMapEntry string `protobuf:"bytes,17,opt,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"` + // Internet host and port number of the resource being requested. + Host string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` + // An indication of the relative cost of serving this request. + Cost float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` + // Queue name of the request, in the case of an offline request. + TaskQueueName string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"` + // Task name of the request, in the case of an offline request. + TaskName string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"` + // Whether this was a loading request for the instance. + WasLoadingRequest bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"` + // Time this request spent in the pending request queue. + PendingTime *google_protobuf1.Duration `protobuf:"bytes,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"` + // If the instance processing this request belongs to a manually scaled + // module, then this is the 0-based index of the instance. Otherwise, this + // value is -1. + InstanceIndex int32 `protobuf:"varint,26,opt,name=instance_index,json=instanceIndex" json:"instance_index,omitempty"` + // Whether this request is finished or active. + Finished bool `protobuf:"varint,27,opt,name=finished" json:"finished,omitempty"` + // Whether this is the first `RequestLog` entry for this request. If an + // active request has several `RequestLog` entries written to Stackdriver + // Logging, then this field will be set for one of them. 
+ First bool `protobuf:"varint,42,opt,name=first" json:"first,omitempty"` + // An identifier for the instance that handled the request. + InstanceId string `protobuf:"bytes,28,opt,name=instance_id,json=instanceId" json:"instance_id,omitempty"` + // A list of log lines emitted by the application while serving this request. + Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` + // App Engine release version. + AppEngineRelease string `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"` + // Stackdriver Trace identifier for this request. + TraceId string `protobuf:"bytes,39,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"` + // Source code for the application that handled this request. There can be + // more than one source reference per deployed application if source code is + // distributed among multiple repositories. + SourceReference []*SourceReference `protobuf:"bytes,41,rep,name=source_reference,json=sourceReference" json:"source_reference,omitempty"` +} + +func (m *RequestLog) Reset() { *m = RequestLog{} } +func (m *RequestLog) String() string { return proto.CompactTextString(m) } +func (*RequestLog) ProtoMessage() {} +func (*RequestLog) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *RequestLog) GetAppId() string { + if m != nil { + return m.AppId + } + return "" +} + +func (m *RequestLog) GetModuleId() string { + if m != nil { + return m.ModuleId + } + return "" +} + +func (m *RequestLog) GetVersionId() string { + if m != nil { + return m.VersionId + } + return "" +} + +func (m *RequestLog) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +func (m *RequestLog) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +func (m *RequestLog) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *RequestLog) GetEndTime() 
*google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *RequestLog) GetLatency() *google_protobuf1.Duration { + if m != nil { + return m.Latency + } + return nil +} + +func (m *RequestLog) GetMegaCycles() int64 { + if m != nil { + return m.MegaCycles + } + return 0 +} + +func (m *RequestLog) GetMethod() string { + if m != nil { + return m.Method + } + return "" +} + +func (m *RequestLog) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +func (m *RequestLog) GetHttpVersion() string { + if m != nil { + return m.HttpVersion + } + return "" +} + +func (m *RequestLog) GetStatus() int32 { + if m != nil { + return m.Status + } + return 0 +} + +func (m *RequestLog) GetResponseSize() int64 { + if m != nil { + return m.ResponseSize + } + return 0 +} + +func (m *RequestLog) GetReferrer() string { + if m != nil { + return m.Referrer + } + return "" +} + +func (m *RequestLog) GetUserAgent() string { + if m != nil { + return m.UserAgent + } + return "" +} + +func (m *RequestLog) GetNickname() string { + if m != nil { + return m.Nickname + } + return "" +} + +func (m *RequestLog) GetUrlMapEntry() string { + if m != nil { + return m.UrlMapEntry + } + return "" +} + +func (m *RequestLog) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *RequestLog) GetCost() float64 { + if m != nil { + return m.Cost + } + return 0 +} + +func (m *RequestLog) GetTaskQueueName() string { + if m != nil { + return m.TaskQueueName + } + return "" +} + +func (m *RequestLog) GetTaskName() string { + if m != nil { + return m.TaskName + } + return "" +} + +func (m *RequestLog) GetWasLoadingRequest() bool { + if m != nil { + return m.WasLoadingRequest + } + return false +} + +func (m *RequestLog) GetPendingTime() *google_protobuf1.Duration { + if m != nil { + return m.PendingTime + } + return nil +} + +func (m *RequestLog) GetInstanceIndex() int32 { + if m != nil { + return m.InstanceIndex + } + return 0 +} 
+ +func (m *RequestLog) GetFinished() bool { + if m != nil { + return m.Finished + } + return false +} + +func (m *RequestLog) GetFirst() bool { + if m != nil { + return m.First + } + return false +} + +func (m *RequestLog) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *RequestLog) GetLine() []*LogLine { + if m != nil { + return m.Line + } + return nil +} + +func (m *RequestLog) GetAppEngineRelease() string { + if m != nil { + return m.AppEngineRelease + } + return "" +} + +func (m *RequestLog) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +func (m *RequestLog) GetSourceReference() []*SourceReference { + if m != nil { + return m.SourceReference + } + return nil +} + +func init() { + proto.RegisterType((*LogLine)(nil), "google.appengine.logging.v1.LogLine") + proto.RegisterType((*SourceLocation)(nil), "google.appengine.logging.v1.SourceLocation") + proto.RegisterType((*SourceReference)(nil), "google.appengine.logging.v1.SourceReference") + proto.RegisterType((*RequestLog)(nil), "google.appengine.logging.v1.RequestLog") +} + +func init() { proto.RegisterFile("google/appengine/logging/v1/request_log.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 921 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x7f, 0x6f, 0x1c, 0x35, + 0x10, 0xd5, 0xe6, 0xe7, 0x9d, 0x2f, 0xb9, 0xa4, 0xa6, 0x2d, 0x4e, 0x42, 0x9b, 0x23, 0xd0, 0x70, + 0x40, 0xd9, 0x53, 0x53, 0x21, 0x81, 0xe8, 0x3f, 0x04, 0xfa, 0xc7, 0x49, 0x57, 0x54, 0x36, 0x15, + 0x48, 0x48, 0x68, 0xe5, 0xee, 0xce, 0x6d, 0xac, 0xee, 0xda, 0xae, 0xed, 0xbd, 0xf6, 0xfa, 0x41, + 0xf8, 0x98, 0x7c, 0x06, 0xe4, 0xb1, 0xf7, 0x4a, 0x00, 0x25, 0xfc, 0xb7, 0xf3, 0xe6, 0xbd, 0xf1, + 0xd8, 0x7e, 0x9e, 0x25, 0x5f, 0x55, 0x4a, 0x55, 0x35, 0x4c, 0xb8, 0xd6, 0x20, 0x2b, 0x21, 0x61, + 0x52, 0xab, 0xaa, 0x12, 0xb2, 0x9a, 0x2c, 0x1e, 0x4d, 0x0c, 0xbc, 0x6e, 0xc1, 0xba, 0xbc, 0x56, + 0x55, 
0xaa, 0x8d, 0x72, 0x8a, 0x1e, 0x05, 0x7a, 0xba, 0xa2, 0xa7, 0x91, 0x9e, 0x2e, 0x1e, 0x1d, + 0x9e, 0xc6, 0x5a, 0x5d, 0x05, 0xb7, 0xd4, 0x18, 0xe4, 0x16, 0x16, 0x60, 0x84, 0x5b, 0x86, 0x22, + 0x87, 0xf7, 0x23, 0x0f, 0xa3, 0x97, 0xed, 0x7c, 0x52, 0xb6, 0x86, 0x3b, 0xa1, 0x64, 0xcc, 0x1f, + 0xff, 0x33, 0xef, 0x44, 0x03, 0xd6, 0xf1, 0x46, 0x07, 0xc2, 0xc9, 0x9f, 0x09, 0xd9, 0x9e, 0xa9, + 0x6a, 0x26, 0x24, 0xd0, 0x94, 0x6c, 0xf8, 0x34, 0x4b, 0x46, 0xc9, 0x78, 0x70, 0x76, 0x98, 0xc6, + 0x06, 0x3b, 0x6d, 0xfa, 0xa2, 0xd3, 0x66, 0xc8, 0xa3, 0x4f, 0x48, 0xaf, 0x6b, 0x87, 0xad, 0x8d, + 0x92, 0xf1, 0xf0, 0x6c, 0xd4, 0x69, 0xba, 0xad, 0xf8, 0xbe, 0xd3, 0x99, 0xaa, 0x2e, 0x22, 0x2f, + 0x5b, 0x29, 0xe8, 0x31, 0x19, 0xf8, 0x0d, 0x35, 0x60, 0x2d, 0xaf, 0x80, 0xad, 0x8f, 0x92, 0x71, + 0x3f, 0x23, 0xb5, 0xaa, 0x9e, 0x05, 0x84, 0xbe, 0x20, 0x7b, 0x56, 0xb5, 0xa6, 0x80, 0xbc, 0x56, + 0x05, 0x6e, 0x8a, 0x6d, 0x60, 0x67, 0x5f, 0xa6, 0xd7, 0x1c, 0x5d, 0x7a, 0x81, 0x9a, 0x59, 0x94, + 0x64, 0x43, 0x7b, 0x25, 0x3e, 0xf9, 0x9d, 0x0c, 0xaf, 0x32, 0x28, 0x25, 0x1b, 0x73, 0x51, 0x87, + 0x6d, 0xf7, 0x33, 0xfc, 0xf6, 0x58, 0x2d, 0x24, 0xe0, 0xb6, 0xd6, 0x33, 0xfc, 0xa6, 0x9f, 0x90, + 0xdd, 0x79, 0x2b, 0x0b, 0xaf, 0xc9, 0x25, 0x6f, 0xba, 0x96, 0x77, 0x3a, 0xf0, 0x27, 0xde, 0xc0, + 0x49, 0x46, 0xf6, 0x42, 0xf9, 0x0c, 0xe6, 0x60, 0x40, 0x16, 0x40, 0xef, 0x13, 0x62, 0x40, 0x2b, + 0x2b, 0x9c, 0x32, 0xcb, 0xb8, 0xca, 0xdf, 0x10, 0x7f, 0x10, 0x06, 0x16, 0xc2, 0xfa, 0xba, 0xa2, + 0xc4, 0x25, 0x91, 0x10, 0xa0, 0x69, 0x79, 0xf2, 0x47, 0x9f, 0x90, 0x2c, 0xf8, 0x67, 0xa6, 0x2a, + 0x7a, 0x87, 0x6c, 0x71, 0xad, 0x3d, 0x35, 0xd4, 0xda, 0xe4, 0x5a, 0x4f, 0x4b, 0x7a, 0x44, 0xfa, + 0x8d, 0x2a, 0xdb, 0x1a, 0x7c, 0xe6, 0x01, 0x66, 0x7a, 0x01, 0x98, 0x96, 0xf4, 0x1e, 0x21, 0x0b, + 0x30, 0x57, 0x97, 0xe8, 0x47, 0x24, 0xa4, 0x3b, 0x83, 0x8a, 0x32, 0xee, 0xab, 0x1f, 0x91, 0x69, + 0x49, 0x87, 0x64, 0x4d, 0x68, 0x3c, 0xfc, 0x7e, 0xb6, 0x26, 0x34, 0xfd, 0x96, 0x10, 0xeb, 0xb8, + 0x71, 0x39, 0xda, 0x65, 0xeb, 0x46, 0xbb, 
0xf4, 0x91, 0xed, 0x63, 0xfa, 0x35, 0xe9, 0x81, 0x2c, + 0x83, 0x70, 0xfb, 0x46, 0xe1, 0x36, 0xc8, 0x12, 0x65, 0x8f, 0xc9, 0x76, 0xcd, 0x1d, 0xc8, 0x62, + 0xc9, 0x7a, 0xa8, 0x3a, 0xf8, 0x97, 0xea, 0xc7, 0xe8, 0xfc, 0xac, 0x63, 0xfa, 0x83, 0x6d, 0xa0, + 0xe2, 0x79, 0xb1, 0x2c, 0x6a, 0xb0, 0xac, 0x8f, 0x77, 0x49, 0x3c, 0xf4, 0x03, 0x22, 0xf4, 0x2e, + 0xd9, 0x6a, 0xc0, 0x5d, 0xaa, 0x92, 0x11, 0xdc, 0x5b, 0x8c, 0xe8, 0x21, 0xe9, 0x19, 0x08, 0xbe, + 0x61, 0x83, 0x70, 0x92, 0x5d, 0x4c, 0x3f, 0x26, 0x3b, 0x97, 0xce, 0xe9, 0x3c, 0x1e, 0x1e, 0xdb, + 0xc1, 0xfc, 0xc0, 0x63, 0xbf, 0x04, 0xc8, 0x97, 0xb5, 0x8e, 0xbb, 0xd6, 0xb2, 0xdd, 0x51, 0x32, + 0xde, 0xcc, 0x62, 0xe4, 0x0d, 0x64, 0xc0, 0x6a, 0x25, 0x2d, 0xe4, 0x56, 0xbc, 0x03, 0x36, 0xc4, + 0x8e, 0x76, 0x3a, 0xf0, 0x42, 0xbc, 0x83, 0xb0, 0xf6, 0x1c, 0x8c, 0x01, 0xc3, 0xf6, 0xba, 0xb5, + 0x43, 0xec, 0xaf, 0xa9, 0xb5, 0x60, 0x72, 0x5e, 0x81, 0x74, 0x6c, 0x3f, 0x5c, 0x93, 0x47, 0xbe, + 0xf7, 0x80, 0x97, 0x4a, 0x51, 0xbc, 0x42, 0x6f, 0x8e, 0x83, 0xb4, 0x8b, 0xe9, 0x09, 0xd9, 0x6d, + 0x4d, 0x9d, 0x37, 0x5c, 0xe7, 0x20, 0x9d, 0x59, 0xb2, 0x5b, 0xa1, 0xef, 0xd6, 0xd4, 0xcf, 0xb8, + 0x7e, 0xea, 0x21, 0x6f, 0xfa, 0x4b, 0x65, 0x1d, 0xbb, 0x1d, 0x1e, 0x82, 0xff, 0xf6, 0x58, 0xe1, + 0xb1, 0x3b, 0xa3, 0x64, 0x9c, 0x64, 0xf8, 0x4d, 0x4f, 0xc9, 0x9e, 0xe3, 0xf6, 0x55, 0xfe, 0xba, + 0x85, 0x16, 0xc2, 0x53, 0xb8, 0x8b, 0x92, 0x5d, 0x0f, 0xff, 0xec, 0x51, 0xff, 0x16, 0xbc, 0x23, + 0x91, 0x87, 0x8c, 0x0f, 0x43, 0x43, 0x1e, 0xc0, 0x64, 0x4a, 0x3e, 0x78, 0xc3, 0x6d, 0x5e, 0x2b, + 0x5e, 0x0a, 0x59, 0xe5, 0xd1, 0x6c, 0x8c, 0x8d, 0x92, 0x71, 0x2f, 0xbb, 0xf5, 0x86, 0xdb, 0x59, + 0xc8, 0x44, 0xe3, 0xd3, 0x27, 0x64, 0x47, 0x83, 0x44, 0x2e, 0x9a, 0xe7, 0xe0, 0x26, 0x1b, 0x0c, + 0x22, 0x1d, 0xfd, 0xf3, 0x80, 0x0c, 0x85, 0xb4, 0x8e, 0xcb, 0x02, 0x72, 0x21, 0x4b, 0x78, 0xcb, + 0x0e, 0xf1, 0x6a, 0x76, 0x3b, 0x74, 0xea, 0x41, 0x7f, 0x82, 0x73, 0x21, 0x85, 0xbd, 0x84, 0x92, + 0x1d, 0x61, 0x27, 0xab, 0x98, 0xde, 0x26, 0x9b, 0x73, 0x61, 0xac, 0x63, 0x5f, 
0x60, 0x22, 0x04, + 0xde, 0x63, 0xef, 0x0b, 0x97, 0xec, 0xa3, 0xf0, 0x78, 0x57, 0x55, 0x4b, 0xfa, 0x4d, 0x9c, 0x24, + 0xf7, 0x46, 0xeb, 0xe3, 0xc1, 0xd9, 0xa7, 0xd7, 0x8e, 0xae, 0x38, 0x88, 0xe3, 0xbc, 0x79, 0x48, + 0xa8, 0x7f, 0xe7, 0x81, 0x96, 0x1b, 0xa8, 0x81, 0x5b, 0x60, 0xa7, 0xb8, 0xc2, 0x3e, 0xd7, 0xfa, + 0x29, 0x26, 0xb2, 0x80, 0xd3, 0x03, 0xd2, 0x73, 0x86, 0x87, 0x2e, 0x3e, 0x43, 0xce, 0x36, 0xc6, + 0xd3, 0x92, 0xfe, 0x4a, 0xf6, 0xe3, 0x20, 0x35, 0xdd, 0x50, 0x62, 0x9f, 0x63, 0x3b, 0x0f, 0xff, + 0xc7, 0x24, 0x5d, 0x0d, 0xb2, 0x2c, 0x8e, 0xe3, 0x15, 0x70, 0xfe, 0x96, 0x1c, 0x17, 0xaa, 0xb9, + 0xae, 0xc6, 0xf9, 0xde, 0xfb, 0xc1, 0xf5, 0xdc, 0x5f, 0xd1, 0xf3, 0xe4, 0xb7, 0xf3, 0xc8, 0xaf, + 0x54, 0xcd, 0x65, 0x95, 0x2a, 0x53, 0x4d, 0x2a, 0x90, 0x78, 0x81, 0x93, 0x90, 0xe2, 0x5a, 0xd8, + 0xff, 0xfc, 0x8d, 0x7e, 0x17, 0x3f, 0x5f, 0x6e, 0x21, 0xfd, 0xf1, 0x5f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x05, 0xf7, 0x68, 0xa8, 0x74, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/app_yaml.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/app_yaml.pb.go new file mode 100644 index 000000000..af25606a9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/app_yaml.pb.go @@ -0,0 +1,900 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/v1/app_yaml.proto + +/* +Package appengine is a generated protocol buffer package. 
+ +It is generated from these files: + google/appengine/v1/app_yaml.proto + google/appengine/v1/appengine.proto + google/appengine/v1/application.proto + google/appengine/v1/audit_data.proto + google/appengine/v1/deploy.proto + google/appengine/v1/instance.proto + google/appengine/v1/location.proto + google/appengine/v1/operation.proto + google/appengine/v1/service.proto + google/appengine/v1/version.proto + +It has these top-level messages: + ApiConfigHandler + ErrorHandler + UrlMap + StaticFilesHandler + ScriptHandler + ApiEndpointHandler + HealthCheck + Library + GetApplicationRequest + RepairApplicationRequest + ListServicesRequest + ListServicesResponse + GetServiceRequest + UpdateServiceRequest + DeleteServiceRequest + ListVersionsRequest + ListVersionsResponse + GetVersionRequest + CreateVersionRequest + UpdateVersionRequest + DeleteVersionRequest + ListInstancesRequest + ListInstancesResponse + GetInstanceRequest + DeleteInstanceRequest + DebugInstanceRequest + Application + UrlDispatchRule + AuditData + UpdateServiceMethod + CreateVersionMethod + Deployment + FileInfo + ContainerInfo + ZipInfo + Instance + LocationMetadata + OperationMetadataV1 + Service + TrafficSplit + Version + AutomaticScaling + BasicScaling + ManualScaling + CpuUtilization + RequestUtilization + DiskUtilization + NetworkUtilization + Network + Resources +*/ +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Actions to take when the user is not logged in. +type AuthFailAction int32 + +const ( + // Not specified. `AUTH_FAIL_ACTION_REDIRECT` is assumed. + AuthFailAction_AUTH_FAIL_ACTION_UNSPECIFIED AuthFailAction = 0 + // Redirects user to "accounts.google.com". The user is redirected back to the + // application URL after signing in or creating an account. + AuthFailAction_AUTH_FAIL_ACTION_REDIRECT AuthFailAction = 1 + // Rejects request with a `401` HTTP status code and an error + // message. + AuthFailAction_AUTH_FAIL_ACTION_UNAUTHORIZED AuthFailAction = 2 +) + +var AuthFailAction_name = map[int32]string{ + 0: "AUTH_FAIL_ACTION_UNSPECIFIED", + 1: "AUTH_FAIL_ACTION_REDIRECT", + 2: "AUTH_FAIL_ACTION_UNAUTHORIZED", +} +var AuthFailAction_value = map[string]int32{ + "AUTH_FAIL_ACTION_UNSPECIFIED": 0, + "AUTH_FAIL_ACTION_REDIRECT": 1, + "AUTH_FAIL_ACTION_UNAUTHORIZED": 2, +} + +func (x AuthFailAction) String() string { + return proto.EnumName(AuthFailAction_name, int32(x)) +} +func (AuthFailAction) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Methods to restrict access to a URL based on login status. +type LoginRequirement int32 + +const ( + // Not specified. `LOGIN_OPTIONAL` is assumed. + LoginRequirement_LOGIN_UNSPECIFIED LoginRequirement = 0 + // Does not require that the user is signed in. + LoginRequirement_LOGIN_OPTIONAL LoginRequirement = 1 + // If the user is not signed in, the `auth_fail_action` is taken. + // In addition, if the user is not an administrator for the + // application, they are given an error message regardless of + // `auth_fail_action`. If the user is an administrator, the handler + // proceeds. + LoginRequirement_LOGIN_ADMIN LoginRequirement = 2 + // If the user has signed in, the handler proceeds normally. Otherwise, the + // auth_fail_action is taken. 
+ LoginRequirement_LOGIN_REQUIRED LoginRequirement = 3 +) + +var LoginRequirement_name = map[int32]string{ + 0: "LOGIN_UNSPECIFIED", + 1: "LOGIN_OPTIONAL", + 2: "LOGIN_ADMIN", + 3: "LOGIN_REQUIRED", +} +var LoginRequirement_value = map[string]int32{ + "LOGIN_UNSPECIFIED": 0, + "LOGIN_OPTIONAL": 1, + "LOGIN_ADMIN": 2, + "LOGIN_REQUIRED": 3, +} + +func (x LoginRequirement) String() string { + return proto.EnumName(LoginRequirement_name, int32(x)) +} +func (LoginRequirement) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// Methods to enforce security (HTTPS) on a URL. +type SecurityLevel int32 + +const ( + // Not specified. + SecurityLevel_SECURE_UNSPECIFIED SecurityLevel = 0 + // Both HTTP and HTTPS requests with URLs that match the handler succeed + // without redirects. The application can examine the request to determine + // which protocol was used, and respond accordingly. + SecurityLevel_SECURE_DEFAULT SecurityLevel = 0 + // Requests for a URL that match this handler that use HTTPS are automatically + // redirected to the HTTP equivalent URL. + SecurityLevel_SECURE_NEVER SecurityLevel = 1 + // Both HTTP and HTTPS requests with URLs that match the handler succeed + // without redirects. The application can examine the request to determine + // which protocol was used and respond accordingly. + SecurityLevel_SECURE_OPTIONAL SecurityLevel = 2 + // Requests for a URL that match this handler that do not use HTTPS are + // automatically redirected to the HTTPS URL with the same path. Query + // parameters are reserved for the redirect. 
+ SecurityLevel_SECURE_ALWAYS SecurityLevel = 3 +) + +var SecurityLevel_name = map[int32]string{ + 0: "SECURE_UNSPECIFIED", + // Duplicate value: 0: "SECURE_DEFAULT", + 1: "SECURE_NEVER", + 2: "SECURE_OPTIONAL", + 3: "SECURE_ALWAYS", +} +var SecurityLevel_value = map[string]int32{ + "SECURE_UNSPECIFIED": 0, + "SECURE_DEFAULT": 0, + "SECURE_NEVER": 1, + "SECURE_OPTIONAL": 2, + "SECURE_ALWAYS": 3, +} + +func (x SecurityLevel) String() string { + return proto.EnumName(SecurityLevel_name, int32(x)) +} +func (SecurityLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// Error codes. +type ErrorHandler_ErrorCode int32 + +const ( + // Not specified. ERROR_CODE_DEFAULT is assumed. + ErrorHandler_ERROR_CODE_UNSPECIFIED ErrorHandler_ErrorCode = 0 + // All other error types. + ErrorHandler_ERROR_CODE_DEFAULT ErrorHandler_ErrorCode = 0 + // Application has exceeded a resource quota. + ErrorHandler_ERROR_CODE_OVER_QUOTA ErrorHandler_ErrorCode = 1 + // Client blocked by the application's Denial of Service protection + // configuration. + ErrorHandler_ERROR_CODE_DOS_API_DENIAL ErrorHandler_ErrorCode = 2 + // Deadline reached before the application responds. + ErrorHandler_ERROR_CODE_TIMEOUT ErrorHandler_ErrorCode = 3 +) + +var ErrorHandler_ErrorCode_name = map[int32]string{ + 0: "ERROR_CODE_UNSPECIFIED", + // Duplicate value: 0: "ERROR_CODE_DEFAULT", + 1: "ERROR_CODE_OVER_QUOTA", + 2: "ERROR_CODE_DOS_API_DENIAL", + 3: "ERROR_CODE_TIMEOUT", +} +var ErrorHandler_ErrorCode_value = map[string]int32{ + "ERROR_CODE_UNSPECIFIED": 0, + "ERROR_CODE_DEFAULT": 0, + "ERROR_CODE_OVER_QUOTA": 1, + "ERROR_CODE_DOS_API_DENIAL": 2, + "ERROR_CODE_TIMEOUT": 3, +} + +func (x ErrorHandler_ErrorCode) String() string { + return proto.EnumName(ErrorHandler_ErrorCode_name, int32(x)) +} +func (ErrorHandler_ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// Redirect codes. 
+type UrlMap_RedirectHttpResponseCode int32 + +const ( + // Not specified. `302` is assumed. + UrlMap_REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED UrlMap_RedirectHttpResponseCode = 0 + // `301 Moved Permanently` code. + UrlMap_REDIRECT_HTTP_RESPONSE_CODE_301 UrlMap_RedirectHttpResponseCode = 1 + // `302 Moved Temporarily` code. + UrlMap_REDIRECT_HTTP_RESPONSE_CODE_302 UrlMap_RedirectHttpResponseCode = 2 + // `303 See Other` code. + UrlMap_REDIRECT_HTTP_RESPONSE_CODE_303 UrlMap_RedirectHttpResponseCode = 3 + // `307 Temporary Redirect` code. + UrlMap_REDIRECT_HTTP_RESPONSE_CODE_307 UrlMap_RedirectHttpResponseCode = 4 +) + +var UrlMap_RedirectHttpResponseCode_name = map[int32]string{ + 0: "REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED", + 1: "REDIRECT_HTTP_RESPONSE_CODE_301", + 2: "REDIRECT_HTTP_RESPONSE_CODE_302", + 3: "REDIRECT_HTTP_RESPONSE_CODE_303", + 4: "REDIRECT_HTTP_RESPONSE_CODE_307", +} +var UrlMap_RedirectHttpResponseCode_value = map[string]int32{ + "REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED": 0, + "REDIRECT_HTTP_RESPONSE_CODE_301": 1, + "REDIRECT_HTTP_RESPONSE_CODE_302": 2, + "REDIRECT_HTTP_RESPONSE_CODE_303": 3, + "REDIRECT_HTTP_RESPONSE_CODE_307": 4, +} + +func (x UrlMap_RedirectHttpResponseCode) String() string { + return proto.EnumName(UrlMap_RedirectHttpResponseCode_name, int32(x)) +} +func (UrlMap_RedirectHttpResponseCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +// [Google Cloud Endpoints](https://cloud.google.com/appengine/docs/python/endpoints/) +// configuration for API handlers. +type ApiConfigHandler struct { + // Action to take when users access resources that require + // authentication. Defaults to `redirect`. + AuthFailAction AuthFailAction `protobuf:"varint,1,opt,name=auth_fail_action,json=authFailAction,enum=google.appengine.v1.AuthFailAction" json:"auth_fail_action,omitempty"` + // Level of login required to access this resource. Defaults to + // `optional`. 
+ Login LoginRequirement `protobuf:"varint,2,opt,name=login,enum=google.appengine.v1.LoginRequirement" json:"login,omitempty"` + // Path to the script from the application root directory. + Script string `protobuf:"bytes,3,opt,name=script" json:"script,omitempty"` + // Security (HTTPS) enforcement for this URL. + SecurityLevel SecurityLevel `protobuf:"varint,4,opt,name=security_level,json=securityLevel,enum=google.appengine.v1.SecurityLevel" json:"security_level,omitempty"` + // URL to serve the endpoint at. + Url string `protobuf:"bytes,5,opt,name=url" json:"url,omitempty"` +} + +func (m *ApiConfigHandler) Reset() { *m = ApiConfigHandler{} } +func (m *ApiConfigHandler) String() string { return proto.CompactTextString(m) } +func (*ApiConfigHandler) ProtoMessage() {} +func (*ApiConfigHandler) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ApiConfigHandler) GetAuthFailAction() AuthFailAction { + if m != nil { + return m.AuthFailAction + } + return AuthFailAction_AUTH_FAIL_ACTION_UNSPECIFIED +} + +func (m *ApiConfigHandler) GetLogin() LoginRequirement { + if m != nil { + return m.Login + } + return LoginRequirement_LOGIN_UNSPECIFIED +} + +func (m *ApiConfigHandler) GetScript() string { + if m != nil { + return m.Script + } + return "" +} + +func (m *ApiConfigHandler) GetSecurityLevel() SecurityLevel { + if m != nil { + return m.SecurityLevel + } + return SecurityLevel_SECURE_UNSPECIFIED +} + +func (m *ApiConfigHandler) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +// Custom static error page to be served when an error occurs. +type ErrorHandler struct { + // Error condition this handler applies to. + ErrorCode ErrorHandler_ErrorCode `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=google.appengine.v1.ErrorHandler_ErrorCode" json:"error_code,omitempty"` + // Static file content to be served for this error. 
+ StaticFile string `protobuf:"bytes,2,opt,name=static_file,json=staticFile" json:"static_file,omitempty"` + // MIME type of file. Defaults to `text/html`. + MimeType string `protobuf:"bytes,3,opt,name=mime_type,json=mimeType" json:"mime_type,omitempty"` +} + +func (m *ErrorHandler) Reset() { *m = ErrorHandler{} } +func (m *ErrorHandler) String() string { return proto.CompactTextString(m) } +func (*ErrorHandler) ProtoMessage() {} +func (*ErrorHandler) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ErrorHandler) GetErrorCode() ErrorHandler_ErrorCode { + if m != nil { + return m.ErrorCode + } + return ErrorHandler_ERROR_CODE_UNSPECIFIED +} + +func (m *ErrorHandler) GetStaticFile() string { + if m != nil { + return m.StaticFile + } + return "" +} + +func (m *ErrorHandler) GetMimeType() string { + if m != nil { + return m.MimeType + } + return "" +} + +// URL pattern and description of how the URL should be handled. App Engine can +// handle URLs by executing application code or by serving static files +// uploaded with the version, such as images, CSS, or JavaScript. +type UrlMap struct { + // URL prefix. Uses regular expression syntax, which means regexp + // special characters must be escaped, but should not contain groupings. + // All URLs that begin with this prefix are handled by this handler, using the + // portion of the URL after the prefix as part of the file path. + UrlRegex string `protobuf:"bytes,1,opt,name=url_regex,json=urlRegex" json:"url_regex,omitempty"` + // Type of handler for this URL pattern. + // + // Types that are valid to be assigned to HandlerType: + // *UrlMap_StaticFiles + // *UrlMap_Script + // *UrlMap_ApiEndpoint + HandlerType isUrlMap_HandlerType `protobuf_oneof:"handler_type"` + // Security (HTTPS) enforcement for this URL. 
+ SecurityLevel SecurityLevel `protobuf:"varint,5,opt,name=security_level,json=securityLevel,enum=google.appengine.v1.SecurityLevel" json:"security_level,omitempty"` + // Level of login required to access this resource. + Login LoginRequirement `protobuf:"varint,6,opt,name=login,enum=google.appengine.v1.LoginRequirement" json:"login,omitempty"` + // Action to take when users access resources that require + // authentication. Defaults to `redirect`. + AuthFailAction AuthFailAction `protobuf:"varint,7,opt,name=auth_fail_action,json=authFailAction,enum=google.appengine.v1.AuthFailAction" json:"auth_fail_action,omitempty"` + // `30x` code to use when performing redirects for the `secure` field. + // Defaults to `302`. + RedirectHttpResponseCode UrlMap_RedirectHttpResponseCode `protobuf:"varint,8,opt,name=redirect_http_response_code,json=redirectHttpResponseCode,enum=google.appengine.v1.UrlMap_RedirectHttpResponseCode" json:"redirect_http_response_code,omitempty"` +} + +func (m *UrlMap) Reset() { *m = UrlMap{} } +func (m *UrlMap) String() string { return proto.CompactTextString(m) } +func (*UrlMap) ProtoMessage() {} +func (*UrlMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isUrlMap_HandlerType interface { + isUrlMap_HandlerType() +} + +type UrlMap_StaticFiles struct { + StaticFiles *StaticFilesHandler `protobuf:"bytes,2,opt,name=static_files,json=staticFiles,oneof"` +} +type UrlMap_Script struct { + Script *ScriptHandler `protobuf:"bytes,3,opt,name=script,oneof"` +} +type UrlMap_ApiEndpoint struct { + ApiEndpoint *ApiEndpointHandler `protobuf:"bytes,4,opt,name=api_endpoint,json=apiEndpoint,oneof"` +} + +func (*UrlMap_StaticFiles) isUrlMap_HandlerType() {} +func (*UrlMap_Script) isUrlMap_HandlerType() {} +func (*UrlMap_ApiEndpoint) isUrlMap_HandlerType() {} + +func (m *UrlMap) GetHandlerType() isUrlMap_HandlerType { + if m != nil { + return m.HandlerType + } + return nil +} + +func (m *UrlMap) GetUrlRegex() string { + if m != nil { + 
return m.UrlRegex + } + return "" +} + +func (m *UrlMap) GetStaticFiles() *StaticFilesHandler { + if x, ok := m.GetHandlerType().(*UrlMap_StaticFiles); ok { + return x.StaticFiles + } + return nil +} + +func (m *UrlMap) GetScript() *ScriptHandler { + if x, ok := m.GetHandlerType().(*UrlMap_Script); ok { + return x.Script + } + return nil +} + +func (m *UrlMap) GetApiEndpoint() *ApiEndpointHandler { + if x, ok := m.GetHandlerType().(*UrlMap_ApiEndpoint); ok { + return x.ApiEndpoint + } + return nil +} + +func (m *UrlMap) GetSecurityLevel() SecurityLevel { + if m != nil { + return m.SecurityLevel + } + return SecurityLevel_SECURE_UNSPECIFIED +} + +func (m *UrlMap) GetLogin() LoginRequirement { + if m != nil { + return m.Login + } + return LoginRequirement_LOGIN_UNSPECIFIED +} + +func (m *UrlMap) GetAuthFailAction() AuthFailAction { + if m != nil { + return m.AuthFailAction + } + return AuthFailAction_AUTH_FAIL_ACTION_UNSPECIFIED +} + +func (m *UrlMap) GetRedirectHttpResponseCode() UrlMap_RedirectHttpResponseCode { + if m != nil { + return m.RedirectHttpResponseCode + } + return UrlMap_REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*UrlMap) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _UrlMap_OneofMarshaler, _UrlMap_OneofUnmarshaler, _UrlMap_OneofSizer, []interface{}{ + (*UrlMap_StaticFiles)(nil), + (*UrlMap_Script)(nil), + (*UrlMap_ApiEndpoint)(nil), + } +} + +func _UrlMap_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*UrlMap) + // handler_type + switch x := m.HandlerType.(type) { + case *UrlMap_StaticFiles: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StaticFiles); err != nil { + return err + } + case *UrlMap_Script: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Script); err != nil { + return err + } + case *UrlMap_ApiEndpoint: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ApiEndpoint); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("UrlMap.HandlerType has unexpected type %T", x) + } + return nil +} + +func _UrlMap_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*UrlMap) + switch tag { + case 2: // handler_type.static_files + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StaticFilesHandler) + err := b.DecodeMessage(msg) + m.HandlerType = &UrlMap_StaticFiles{msg} + return true, err + case 3: // handler_type.script + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ScriptHandler) + err := b.DecodeMessage(msg) + m.HandlerType = &UrlMap_Script{msg} + return true, err + case 4: // handler_type.api_endpoint + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ApiEndpointHandler) + err := b.DecodeMessage(msg) + m.HandlerType = &UrlMap_ApiEndpoint{msg} + return true, err + default: + return false, nil + } +} + +func _UrlMap_OneofSizer(msg 
proto.Message) (n int) { + m := msg.(*UrlMap) + // handler_type + switch x := m.HandlerType.(type) { + case *UrlMap_StaticFiles: + s := proto.Size(x.StaticFiles) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *UrlMap_Script: + s := proto.Size(x.Script) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *UrlMap_ApiEndpoint: + s := proto.Size(x.ApiEndpoint) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Files served directly to the user for a given URL, such as images, CSS +// stylesheets, or JavaScript source files. Static file handlers describe which +// files in the application directory are static files, and which URLs serve +// them. +type StaticFilesHandler struct { + // Path to the static files matched by the URL pattern, from the + // application root directory. The path can refer to text matched in groupings + // in the URL pattern. + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + // Regular expression that matches the file paths for all files that should be + // referenced by this handler. + UploadPathRegex string `protobuf:"bytes,2,opt,name=upload_path_regex,json=uploadPathRegex" json:"upload_path_regex,omitempty"` + // HTTP headers to use for all responses from these URLs. + HttpHeaders map[string]string `protobuf:"bytes,3,rep,name=http_headers,json=httpHeaders" json:"http_headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // MIME type used to serve all files served by this handler. + // + // Defaults to file-specific MIME types, which are derived from each file's + // filename extension. 
+ MimeType string `protobuf:"bytes,4,opt,name=mime_type,json=mimeType" json:"mime_type,omitempty"` + // Time a static file served by this handler should be cached + // by web proxies and browsers. + Expiration *google_protobuf1.Duration `protobuf:"bytes,5,opt,name=expiration" json:"expiration,omitempty"` + // Whether this handler should match the request if the file + // referenced by the handler does not exist. + RequireMatchingFile bool `protobuf:"varint,6,opt,name=require_matching_file,json=requireMatchingFile" json:"require_matching_file,omitempty"` + // Whether files should also be uploaded as code data. By default, files + // declared in static file handlers are uploaded as static + // data and are only served to end users; they cannot be read by the + // application. If enabled, uploads are charged against both your code and + // static data storage resource quotas. + ApplicationReadable bool `protobuf:"varint,7,opt,name=application_readable,json=applicationReadable" json:"application_readable,omitempty"` +} + +func (m *StaticFilesHandler) Reset() { *m = StaticFilesHandler{} } +func (m *StaticFilesHandler) String() string { return proto.CompactTextString(m) } +func (*StaticFilesHandler) ProtoMessage() {} +func (*StaticFilesHandler) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *StaticFilesHandler) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *StaticFilesHandler) GetUploadPathRegex() string { + if m != nil { + return m.UploadPathRegex + } + return "" +} + +func (m *StaticFilesHandler) GetHttpHeaders() map[string]string { + if m != nil { + return m.HttpHeaders + } + return nil +} + +func (m *StaticFilesHandler) GetMimeType() string { + if m != nil { + return m.MimeType + } + return "" +} + +func (m *StaticFilesHandler) GetExpiration() *google_protobuf1.Duration { + if m != nil { + return m.Expiration + } + return nil +} + +func (m *StaticFilesHandler) GetRequireMatchingFile() bool { + if m != 
nil { + return m.RequireMatchingFile + } + return false +} + +func (m *StaticFilesHandler) GetApplicationReadable() bool { + if m != nil { + return m.ApplicationReadable + } + return false +} + +// Executes a script to handle the request that matches the URL pattern. +type ScriptHandler struct { + // Path to the script from the application root directory. + ScriptPath string `protobuf:"bytes,1,opt,name=script_path,json=scriptPath" json:"script_path,omitempty"` +} + +func (m *ScriptHandler) Reset() { *m = ScriptHandler{} } +func (m *ScriptHandler) String() string { return proto.CompactTextString(m) } +func (*ScriptHandler) ProtoMessage() {} +func (*ScriptHandler) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ScriptHandler) GetScriptPath() string { + if m != nil { + return m.ScriptPath + } + return "" +} + +// Uses Google Cloud Endpoints to handle requests. +type ApiEndpointHandler struct { + // Path to the script from the application root directory. + ScriptPath string `protobuf:"bytes,1,opt,name=script_path,json=scriptPath" json:"script_path,omitempty"` +} + +func (m *ApiEndpointHandler) Reset() { *m = ApiEndpointHandler{} } +func (m *ApiEndpointHandler) String() string { return proto.CompactTextString(m) } +func (*ApiEndpointHandler) ProtoMessage() {} +func (*ApiEndpointHandler) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ApiEndpointHandler) GetScriptPath() string { + if m != nil { + return m.ScriptPath + } + return "" +} + +// Health checking configuration for VM instances. Unhealthy instances +// are killed and replaced with new instances. Only applicable for +// instances in App Engine flexible environment. +type HealthCheck struct { + // Whether to explicitly disable health checks for this instance. 
+ DisableHealthCheck bool `protobuf:"varint,1,opt,name=disable_health_check,json=disableHealthCheck" json:"disable_health_check,omitempty"` + // Host header to send when performing an HTTP health check. + // Example: "myapp.appspot.com" + Host string `protobuf:"bytes,2,opt,name=host" json:"host,omitempty"` + // Number of consecutive successful health checks required before receiving + // traffic. + HealthyThreshold uint32 `protobuf:"varint,3,opt,name=healthy_threshold,json=healthyThreshold" json:"healthy_threshold,omitempty"` + // Number of consecutive failed health checks required before removing + // traffic. + UnhealthyThreshold uint32 `protobuf:"varint,4,opt,name=unhealthy_threshold,json=unhealthyThreshold" json:"unhealthy_threshold,omitempty"` + // Number of consecutive failed health checks required before an instance is + // restarted. + RestartThreshold uint32 `protobuf:"varint,5,opt,name=restart_threshold,json=restartThreshold" json:"restart_threshold,omitempty"` + // Interval between health checks. + CheckInterval *google_protobuf1.Duration `protobuf:"bytes,6,opt,name=check_interval,json=checkInterval" json:"check_interval,omitempty"` + // Time before the health check is considered failed. 
+ Timeout *google_protobuf1.Duration `protobuf:"bytes,7,opt,name=timeout" json:"timeout,omitempty"` +} + +func (m *HealthCheck) Reset() { *m = HealthCheck{} } +func (m *HealthCheck) String() string { return proto.CompactTextString(m) } +func (*HealthCheck) ProtoMessage() {} +func (*HealthCheck) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *HealthCheck) GetDisableHealthCheck() bool { + if m != nil { + return m.DisableHealthCheck + } + return false +} + +func (m *HealthCheck) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *HealthCheck) GetHealthyThreshold() uint32 { + if m != nil { + return m.HealthyThreshold + } + return 0 +} + +func (m *HealthCheck) GetUnhealthyThreshold() uint32 { + if m != nil { + return m.UnhealthyThreshold + } + return 0 +} + +func (m *HealthCheck) GetRestartThreshold() uint32 { + if m != nil { + return m.RestartThreshold + } + return 0 +} + +func (m *HealthCheck) GetCheckInterval() *google_protobuf1.Duration { + if m != nil { + return m.CheckInterval + } + return nil +} + +func (m *HealthCheck) GetTimeout() *google_protobuf1.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +// Third-party Python runtime library that is required by the application. +type Library struct { + // Name of the library. Example: "django". + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Version of the library to select, or "latest". 
+ Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *Library) Reset() { *m = Library{} } +func (m *Library) String() string { return proto.CompactTextString(m) } +func (*Library) ProtoMessage() {} +func (*Library) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Library) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Library) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func init() { + proto.RegisterType((*ApiConfigHandler)(nil), "google.appengine.v1.ApiConfigHandler") + proto.RegisterType((*ErrorHandler)(nil), "google.appengine.v1.ErrorHandler") + proto.RegisterType((*UrlMap)(nil), "google.appengine.v1.UrlMap") + proto.RegisterType((*StaticFilesHandler)(nil), "google.appengine.v1.StaticFilesHandler") + proto.RegisterType((*ScriptHandler)(nil), "google.appengine.v1.ScriptHandler") + proto.RegisterType((*ApiEndpointHandler)(nil), "google.appengine.v1.ApiEndpointHandler") + proto.RegisterType((*HealthCheck)(nil), "google.appengine.v1.HealthCheck") + proto.RegisterType((*Library)(nil), "google.appengine.v1.Library") + proto.RegisterEnum("google.appengine.v1.AuthFailAction", AuthFailAction_name, AuthFailAction_value) + proto.RegisterEnum("google.appengine.v1.LoginRequirement", LoginRequirement_name, LoginRequirement_value) + proto.RegisterEnum("google.appengine.v1.SecurityLevel", SecurityLevel_name, SecurityLevel_value) + proto.RegisterEnum("google.appengine.v1.ErrorHandler_ErrorCode", ErrorHandler_ErrorCode_name, ErrorHandler_ErrorCode_value) + proto.RegisterEnum("google.appengine.v1.UrlMap_RedirectHttpResponseCode", UrlMap_RedirectHttpResponseCode_name, UrlMap_RedirectHttpResponseCode_value) +} + +func init() { proto.RegisterFile("google/appengine/v1/app_yaml.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1232 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0x9c, 0x56, 0xdd, 0x6e, 0x13, 0x47, + 0x14, 0xc6, 0x76, 0x7e, 0x8f, 0x1d, 0xb3, 0x99, 0x00, 0x75, 0x02, 0x94, 0xd4, 0xa8, 0x02, 0x25, + 0x92, 0x4d, 0x92, 0x56, 0xd0, 0x82, 0xaa, 0x2e, 0xf6, 0xa6, 0xde, 0xca, 0x89, 0xcd, 0xd8, 0xa6, + 0x82, 0x5e, 0x8c, 0x26, 0xf6, 0xc4, 0x3b, 0x62, 0xbd, 0xbb, 0x9d, 0x1d, 0x47, 0xf8, 0x39, 0xaa, + 0xbe, 0x07, 0xb7, 0x7d, 0x90, 0x5e, 0xf5, 0x65, 0xaa, 0x99, 0x1d, 0xff, 0x25, 0x0e, 0xa9, 0xb8, + 0x9b, 0x73, 0xce, 0xf7, 0x9d, 0x9d, 0xf3, 0x3b, 0x0b, 0xc5, 0x7e, 0x18, 0xf6, 0x7d, 0x56, 0xa6, + 0x51, 0xc4, 0x82, 0x3e, 0x0f, 0x58, 0xf9, 0xe2, 0x40, 0x09, 0x64, 0x44, 0x07, 0x7e, 0x29, 0x12, + 0xa1, 0x0c, 0xd1, 0x56, 0x82, 0x29, 0x4d, 0x30, 0xa5, 0x8b, 0x83, 0x9d, 0x07, 0x13, 0x22, 0x2f, + 0xd3, 0x20, 0x08, 0x25, 0x95, 0x3c, 0x0c, 0xe2, 0x84, 0xb2, 0xf3, 0xb5, 0xb1, 0x6a, 0xe9, 0x6c, + 0x78, 0x5e, 0xee, 0x0d, 0x85, 0x06, 0x24, 0xf6, 0xe2, 0x9f, 0x69, 0xb0, 0xec, 0x88, 0x57, 0xc2, + 0xe0, 0x9c, 0xf7, 0x6b, 0x34, 0xe8, 0xf9, 0x4c, 0xa0, 0x13, 0xb0, 0xe8, 0x50, 0x7a, 0xe4, 0x9c, + 0x72, 0x9f, 0xd0, 0xae, 0x82, 0x17, 0x52, 0xbb, 0xa9, 0xa7, 0xf9, 0xc3, 0xc7, 0xa5, 0x05, 0x57, + 0x28, 0xd9, 0x43, 0xe9, 0x1d, 0x53, 0xee, 0xdb, 0x1a, 0x8a, 0xf3, 0x74, 0x4e, 0x46, 0x2f, 0x61, + 0xd9, 0x0f, 0xfb, 0x3c, 0x28, 0xa4, 0xb5, 0x8f, 0x6f, 0x17, 0xfa, 0xa8, 0x2b, 0x04, 0x66, 0x7f, + 0x0c, 0xb9, 0x60, 0x03, 0x16, 0x48, 0x9c, 0x70, 0xd0, 0x3d, 0x58, 0x89, 0xbb, 0x82, 0x47, 0xb2, + 0x90, 0xd9, 0x4d, 0x3d, 0x5d, 0xc7, 0x46, 0x42, 0x2e, 0xe4, 0x63, 0xd6, 0x1d, 0x0a, 0x2e, 0x47, + 0xc4, 0x67, 0x17, 0xcc, 0x2f, 0x2c, 0x69, 0xef, 0xc5, 0x85, 0xde, 0x5b, 0x06, 0x5a, 0x57, 0x48, + 0xbc, 0x11, 0xcf, 0x8a, 0xc8, 0x82, 0xcc, 0x50, 0xf8, 0x85, 0x65, 0xed, 0x5f, 0x1d, 0x8b, 0x9f, + 0xd2, 0x90, 0x73, 0x84, 0x08, 0xc5, 0x38, 0x23, 0xbf, 0x02, 0x30, 0x25, 0x93, 0x6e, 0xd8, 0x63, + 0x26, 0x17, 0xfb, 0x0b, 0xbf, 0x34, 0x4b, 0x4b, 0x84, 0x4a, 0xd8, 0x63, 0x78, 0x9d, 0x8d, 0x8f, + 0xe8, 0x11, 0x64, 0x63, 0x55, 0xa4, 0x2e, 0x39, 0xe7, 0x3e, 0xd3, 0x49, 0x59, 0xc7, 0x90, 0xa8, + 
0x8e, 0xb9, 0xcf, 0xd0, 0x7d, 0x58, 0x1f, 0xf0, 0x01, 0x23, 0x72, 0x14, 0x31, 0x13, 0xf5, 0x9a, + 0x52, 0xb4, 0x47, 0x11, 0x2b, 0xfe, 0x95, 0x82, 0xf5, 0x89, 0x5b, 0xb4, 0x03, 0xf7, 0x1c, 0x8c, + 0x1b, 0x98, 0x54, 0x1a, 0x55, 0x87, 0x74, 0x4e, 0x5b, 0x4d, 0xa7, 0xe2, 0x1e, 0xbb, 0x4e, 0xd5, + 0xba, 0x85, 0xee, 0x01, 0x9a, 0xb1, 0x55, 0x9d, 0x63, 0xbb, 0x53, 0x6f, 0x5b, 0xb7, 0xd0, 0x36, + 0xdc, 0x9d, 0xd1, 0x37, 0xde, 0x3a, 0x98, 0xbc, 0xe9, 0x34, 0xda, 0xb6, 0x95, 0x42, 0x0f, 0x61, + 0x7b, 0x96, 0xd2, 0x68, 0x11, 0xbb, 0xe9, 0x92, 0xaa, 0x73, 0xea, 0xda, 0x75, 0x2b, 0x7d, 0xc9, + 0x63, 0xdb, 0x3d, 0x71, 0x1a, 0x9d, 0xb6, 0x95, 0xd9, 0x49, 0x5b, 0xa9, 0xe2, 0xdf, 0x2b, 0xb0, + 0xd2, 0x11, 0xfe, 0x09, 0x8d, 0xd4, 0xfd, 0x87, 0xc2, 0x27, 0x82, 0xf5, 0xd9, 0x47, 0x9d, 0xab, + 0x75, 0xbc, 0x36, 0x14, 0x3e, 0x56, 0x32, 0xaa, 0x43, 0x6e, 0x26, 0xfa, 0x58, 0x87, 0x9f, 0x3d, + 0x7c, 0xb2, 0xb8, 0x6a, 0x93, 0x9c, 0xc4, 0x26, 0xa3, 0xb5, 0x5b, 0x38, 0x3b, 0xcd, 0x54, 0x8c, + 0x5e, 0xcd, 0x75, 0x47, 0xf6, 0xba, 0xea, 0x6b, 0xc8, 0xd4, 0xc5, 0xb8, 0x87, 0xea, 0x90, 0xa3, + 0x11, 0x27, 0x2c, 0xe8, 0x45, 0x21, 0x0f, 0xa4, 0xee, 0xa0, 0xeb, 0xee, 0x62, 0x47, 0xdc, 0x31, + 0xb8, 0x99, 0xbb, 0xd0, 0xa9, 0x76, 0x41, 0x47, 0x2e, 0x7f, 0x69, 0x47, 0x4e, 0x26, 0x66, 0xe5, + 0x0b, 0x26, 0x66, 0xd1, 0xf4, 0xae, 0x7e, 0xf9, 0xf4, 0xc6, 0x70, 0x5f, 0xb0, 0x1e, 0x17, 0xac, + 0x2b, 0x89, 0x27, 0x65, 0x44, 0x04, 0x8b, 0xa3, 0x30, 0x88, 0x59, 0x32, 0x0b, 0x6b, 0xda, 0xf3, + 0x77, 0x0b, 0x3d, 0x27, 0xfd, 0x50, 0xc2, 0x86, 0x5e, 0x93, 0x32, 0xc2, 0x86, 0xac, 0x87, 0xa2, + 0x20, 0xae, 0xb1, 0x14, 0xff, 0x4d, 0x41, 0xe1, 0x3a, 0x1a, 0xda, 0x87, 0x27, 0xd8, 0xa9, 0xba, + 0xd8, 0xa9, 0xb4, 0x49, 0xad, 0xdd, 0x6e, 0x12, 0xec, 0xb4, 0x9a, 0x8d, 0xd3, 0x96, 0xb3, 0x68, + 0x0a, 0x1e, 0xc3, 0xa3, 0xcf, 0x81, 0x8f, 0x9e, 0x1d, 0x58, 0xa9, 0x9b, 0x41, 0x87, 0x56, 0xfa, + 0x66, 0xd0, 0x91, 0x95, 0xb9, 0x19, 0xf4, 0xdc, 0x5a, 0x7a, 0x9d, 0x87, 0x9c, 0x97, 0xf4, 0x90, + 0x9e, 0xf1, 0xe2, 0xa7, 0x0c, 0xa0, 
0xab, 0xbd, 0x8e, 0x10, 0x2c, 0x45, 0x54, 0x7a, 0x66, 0x84, + 0xf4, 0x19, 0xed, 0xc1, 0xe6, 0x30, 0xf2, 0x43, 0xda, 0x23, 0x4a, 0x34, 0x33, 0x96, 0xac, 0x90, + 0xdb, 0x89, 0xa1, 0x49, 0xa5, 0x97, 0x8c, 0xda, 0xef, 0x90, 0xd3, 0x05, 0xf3, 0x18, 0xed, 0x31, + 0x11, 0x17, 0x32, 0xbb, 0x99, 0xa7, 0xd9, 0xc3, 0x17, 0xff, 0x73, 0xd4, 0x4a, 0x2a, 0xef, 0xb5, + 0x84, 0xea, 0x04, 0x52, 0x8c, 0x70, 0xd6, 0x9b, 0x6a, 0xe6, 0x97, 0xd4, 0xd2, 0xfc, 0x92, 0x42, + 0x3f, 0x00, 0xb0, 0x8f, 0x11, 0x4f, 0x5e, 0x1a, 0x3d, 0x06, 0xd9, 0xc3, 0xed, 0xf1, 0x77, 0xc7, + 0x4f, 0x51, 0xa9, 0x6a, 0x9e, 0x22, 0x3c, 0x03, 0x46, 0x87, 0x70, 0x57, 0x24, 0x3d, 0x4d, 0x06, + 0x54, 0x76, 0x3d, 0x1e, 0xf4, 0x93, 0x3d, 0xa9, 0x46, 0x61, 0x0d, 0x6f, 0x19, 0xe3, 0x89, 0xb1, + 0xe9, 0x85, 0x79, 0x00, 0x77, 0x68, 0x14, 0xf9, 0xbc, 0xab, 0x5d, 0x10, 0xc1, 0x68, 0x8f, 0x9e, + 0xf9, 0x4c, 0x77, 0xfd, 0x1a, 0xde, 0x9a, 0xb1, 0x61, 0x63, 0xda, 0xf9, 0x09, 0xac, 0xcb, 0xf1, + 0xa9, 0x77, 0xe0, 0x03, 0x1b, 0x99, 0x74, 0xab, 0x23, 0xba, 0x03, 0xcb, 0x17, 0xd4, 0x1f, 0x8e, + 0x97, 0x74, 0x22, 0xfc, 0x98, 0x7e, 0x91, 0x2a, 0x3e, 0x83, 0x8d, 0xb9, 0xad, 0xa2, 0xb7, 0xba, + 0x56, 0x90, 0x99, 0x9a, 0x41, 0xa2, 0x52, 0x25, 0x29, 0x7e, 0x0f, 0xe8, 0xea, 0x0e, 0xb9, 0x99, + 0xf6, 0x4f, 0x1a, 0xb2, 0x35, 0x46, 0x7d, 0xe9, 0x55, 0x3c, 0xd6, 0xfd, 0x80, 0x9e, 0xc1, 0x9d, + 0x1e, 0x8f, 0x55, 0x0c, 0xaa, 0xae, 0xbe, 0xf4, 0x48, 0x57, 0xe9, 0x35, 0x73, 0x0d, 0x23, 0x63, + 0x9b, 0x65, 0x20, 0x58, 0xf2, 0xc2, 0x58, 0x9a, 0x18, 0xf4, 0x19, 0xed, 0xc3, 0x66, 0xc2, 0x1e, + 0x11, 0xe9, 0x09, 0x16, 0x7b, 0xa1, 0xdf, 0xd3, 0x2b, 0x74, 0x03, 0x5b, 0xc6, 0xd0, 0x1e, 0xeb, + 0x51, 0x19, 0xb6, 0x86, 0xc1, 0x55, 0xf8, 0x92, 0x86, 0xa3, 0x89, 0x69, 0x4a, 0xd8, 0x87, 0x4d, + 0xc1, 0x62, 0x49, 0x85, 0x9c, 0x81, 0x2f, 0x27, 0xde, 0x8d, 0x61, 0x0a, 0xfe, 0x19, 0xf2, 0x3a, + 0x02, 0xc2, 0x03, 0xc9, 0xc4, 0x05, 0xf5, 0x75, 0xa5, 0x3f, 0xdb, 0x2f, 0x1b, 0x9a, 0xe0, 0x1a, + 0x3c, 0x3a, 0x82, 0x55, 0xc9, 0x07, 0x2c, 0x1c, 0x4a, 0x5d, 0xf1, 0xcf, 
0x52, 0xc7, 0xc8, 0xe2, + 0x73, 0x58, 0xad, 0xf3, 0x33, 0x41, 0xc5, 0x48, 0x25, 0x28, 0xa0, 0x03, 0x36, 0x9e, 0x33, 0x75, + 0x46, 0x05, 0x58, 0xbd, 0x60, 0x22, 0x56, 0xed, 0x9b, 0xe4, 0x6d, 0x2c, 0xee, 0x49, 0xc8, 0xcf, + 0x6f, 0x4c, 0xb4, 0x0b, 0x0f, 0xec, 0x4e, 0xbb, 0x46, 0x8e, 0x6d, 0xb7, 0x4e, 0xec, 0x4a, 0xdb, + 0x6d, 0x9c, 0x5e, 0x5a, 0x42, 0x0f, 0x61, 0xfb, 0x0a, 0x62, 0xbc, 0x26, 0xac, 0x14, 0xfa, 0x06, + 0x1e, 0x2e, 0x70, 0xa0, 0x54, 0x0d, 0xec, 0xbe, 0x77, 0xaa, 0x56, 0x7a, 0xef, 0x0c, 0xac, 0xcb, + 0xfb, 0x1e, 0xdd, 0x85, 0xcd, 0x7a, 0xe3, 0x17, 0xf7, 0xf2, 0xc7, 0x10, 0xe4, 0x13, 0x75, 0xa3, + 0xa9, 0x3c, 0xd9, 0x75, 0x2b, 0x85, 0x6e, 0x43, 0x36, 0xd1, 0xd9, 0xd5, 0x13, 0xf7, 0xd4, 0x4a, + 0x4f, 0x41, 0xd8, 0x79, 0xd3, 0x71, 0xb1, 0x53, 0xb5, 0x32, 0x7b, 0x23, 0xd8, 0x98, 0x7b, 0x95, + 0xd4, 0x7b, 0xdf, 0x72, 0x2a, 0x1d, 0xec, 0x5c, 0xfd, 0x82, 0xd1, 0x4f, 0xff, 0x2a, 0x2c, 0xc8, + 0x19, 0xdd, 0xa9, 0xf3, 0xd6, 0xc1, 0x56, 0x0a, 0x6d, 0xc1, 0x6d, 0xa3, 0x99, 0x5c, 0x24, 0x8d, + 0x36, 0x61, 0xc3, 0x28, 0xed, 0xfa, 0x6f, 0xf6, 0xbb, 0x56, 0xf2, 0xf7, 0xf0, 0xba, 0x0f, 0x5f, + 0x75, 0xc3, 0xc1, 0xa2, 0xcd, 0xf4, 0x3a, 0x67, 0x47, 0xd1, 0x3b, 0x3a, 0xf0, 0x9b, 0xaa, 0x96, + 0xcd, 0xd4, 0xfb, 0x57, 0x06, 0xd4, 0x0f, 0x7d, 0x1a, 0xf4, 0x4b, 0xa1, 0xe8, 0x97, 0xfb, 0x2c, + 0xd0, 0x95, 0x2e, 0x27, 0x26, 0x1a, 0xf1, 0x78, 0xee, 0x3f, 0xfa, 0xe5, 0x44, 0x38, 0x5b, 0xd1, + 0xc0, 0xa3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x96, 0x5d, 0x26, 0x6c, 0x6f, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/appengine.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/appengine.pb.go new file mode 100644 index 000000000..d4ce9eedf --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/appengine.pb.go @@ -0,0 +1,1359 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/appengine/v1/appengine.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/iam/v1" +import _ "google.golang.org/genproto/googleapis/iam/v1" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/empty" +import google_protobuf5 "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Fields that should be returned when [Version][google.appengine.v1.Version] resources +// are retreived. +type VersionView int32 + +const ( + // Basic version information including scaling and inbound services, + // but not detailed deployment information. + VersionView_BASIC VersionView = 0 + // The information from `BASIC`, plus detailed information about the + // deployment. This format is required when creating resources, but + // is not returned in `Get` or `List` by default. + VersionView_FULL VersionView = 1 +) + +var VersionView_name = map[int32]string{ + 0: "BASIC", + 1: "FULL", +} +var VersionView_value = map[string]int32{ + "BASIC": 0, + "FULL": 1, +} + +func (x VersionView) String() string { + return proto.EnumName(VersionView_name, int32(x)) +} +func (VersionView) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Request message for `Applications.GetApplication`. +type GetApplicationRequest struct { + // Name of the Application resource to get. Example: `apps/myapp`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetApplicationRequest) Reset() { *m = GetApplicationRequest{} } +func (m *GetApplicationRequest) String() string { return proto.CompactTextString(m) } +func (*GetApplicationRequest) ProtoMessage() {} +func (*GetApplicationRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *GetApplicationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for 'Applications.RepairApplication'. +type RepairApplicationRequest struct { + // Name of the application to repair. Example: `apps/myapp` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *RepairApplicationRequest) Reset() { *m = RepairApplicationRequest{} } +func (m *RepairApplicationRequest) String() string { return proto.CompactTextString(m) } +func (*RepairApplicationRequest) ProtoMessage() {} +func (*RepairApplicationRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *RepairApplicationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `Services.ListServices`. +type ListServicesRequest struct { + // Name of the parent Application resource. Example: `apps/myapp`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Maximum results to return per page. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Continuation token for fetching the next page of results. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListServicesRequest) Reset() { *m = ListServicesRequest{} } +func (m *ListServicesRequest) String() string { return proto.CompactTextString(m) } +func (*ListServicesRequest) ProtoMessage() {} +func (*ListServicesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ListServicesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListServicesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListServicesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for `Services.ListServices`. +type ListServicesResponse struct { + // The services belonging to the requested application. + Services []*Service `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"` + // Continuation token for fetching the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListServicesResponse) Reset() { *m = ListServicesResponse{} } +func (m *ListServicesResponse) String() string { return proto.CompactTextString(m) } +func (*ListServicesResponse) ProtoMessage() {} +func (*ListServicesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *ListServicesResponse) GetServices() []*Service { + if m != nil { + return m.Services + } + return nil +} + +func (m *ListServicesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for `Services.GetService`. +type GetServiceRequest struct { + // Name of the resource requested. Example: `apps/myapp/services/default`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetServiceRequest) Reset() { *m = GetServiceRequest{} } +func (m *GetServiceRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceRequest) ProtoMessage() {} +func (*GetServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *GetServiceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `Services.UpdateService`. +type UpdateServiceRequest struct { + // Name of the resource to update. Example: `apps/myapp/services/default`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A Service resource containing the updated service. Only fields set in the + // field mask will be updated. + Service *Service `protobuf:"bytes,2,opt,name=service" json:"service,omitempty"` + // Standard field mask for the set of fields to be updated. + UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` + // Set to `true` to gradually shift traffic from one version to another + // single version. By default, traffic is shifted immediately. + // For gradual traffic migration, the target version + // must be located within instances that are configured for both + // [warmup requests](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#inboundservicetype) + // and + // [automatic scaling](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#automaticscaling). + // You must specify the + // [`shardBy`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services#shardby) + // field in the Service resource. Gradual traffic migration is not + // supported in the App Engine flexible environment. 
For examples, see + // [Migrating and Splitting Traffic](https://cloud.google.com/appengine/docs/admin-api/migrating-splitting-traffic). + MigrateTraffic bool `protobuf:"varint,4,opt,name=migrate_traffic,json=migrateTraffic" json:"migrate_traffic,omitempty"` +} + +func (m *UpdateServiceRequest) Reset() { *m = UpdateServiceRequest{} } +func (m *UpdateServiceRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateServiceRequest) ProtoMessage() {} +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *UpdateServiceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateServiceRequest) GetService() *Service { + if m != nil { + return m.Service + } + return nil +} + +func (m *UpdateServiceRequest) GetUpdateMask() *google_protobuf5.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateServiceRequest) GetMigrateTraffic() bool { + if m != nil { + return m.MigrateTraffic + } + return false +} + +// Request message for `Services.DeleteService`. +type DeleteServiceRequest struct { + // Name of the resource requested. Example: `apps/myapp/services/default`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteServiceRequest) Reset() { *m = DeleteServiceRequest{} } +func (m *DeleteServiceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteServiceRequest) ProtoMessage() {} +func (*DeleteServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *DeleteServiceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `Versions.ListVersions`. +type ListVersionsRequest struct { + // Name of the parent Service resource. Example: + // `apps/myapp/services/default`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Controls the set of fields returned in the `List` response. + View VersionView `protobuf:"varint,2,opt,name=view,enum=google.appengine.v1.VersionView" json:"view,omitempty"` + // Maximum results to return per page. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Continuation token for fetching the next page of results. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListVersionsRequest) Reset() { *m = ListVersionsRequest{} } +func (m *ListVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListVersionsRequest) ProtoMessage() {} +func (*ListVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *ListVersionsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListVersionsRequest) GetView() VersionView { + if m != nil { + return m.View + } + return VersionView_BASIC +} + +func (m *ListVersionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListVersionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for `Versions.ListVersions`. +type ListVersionsResponse struct { + // The versions belonging to the requested service. + Versions []*Version `protobuf:"bytes,1,rep,name=versions" json:"versions,omitempty"` + // Continuation token for fetching the next page of results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListVersionsResponse) Reset() { *m = ListVersionsResponse{} } +func (m *ListVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListVersionsResponse) ProtoMessage() {} +func (*ListVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *ListVersionsResponse) GetVersions() []*Version { + if m != nil { + return m.Versions + } + return nil +} + +func (m *ListVersionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for `Versions.GetVersion`. +type GetVersionRequest struct { + // Name of the resource requested. Example: + // `apps/myapp/services/default/versions/v1`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Controls the set of fields returned in the `Get` response. + View VersionView `protobuf:"varint,2,opt,name=view,enum=google.appengine.v1.VersionView" json:"view,omitempty"` +} + +func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } +func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionRequest) ProtoMessage() {} +func (*GetVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *GetVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetVersionRequest) GetView() VersionView { + if m != nil { + return m.View + } + return VersionView_BASIC +} + +// Request message for `Versions.CreateVersion`. +type CreateVersionRequest struct { + // Name of the parent resource to create this version under. Example: + // `apps/myapp/services/default`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Application deployment configuration. 
+ Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *CreateVersionRequest) Reset() { *m = CreateVersionRequest{} } +func (m *CreateVersionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVersionRequest) ProtoMessage() {} +func (*CreateVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *CreateVersionRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateVersionRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +// Request message for `Versions.UpdateVersion`. +type UpdateVersionRequest struct { + // Name of the resource to update. Example: + // `apps/myapp/services/default/versions/1`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A Version containing the updated resource. Only fields set in the field + // mask will be updated. + Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + // Standard field mask for the set of fields to be updated. + UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateVersionRequest) Reset() { *m = UpdateVersionRequest{} } +func (m *UpdateVersionRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateVersionRequest) ProtoMessage() {} +func (*UpdateVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *UpdateVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateVersionRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *UpdateVersionRequest) GetUpdateMask() *google_protobuf5.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request message for `Versions.DeleteVersion`. 
+type DeleteVersionRequest struct { + // Name of the resource requested. Example: + // `apps/myapp/services/default/versions/v1`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteVersionRequest) Reset() { *m = DeleteVersionRequest{} } +func (m *DeleteVersionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVersionRequest) ProtoMessage() {} +func (*DeleteVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *DeleteVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `Instances.ListInstances`. +type ListInstancesRequest struct { + // Name of the parent Version resource. Example: + // `apps/myapp/services/default/versions/v1`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Maximum results to return per page. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Continuation token for fetching the next page of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListInstancesRequest) Reset() { *m = ListInstancesRequest{} } +func (m *ListInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*ListInstancesRequest) ProtoMessage() {} +func (*ListInstancesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *ListInstancesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListInstancesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListInstancesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for `Instances.ListInstances`. +type ListInstancesResponse struct { + // The instances belonging to the requested version. 
+ Instances []*Instance `protobuf:"bytes,1,rep,name=instances" json:"instances,omitempty"` + // Continuation token for fetching the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListInstancesResponse) Reset() { *m = ListInstancesResponse{} } +func (m *ListInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*ListInstancesResponse) ProtoMessage() {} +func (*ListInstancesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +func (m *ListInstancesResponse) GetInstances() []*Instance { + if m != nil { + return m.Instances + } + return nil +} + +func (m *ListInstancesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for `Instances.GetInstance`. +type GetInstanceRequest struct { + // Name of the resource requested. Example: + // `apps/myapp/services/default/versions/v1/instances/instance-1`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetInstanceRequest) Reset() { *m = GetInstanceRequest{} } +func (m *GetInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*GetInstanceRequest) ProtoMessage() {} +func (*GetInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } + +func (m *GetInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `Instances.DeleteInstance`. +type DeleteInstanceRequest struct { + // Name of the resource requested. Example: + // `apps/myapp/services/default/versions/v1/instances/instance-1`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteInstanceRequest) Reset() { *m = DeleteInstanceRequest{} } +func (m *DeleteInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteInstanceRequest) ProtoMessage() {} +func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } + +func (m *DeleteInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `Instances.DebugInstance`. +type DebugInstanceRequest struct { + // Name of the resource requested. Example: + // `apps/myapp/services/default/versions/v1/instances/instance-1`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DebugInstanceRequest) Reset() { *m = DebugInstanceRequest{} } +func (m *DebugInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*DebugInstanceRequest) ProtoMessage() {} +func (*DebugInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } + +func (m *DebugInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*GetApplicationRequest)(nil), "google.appengine.v1.GetApplicationRequest") + proto.RegisterType((*RepairApplicationRequest)(nil), "google.appengine.v1.RepairApplicationRequest") + proto.RegisterType((*ListServicesRequest)(nil), "google.appengine.v1.ListServicesRequest") + proto.RegisterType((*ListServicesResponse)(nil), "google.appengine.v1.ListServicesResponse") + proto.RegisterType((*GetServiceRequest)(nil), "google.appengine.v1.GetServiceRequest") + proto.RegisterType((*UpdateServiceRequest)(nil), "google.appengine.v1.UpdateServiceRequest") + proto.RegisterType((*DeleteServiceRequest)(nil), "google.appengine.v1.DeleteServiceRequest") + proto.RegisterType((*ListVersionsRequest)(nil), "google.appengine.v1.ListVersionsRequest") + 
proto.RegisterType((*ListVersionsResponse)(nil), "google.appengine.v1.ListVersionsResponse") + proto.RegisterType((*GetVersionRequest)(nil), "google.appengine.v1.GetVersionRequest") + proto.RegisterType((*CreateVersionRequest)(nil), "google.appengine.v1.CreateVersionRequest") + proto.RegisterType((*UpdateVersionRequest)(nil), "google.appengine.v1.UpdateVersionRequest") + proto.RegisterType((*DeleteVersionRequest)(nil), "google.appengine.v1.DeleteVersionRequest") + proto.RegisterType((*ListInstancesRequest)(nil), "google.appengine.v1.ListInstancesRequest") + proto.RegisterType((*ListInstancesResponse)(nil), "google.appengine.v1.ListInstancesResponse") + proto.RegisterType((*GetInstanceRequest)(nil), "google.appengine.v1.GetInstanceRequest") + proto.RegisterType((*DeleteInstanceRequest)(nil), "google.appengine.v1.DeleteInstanceRequest") + proto.RegisterType((*DebugInstanceRequest)(nil), "google.appengine.v1.DebugInstanceRequest") + proto.RegisterEnum("google.appengine.v1.VersionView", VersionView_name, VersionView_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Instances service + +type InstancesClient interface { + // Lists the instances of a version. + ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) + // Gets instance information. + GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) + // Stops a running instance. + DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Enables debugging on a VM instance. 
This allows you to use the SSH + // command to connect to the virtual machine where the instance lives. + // While in "debug mode", the instance continues to serve live traffic. + // You should delete the instance when you are done debugging and then + // allow the system to take over and determine if another instance + // should be started. + // + // Only applicable for instances in App Engine flexible environment. + DebugInstance(ctx context.Context, in *DebugInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type instancesClient struct { + cc *grpc.ClientConn +} + +func NewInstancesClient(cc *grpc.ClientConn) InstancesClient { + return &instancesClient{cc} +} + +func (c *instancesClient) ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) { + out := new(ListInstancesResponse) + err := grpc.Invoke(ctx, "/google.appengine.v1.Instances/ListInstances", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instancesClient) GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) { + out := new(Instance) + err := grpc.Invoke(ctx, "/google.appengine.v1.Instances/GetInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instancesClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.appengine.v1.Instances/DeleteInstance", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *instancesClient) DebugInstance(ctx context.Context, in *DebugInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.appengine.v1.Instances/DebugInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Instances service + +type InstancesServer interface { + // Lists the instances of a version. + ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error) + // Gets instance information. + GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) + // Stops a running instance. + DeleteInstance(context.Context, *DeleteInstanceRequest) (*google_longrunning.Operation, error) + // Enables debugging on a VM instance. This allows you to use the SSH + // command to connect to the virtual machine where the instance lives. + // While in "debug mode", the instance continues to serve live traffic. + // You should delete the instance when you are done debugging and then + // allow the system to take over and determine if another instance + // should be started. + // + // Only applicable for instances in App Engine flexible environment. 
+ DebugInstance(context.Context, *DebugInstanceRequest) (*google_longrunning.Operation, error) +} + +func RegisterInstancesServer(s *grpc.Server, srv InstancesServer) { + s.RegisterService(&_Instances_serviceDesc, srv) +} + +func _Instances_ListInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInstancesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstancesServer).ListInstances(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Instances/ListInstances", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstancesServer).ListInstances(ctx, req.(*ListInstancesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Instances_GetInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstancesServer).GetInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Instances/GetInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstancesServer).GetInstance(ctx, req.(*GetInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Instances_DeleteInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstancesServer).DeleteInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.appengine.v1.Instances/DeleteInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstancesServer).DeleteInstance(ctx, req.(*DeleteInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Instances_DebugInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DebugInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstancesServer).DebugInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Instances/DebugInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstancesServer).DebugInstance(ctx, req.(*DebugInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Instances_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.appengine.v1.Instances", + HandlerType: (*InstancesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListInstances", + Handler: _Instances_ListInstances_Handler, + }, + { + MethodName: "GetInstance", + Handler: _Instances_GetInstance_Handler, + }, + { + MethodName: "DeleteInstance", + Handler: _Instances_DeleteInstance_Handler, + }, + { + MethodName: "DebugInstance", + Handler: _Instances_DebugInstance_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/appengine/v1/appengine.proto", +} + +// Client API for Versions service + +type VersionsClient interface { + // Lists the versions of a service. + ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) + // Gets the specified Version resource. + // By default, only a `BASIC_VIEW` will be returned. + // Specify the `FULL_VIEW` parameter to get the full resource. 
+ GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*Version, error) + // Deploys code and resource files to a new version. + CreateVersion(ctx context.Context, in *CreateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Updates the specified Version resource. + // You can specify the following fields depending on the App Engine + // environment and type of scaling that the version resource uses: + // + // * [`serving_status`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status): + // For Version resources that use basic scaling, manual scaling, or run in + // the App Engine flexible environment. + // * [`instance_class`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.instance_class): + // For Version resources that run in the App Engine standard environment. + // * [`automatic_scaling.min_idle_instances`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling): + // For Version resources that use automatic scaling and run in the App + // Engine standard environment. + // * [`automatic_scaling.max_idle_instances`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling): + // For Version resources that use automatic scaling and run in the App + // Engine standard environment. + UpdateVersion(ctx context.Context, in *UpdateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes an existing Version resource. 
+ DeleteVersion(ctx context.Context, in *DeleteVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type versionsClient struct { + cc *grpc.ClientConn +} + +func NewVersionsClient(cc *grpc.ClientConn) VersionsClient { + return &versionsClient{cc} +} + +func (c *versionsClient) ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) { + out := new(ListVersionsResponse) + err := grpc.Invoke(ctx, "/google.appengine.v1.Versions/ListVersions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *versionsClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*Version, error) { + out := new(Version) + err := grpc.Invoke(ctx, "/google.appengine.v1.Versions/GetVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *versionsClient) CreateVersion(ctx context.Context, in *CreateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.appengine.v1.Versions/CreateVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *versionsClient) UpdateVersion(ctx context.Context, in *UpdateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.appengine.v1.Versions/UpdateVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *versionsClient) DeleteVersion(ctx context.Context, in *DeleteVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.appengine.v1.Versions/DeleteVersion", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Versions service + +type VersionsServer interface { + // Lists the versions of a service. + ListVersions(context.Context, *ListVersionsRequest) (*ListVersionsResponse, error) + // Gets the specified Version resource. + // By default, only a `BASIC_VIEW` will be returned. + // Specify the `FULL_VIEW` parameter to get the full resource. + GetVersion(context.Context, *GetVersionRequest) (*Version, error) + // Deploys code and resource files to a new version. + CreateVersion(context.Context, *CreateVersionRequest) (*google_longrunning.Operation, error) + // Updates the specified Version resource. + // You can specify the following fields depending on the App Engine + // environment and type of scaling that the version resource uses: + // + // * [`serving_status`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.serving_status): + // For Version resources that use basic scaling, manual scaling, or run in + // the App Engine flexible environment. + // * [`instance_class`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.instance_class): + // For Version resources that run in the App Engine standard environment. + // * [`automatic_scaling.min_idle_instances`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling): + // For Version resources that use automatic scaling and run in the App + // Engine standard environment. + // * [`automatic_scaling.max_idle_instances`](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#Version.FIELDS.automatic_scaling): + // For Version resources that use automatic scaling and run in the App + // Engine standard environment. 
+ UpdateVersion(context.Context, *UpdateVersionRequest) (*google_longrunning.Operation, error) + // Deletes an existing Version resource. + DeleteVersion(context.Context, *DeleteVersionRequest) (*google_longrunning.Operation, error) +} + +func RegisterVersionsServer(s *grpc.Server, srv VersionsServer) { + s.RegisterService(&_Versions_serviceDesc, srv) +} + +func _Versions_ListVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VersionsServer).ListVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Versions/ListVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionsServer).ListVersions(ctx, req.(*ListVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Versions_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VersionsServer).GetVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Versions/GetVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionsServer).GetVersion(ctx, req.(*GetVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Versions_CreateVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(VersionsServer).CreateVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Versions/CreateVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionsServer).CreateVersion(ctx, req.(*CreateVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Versions_UpdateVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VersionsServer).UpdateVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Versions/UpdateVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionsServer).UpdateVersion(ctx, req.(*UpdateVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Versions_DeleteVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VersionsServer).DeleteVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Versions/DeleteVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionsServer).DeleteVersion(ctx, req.(*DeleteVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Versions_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.appengine.v1.Versions", + HandlerType: (*VersionsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListVersions", + Handler: _Versions_ListVersions_Handler, + }, + { + MethodName: "GetVersion", + Handler: 
_Versions_GetVersion_Handler, + }, + { + MethodName: "CreateVersion", + Handler: _Versions_CreateVersion_Handler, + }, + { + MethodName: "UpdateVersion", + Handler: _Versions_UpdateVersion_Handler, + }, + { + MethodName: "DeleteVersion", + Handler: _Versions_DeleteVersion_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/appengine/v1/appengine.proto", +} + +// Client API for Services service + +type ServicesClient interface { + // Lists all the services in the application. + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Gets the current configuration of the specified service. + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Updates the configuration of the specified service. + UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes the specified service and all enclosed versions. + DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type servicesClient struct { + cc *grpc.ClientConn +} + +func NewServicesClient(cc *grpc.ClientConn) ServicesClient { + return &servicesClient{cc} +} + +func (c *servicesClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := grpc.Invoke(ctx, "/google.appengine.v1.Services/ListServices", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *servicesClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := grpc.Invoke(ctx, "/google.appengine.v1.Services/GetService", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *servicesClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.appengine.v1.Services/UpdateService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *servicesClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.appengine.v1.Services/DeleteService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Services service + +type ServicesServer interface { + // Lists all the services in the application. + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Gets the current configuration of the specified service. + GetService(context.Context, *GetServiceRequest) (*Service, error) + // Updates the configuration of the specified service. + UpdateService(context.Context, *UpdateServiceRequest) (*google_longrunning.Operation, error) + // Deletes the specified service and all enclosed versions. 
+ DeleteService(context.Context, *DeleteServiceRequest) (*google_longrunning.Operation, error) +} + +func RegisterServicesServer(s *grpc.Server, srv ServicesServer) { + s.RegisterService(&_Services_serviceDesc, srv) +} + +func _Services_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServicesServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Services/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServicesServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Services_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServicesServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Services/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServicesServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Services_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServicesServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Services/UpdateService", + } + handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServicesServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Services_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServicesServer).DeleteService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Services/DeleteService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServicesServer).DeleteService(ctx, req.(*DeleteServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Services_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.appengine.v1.Services", + HandlerType: (*ServicesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListServices", + Handler: _Services_ListServices_Handler, + }, + { + MethodName: "GetService", + Handler: _Services_GetService_Handler, + }, + { + MethodName: "UpdateService", + Handler: _Services_UpdateService_Handler, + }, + { + MethodName: "DeleteService", + Handler: _Services_DeleteService_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/appengine/v1/appengine.proto", +} + +// Client API for Applications service + +type ApplicationsClient interface { + // Gets information about an application. + GetApplication(ctx context.Context, in *GetApplicationRequest, opts ...grpc.CallOption) (*Application, error) + // Recreates the required App Engine features for the application in your + // project, for example a Cloud Storage bucket or App Engine service account. + // Use this method if you receive an error message about a missing feature, + // for example "*Error retrieving the App Engine service account*". 
+ RepairApplication(ctx context.Context, in *RepairApplicationRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type applicationsClient struct { + cc *grpc.ClientConn +} + +func NewApplicationsClient(cc *grpc.ClientConn) ApplicationsClient { + return &applicationsClient{cc} +} + +func (c *applicationsClient) GetApplication(ctx context.Context, in *GetApplicationRequest, opts ...grpc.CallOption) (*Application, error) { + out := new(Application) + err := grpc.Invoke(ctx, "/google.appengine.v1.Applications/GetApplication", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *applicationsClient) RepairApplication(ctx context.Context, in *RepairApplicationRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.appengine.v1.Applications/RepairApplication", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Applications service + +type ApplicationsServer interface { + // Gets information about an application. + GetApplication(context.Context, *GetApplicationRequest) (*Application, error) + // Recreates the required App Engine features for the application in your + // project, for example a Cloud Storage bucket or App Engine service account. + // Use this method if you receive an error message about a missing feature, + // for example "*Error retrieving the App Engine service account*". 
+ RepairApplication(context.Context, *RepairApplicationRequest) (*google_longrunning.Operation, error) +} + +func RegisterApplicationsServer(s *grpc.Server, srv ApplicationsServer) { + s.RegisterService(&_Applications_serviceDesc, srv) +} + +func _Applications_GetApplication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetApplicationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ApplicationsServer).GetApplication(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Applications/GetApplication", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ApplicationsServer).GetApplication(ctx, req.(*GetApplicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Applications_RepairApplication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RepairApplicationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ApplicationsServer).RepairApplication(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.appengine.v1.Applications/RepairApplication", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ApplicationsServer).RepairApplication(ctx, req.(*RepairApplicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Applications_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.appengine.v1.Applications", + HandlerType: (*ApplicationsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetApplication", + Handler: _Applications_GetApplication_Handler, + }, + { + MethodName: "RepairApplication", + Handler: _Applications_RepairApplication_Handler, + }, + 
}, + Streams: []grpc.StreamDesc{}, + Metadata: "google/appengine/v1/appengine.proto", +} + +func init() { proto.RegisterFile("google/appengine/v1/appengine.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1134 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xdf, 0x6f, 0xdb, 0x54, + 0x14, 0xc6, 0x6d, 0xba, 0x25, 0x27, 0x6b, 0xb6, 0xde, 0xb6, 0x10, 0xbc, 0x76, 0x0a, 0x1e, 0x2c, + 0xa9, 0xa7, 0xc5, 0x9a, 0x07, 0xd3, 0x48, 0x01, 0xa9, 0xdd, 0xb4, 0x6a, 0x52, 0x11, 0x95, 0xbb, + 0xed, 0x01, 0x09, 0x55, 0x6e, 0x7a, 0x63, 0x5d, 0x9a, 0xd8, 0xc6, 0x76, 0xb2, 0x6e, 0x10, 0x21, + 0x31, 0x09, 0x01, 0x8f, 0x1b, 0x0f, 0x3c, 0x20, 0xed, 0x81, 0x7f, 0x86, 0x77, 0xfe, 0x01, 0x1e, + 0xf8, 0x43, 0x90, 0xaf, 0xef, 0xb5, 0x63, 0xc7, 0xbf, 0x54, 0xc4, 0x5b, 0x7c, 0xef, 0x77, 0xef, + 0xf9, 0xee, 0x77, 0x3e, 0xfb, 0x9c, 0x1b, 0xb8, 0x6e, 0x58, 0x96, 0x31, 0xc4, 0x8a, 0x6e, 0xdb, + 0xd8, 0x34, 0x88, 0x89, 0x95, 0xc9, 0xed, 0xe8, 0xa1, 0x6b, 0x3b, 0x96, 0x67, 0xa1, 0xd5, 0x00, + 0xd4, 0x8d, 0xc6, 0x27, 0xb7, 0xc5, 0x8d, 0x70, 0x25, 0x51, 0x74, 0xd3, 0xb4, 0x3c, 0xdd, 0x23, + 0x96, 0xe9, 0x06, 0x4b, 0xc4, 0x0f, 0x32, 0xf6, 0x1d, 0x92, 0x3e, 0xc5, 0x31, 0x98, 0x94, 0x06, + 0x23, 0xa6, 0xeb, 0xe9, 0x66, 0x9f, 0x45, 0x17, 0xdf, 0x4b, 0xc3, 0xb8, 0xd8, 0x99, 0x90, 0x7c, + 0xc8, 0x04, 0x3b, 0x6e, 0x14, 0xe9, 0x1a, 0x83, 0x10, 0x7d, 0x44, 0x63, 0xe8, 0xa3, 0x23, 0xdb, + 0x1a, 0x92, 0xfe, 0x73, 0x36, 0x2f, 0xc6, 0xe7, 0x63, 0x73, 0x5c, 0xa4, 0xa1, 0x65, 0x1a, 0xce, + 0xd8, 0x34, 0x89, 0x69, 0x28, 0x96, 0x8d, 0x9d, 0xd8, 0x89, 0xaf, 0x32, 0x10, 0x7d, 0x3a, 0x1e, + 0x0f, 0x14, 0x3c, 0xb2, 0x3d, 0xbe, 0x43, 0x2b, 0x39, 0x39, 0x20, 0x78, 0x78, 0x72, 0x34, 0xd2, + 0xdd, 0xd3, 0x00, 0x21, 0xdd, 0x84, 0xf5, 0x3d, 0xec, 0xed, 0x44, 0x0a, 0x69, 0xf8, 0x9b, 0x31, + 0x76, 0x3d, 0x84, 0xa0, 0x62, 0xea, 0x23, 0xdc, 0x14, 0x5a, 0x42, 0xa7, 0xa6, 0xd1, 0xdf, 0x52, + 0x17, 0x9a, 0x1a, 0xb6, 0x75, 0xe2, 0x94, 0xc4, 0x13, 0x58, 0xdd, 
0x27, 0xae, 0x77, 0x18, 0x88, + 0xe6, 0x72, 0xe8, 0xdb, 0x70, 0xc1, 0xd6, 0x1d, 0x6c, 0x7a, 0x0c, 0xcc, 0x9e, 0xd0, 0x55, 0xa8, + 0xd9, 0xba, 0x81, 0x8f, 0x5c, 0xf2, 0x02, 0x37, 0x17, 0x5a, 0x42, 0x67, 0x49, 0xab, 0xfa, 0x03, + 0x87, 0xe4, 0x05, 0x46, 0x9b, 0x00, 0x74, 0xd2, 0xb3, 0x4e, 0xb1, 0xd9, 0x5c, 0xa4, 0x0b, 0x29, + 0xfc, 0xb1, 0x3f, 0x20, 0x9d, 0xc1, 0x5a, 0x3c, 0x94, 0x6b, 0x5b, 0xa6, 0x8b, 0xd1, 0x3d, 0xa8, + 0xb2, 0x9c, 0xb9, 0x4d, 0xa1, 0xb5, 0xd8, 0xa9, 0xab, 0x1b, 0xdd, 0x14, 0x5b, 0x75, 0xd9, 0x42, + 0x2d, 0x44, 0xa3, 0x1b, 0x70, 0xd9, 0xc4, 0x67, 0xde, 0xd1, 0x4c, 0xd4, 0x05, 0x1a, 0x75, 0xd9, + 0x1f, 0x3e, 0x08, 0x23, 0xb7, 0x61, 0x65, 0x0f, 0xf3, 0xc0, 0x79, 0x6a, 0xfc, 0x29, 0xc0, 0xda, + 0x13, 0xfb, 0x44, 0xf7, 0x70, 0x31, 0x18, 0xdd, 0x85, 0x8b, 0x8c, 0x09, 0x8d, 0x5a, 0x44, 0x9b, + 0x83, 0xd1, 0x36, 0xd4, 0xc7, 0x34, 0x06, 0x4d, 0x32, 0xd5, 0xa9, 0xae, 0x8a, 0x7c, 0x2d, 0xf7, + 0x41, 0xf7, 0xa1, 0xef, 0x83, 0xcf, 0x75, 0xf7, 0x54, 0x83, 0x00, 0xee, 0xff, 0x46, 0x6d, 0xb8, + 0x3c, 0x22, 0x86, 0xe3, 0xaf, 0xf6, 0x1c, 0x7d, 0x30, 0x20, 0xfd, 0x66, 0xa5, 0x25, 0x74, 0xaa, + 0x5a, 0x83, 0x0d, 0x3f, 0x0e, 0x46, 0x25, 0x19, 0xd6, 0x1e, 0xe0, 0x21, 0x2e, 0x73, 0x12, 0xe9, + 0x8d, 0x10, 0xb8, 0xe0, 0x69, 0xf0, 0x5e, 0x14, 0xba, 0xe0, 0x43, 0xa8, 0x4c, 0x08, 0x7e, 0x46, + 0x8f, 0xdd, 0x50, 0x5b, 0xa9, 0xc7, 0x66, 0x7b, 0x3d, 0x25, 0xf8, 0x99, 0x46, 0xd1, 0x71, 0xef, + 0x2c, 0xe6, 0x7a, 0xa7, 0x92, 0xe1, 0x9d, 0x88, 0x60, 0xe4, 0x1d, 0xf6, 0x32, 0xe7, 0x7b, 0x87, + 0x2d, 0xd4, 0x42, 0x74, 0x69, 0xef, 0x7c, 0x45, 0xbd, 0xc3, 0xd7, 0xe7, 0xd8, 0xe1, 0x5c, 0xa2, + 0x48, 0x03, 0x58, 0xbb, 0xef, 0x60, 0xdd, 0xc3, 0x89, 0x08, 0x59, 0xd2, 0xdf, 0x85, 0x8b, 0xec, + 0x08, 0xb9, 0xa6, 0xe3, 0xbb, 0x71, 0xb0, 0x9f, 0x62, 0xe6, 0xec, 0x12, 0x47, 0x39, 0x67, 0x90, + 0xff, 0xe4, 0xec, 0xc8, 0xb0, 0xc5, 0x04, 0xa5, 0xaf, 0x03, 0x3b, 0x3c, 0x62, 0xe5, 0xe0, 0x7f, + 0xfd, 0x6c, 0x7d, 0x07, 0xeb, 0x89, 0x58, 0xcc, 0x7b, 0xdb, 0x50, 0xe3, 0xf5, 0x88, 0x9b, 0x6f, + 0x33, 
0x55, 0x27, 0xbe, 0x54, 0x8b, 0xf0, 0xa5, 0xed, 0xd7, 0x01, 0xb4, 0x87, 0xc3, 0xe0, 0x79, + 0x9a, 0xdc, 0x84, 0xf5, 0x40, 0xbf, 0x32, 0x60, 0x2a, 0xf6, 0xf1, 0xd8, 0x28, 0x81, 0x95, 0x25, + 0xa8, 0xcf, 0xf8, 0x16, 0xd5, 0x60, 0x69, 0x77, 0xe7, 0xf0, 0xd1, 0xfd, 0x2b, 0x6f, 0xa1, 0x2a, + 0x54, 0x1e, 0x3e, 0xd9, 0xdf, 0xbf, 0x22, 0xa8, 0x2f, 0x97, 0xa0, 0x16, 0x2a, 0x84, 0xfe, 0x10, + 0x60, 0x39, 0xa6, 0x19, 0xda, 0x4a, 0x15, 0x26, 0x2d, 0x87, 0xa2, 0x5c, 0x06, 0x1a, 0xa4, 0x40, + 0xda, 0xfe, 0xe1, 0xaf, 0x7f, 0x5e, 0x2f, 0x7c, 0x84, 0xee, 0xf8, 0x85, 0xf9, 0xdb, 0x20, 0xd9, + 0x9f, 0xea, 0xb6, 0xed, 0x2a, 0x32, 0xef, 0x03, 0xfc, 0x9f, 0xfc, 0xa5, 0x57, 0xe4, 0xa9, 0x12, + 0xa5, 0xe0, 0x95, 0x00, 0xf5, 0x19, 0x6d, 0x51, 0x3b, 0x35, 0xf0, 0xbc, 0xfa, 0x62, 0x7e, 0x96, + 0x13, 0xa4, 0x7c, 0x09, 0x73, 0x29, 0x45, 0x8c, 0x14, 0x79, 0x8a, 0x7e, 0x13, 0xa0, 0x11, 0x4f, + 0x23, 0x4a, 0x17, 0x24, 0x35, 0xd7, 0x11, 0xb5, 0x99, 0x86, 0xa4, 0xfb, 0x05, 0x6f, 0x48, 0x38, + 0x35, 0xf9, 0x5c, 0xd4, 0xde, 0x08, 0xb0, 0x1c, 0x33, 0x4d, 0x46, 0x56, 0xd3, 0x8c, 0x55, 0x44, + 0xec, 0x01, 0x25, 0xf6, 0x99, 0xf4, 0xf1, 0x39, 0x88, 0xf5, 0x4e, 0xfc, 0x80, 0x3d, 0x41, 0x56, + 0xff, 0x5e, 0x82, 0x2a, 0x2f, 0x11, 0xe8, 0x57, 0x01, 0x2e, 0xcd, 0xd6, 0x0c, 0xd4, 0xc9, 0x34, + 0x56, 0xa2, 0xee, 0x89, 0x5b, 0x25, 0x90, 0xcc, 0x81, 0x0a, 0x25, 0xbe, 0x85, 0xda, 0xb9, 0x0e, + 0x9c, 0x86, 0xdc, 0xd1, 0x4b, 0x01, 0x20, 0x2a, 0x28, 0xe8, 0x46, 0x96, 0xe9, 0xe2, 0x5f, 0x41, + 0x31, 0xf7, 0x0b, 0x9c, 0x60, 0x51, 0x28, 0xdf, 0x14, 0xbd, 0x16, 0x60, 0x39, 0x56, 0x77, 0x32, + 0x72, 0x99, 0x56, 0x9b, 0x8a, 0x72, 0x79, 0x8f, 0x92, 0x51, 0xa5, 0xb2, 0x92, 0xf4, 0xc2, 0xfa, + 0xe1, 0xb3, 0x8a, 0x15, 0xa9, 0x0c, 0x56, 0x69, 0x85, 0xac, 0x24, 0x2b, 0xb5, 0xac, 0x44, 0x11, + 0xab, 0x5f, 0xa8, 0xef, 0x67, 0x2a, 0x53, 0xa6, 0xef, 0xe7, 0xab, 0x57, 0x11, 0x2b, 0x96, 0x38, + 0xb9, 0x2c, 0x2b, 0xf5, 0xf7, 0x0a, 0x54, 0x79, 0x07, 0x8d, 0x7e, 0x66, 0x16, 0x0f, 0x07, 0xb2, + 0x2d, 0x9e, 0x68, 0xf0, 0x73, 0x2c, 0x9e, 
0xec, 0xcf, 0xa5, 0xf7, 0x29, 0xc7, 0x6b, 0x68, 0x63, + 0x3e, 0x9f, 0xd3, 0x90, 0x26, 0x3a, 0xa3, 0xb6, 0x66, 0x8b, 0xb3, 0x6d, 0x1d, 0xef, 0x46, 0xc5, + 0xdc, 0x96, 0x39, 0x11, 0x39, 0x5d, 0x9d, 0x29, 0xfa, 0x29, 0x74, 0x0d, 0x8f, 0x9e, 0xe7, 0x9a, + 0x04, 0x81, 0x82, 0xfc, 0xdc, 0xa2, 0x0c, 0xda, 0x6a, 0x2e, 0x83, 0x5e, 0xd8, 0xda, 0x7f, 0xcf, + 0x9d, 0x92, 0xcf, 0x24, 0xad, 0x31, 0x2f, 0x62, 0xc2, 0xb4, 0x90, 0x73, 0x99, 0xa8, 0xaf, 0x16, + 0xe0, 0xd2, 0xcc, 0xcd, 0xcf, 0x45, 0xcf, 0xa1, 0x11, 0xbf, 0x3c, 0x66, 0x94, 0x93, 0xd4, 0x1b, + 0xa6, 0x98, 0xde, 0xc5, 0xce, 0x00, 0xa5, 0x77, 0x29, 0xad, 0x55, 0xb4, 0x92, 0xa4, 0x35, 0x45, + 0x3f, 0x0a, 0xb0, 0x32, 0x77, 0x17, 0x45, 0xb7, 0x52, 0xb7, 0xcc, 0xba, 0xb3, 0x16, 0xa9, 0x72, + 0x9d, 0x86, 0xdf, 0x94, 0x9a, 0x73, 0xe1, 0x7b, 0x0e, 0xdd, 0xb2, 0x27, 0xc8, 0xbb, 0x04, 0xde, + 0xe9, 0x5b, 0xa3, 0xb4, 0xb8, 0xbb, 0x8d, 0x1d, 0xfe, 0x74, 0xe0, 0x77, 0xa7, 0x07, 0xc2, 0x97, + 0x9f, 0x30, 0x98, 0x61, 0x0d, 0x75, 0xd3, 0xe8, 0x5a, 0x8e, 0xa1, 0x18, 0xd8, 0xa4, 0xbd, 0xab, + 0x12, 0x4c, 0xe9, 0x36, 0x71, 0x63, 0xff, 0x27, 0x6c, 0x87, 0x0f, 0xc7, 0x17, 0x28, 0xf0, 0xce, + 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x89, 0x36, 0x22, 0x8f, 0x3d, 0x11, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/application.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/application.pb.go new file mode 100644 index 000000000..e029d64d0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/application.pb.go @@ -0,0 +1,222 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/v1/application.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// An Application resource contains the top-level configuration of an App +// Engine application. +type Application struct { + // Full path to the Application resource in the API. + // Example: `apps/myapp`. + // + // @OutputOnly + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Identifier of the Application resource. This identifier is equivalent + // to the project ID of the Google Cloud Platform project where you want to + // deploy your application. + // Example: `myapp`. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // HTTP path dispatch rules for requests to the application that do not + // explicitly target a service or version. Rules are order-dependent. + // + // @OutputOnly + DispatchRules []*UrlDispatchRule `protobuf:"bytes,3,rep,name=dispatch_rules,json=dispatchRules" json:"dispatch_rules,omitempty"` + // Google Apps authentication domain that controls which users can access + // this application. + // + // Defaults to open access for any Google Account. + AuthDomain string `protobuf:"bytes,6,opt,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` + // Location from which this application will be run. Application instances + // will run out of data centers in the chosen location, which is also where + // all of the application's end user content is stored. + // + // Defaults to `us-central`. + // + // Options are: + // + // `us-central` - Central US + // + // `europe-west` - Western Europe + // + // `us-east1` - Eastern US + LocationId string `protobuf:"bytes,7,opt,name=location_id,json=locationId" json:"location_id,omitempty"` + // Google Cloud Storage bucket that can be used for storing files + // associated with this application. This bucket is associated with the + // application and can be used by the gcloud deployment commands. 
+ // + // @OutputOnly + CodeBucket string `protobuf:"bytes,8,opt,name=code_bucket,json=codeBucket" json:"code_bucket,omitempty"` + // Cookie expiration policy for this application. + // + // @OutputOnly + DefaultCookieExpiration *google_protobuf1.Duration `protobuf:"bytes,9,opt,name=default_cookie_expiration,json=defaultCookieExpiration" json:"default_cookie_expiration,omitempty"` + // Hostname used to reach this application, as resolved by App Engine. + // + // @OutputOnly + DefaultHostname string `protobuf:"bytes,11,opt,name=default_hostname,json=defaultHostname" json:"default_hostname,omitempty"` + // Google Cloud Storage bucket that can be used by this application to store + // content. + // + // @OutputOnly + DefaultBucket string `protobuf:"bytes,12,opt,name=default_bucket,json=defaultBucket" json:"default_bucket,omitempty"` +} + +func (m *Application) Reset() { *m = Application{} } +func (m *Application) String() string { return proto.CompactTextString(m) } +func (*Application) ProtoMessage() {} +func (*Application) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *Application) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Application) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Application) GetDispatchRules() []*UrlDispatchRule { + if m != nil { + return m.DispatchRules + } + return nil +} + +func (m *Application) GetAuthDomain() string { + if m != nil { + return m.AuthDomain + } + return "" +} + +func (m *Application) GetLocationId() string { + if m != nil { + return m.LocationId + } + return "" +} + +func (m *Application) GetCodeBucket() string { + if m != nil { + return m.CodeBucket + } + return "" +} + +func (m *Application) GetDefaultCookieExpiration() *google_protobuf1.Duration { + if m != nil { + return m.DefaultCookieExpiration + } + return nil +} + +func (m *Application) GetDefaultHostname() string { + if m != nil { + return m.DefaultHostname + } + 
return "" +} + +func (m *Application) GetDefaultBucket() string { + if m != nil { + return m.DefaultBucket + } + return "" +} + +// Rules to match an HTTP request and dispatch that request to a service. +type UrlDispatchRule struct { + // Domain name to match against. The wildcard "`*`" is supported if + // specified before a period: "`*.`". + // + // Defaults to matching all domains: "`*`". + Domain string `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` + // Pathname within the host. Must start with a "`/`". A + // single "`*`" can be included at the end of the path. The sum + // of the lengths of the domain and path may not exceed 100 + // characters. + Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` + // Resource ID of a service in this application that should + // serve the matched request. The service must already + // exist. Example: `default`. + Service string `protobuf:"bytes,3,opt,name=service" json:"service,omitempty"` +} + +func (m *UrlDispatchRule) Reset() { *m = UrlDispatchRule{} } +func (m *UrlDispatchRule) String() string { return proto.CompactTextString(m) } +func (*UrlDispatchRule) ProtoMessage() {} +func (*UrlDispatchRule) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *UrlDispatchRule) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *UrlDispatchRule) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *UrlDispatchRule) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +func init() { + proto.RegisterType((*Application)(nil), "google.appengine.v1.Application") + proto.RegisterType((*UrlDispatchRule)(nil), "google.appengine.v1.UrlDispatchRule") +} + +func init() { proto.RegisterFile("google/appengine/v1/application.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 409 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 
0x5f, 0x6b, 0xdb, 0x30, + 0x14, 0xc5, 0x71, 0x3c, 0x92, 0x45, 0x5e, 0xfe, 0xa0, 0xc1, 0xa2, 0x84, 0xb1, 0x85, 0xb0, 0x40, + 0xf6, 0x62, 0x93, 0xec, 0x71, 0x7b, 0x59, 0x9a, 0x42, 0x4b, 0x5f, 0x82, 0x21, 0x14, 0xfa, 0x62, + 0x14, 0x4b, 0xb1, 0x45, 0x14, 0xc9, 0xd8, 0x72, 0xe8, 0x67, 0xe8, 0xa7, 0x2e, 0x96, 0x64, 0x37, + 0x2d, 0x79, 0xd3, 0x3d, 0xfa, 0x1d, 0xdd, 0xeb, 0x73, 0x0d, 0xe6, 0x89, 0x94, 0x09, 0xa7, 0x01, + 0xce, 0x32, 0x2a, 0x12, 0x26, 0x68, 0x70, 0x5e, 0x56, 0x05, 0x67, 0x31, 0x56, 0x4c, 0x0a, 0x3f, + 0xcb, 0xa5, 0x92, 0xf0, 0xab, 0xc1, 0xfc, 0x06, 0xf3, 0xcf, 0xcb, 0xc9, 0xf7, 0xc6, 0xcb, 0x02, + 0x2c, 0x84, 0x54, 0xda, 0x51, 0x18, 0xcb, 0xe4, 0x87, 0xbd, 0xd5, 0xd5, 0xbe, 0x3c, 0x04, 0xa4, + 0xcc, 0x2f, 0x9e, 0x9c, 0xbd, 0xb8, 0xc0, 0xfb, 0xff, 0xd6, 0x08, 0x42, 0xf0, 0x49, 0xe0, 0x13, + 0x45, 0xce, 0xd4, 0x59, 0x74, 0x43, 0x7d, 0x86, 0x7d, 0xd0, 0x62, 0x04, 0xb5, 0xb4, 0xd2, 0x62, + 0x04, 0x3e, 0x80, 0x3e, 0x61, 0x45, 0x86, 0x55, 0x9c, 0x46, 0x79, 0xc9, 0x69, 0x81, 0xdc, 0xa9, + 0xbb, 0xf0, 0x56, 0xbf, 0xfc, 0x2b, 0xf3, 0xf9, 0xbb, 0x9c, 0x6f, 0x2c, 0x1d, 0x96, 0x9c, 0x86, + 0x3d, 0x72, 0x51, 0x15, 0xf0, 0x27, 0xf0, 0x70, 0xa9, 0xd2, 0x88, 0xc8, 0x13, 0x66, 0x02, 0xb5, + 0x75, 0x17, 0x50, 0x49, 0x1b, 0xad, 0x54, 0x00, 0x97, 0x66, 0xba, 0x88, 0x11, 0xd4, 0x31, 0x40, + 0x2d, 0xdd, 0x93, 0x0a, 0x88, 0x25, 0xa1, 0xd1, 0xbe, 0x8c, 0x8f, 0x54, 0xa1, 0xcf, 0x06, 0xa8, + 0xa4, 0xb5, 0x56, 0xe0, 0x0e, 0x8c, 0x09, 0x3d, 0xe0, 0x92, 0xab, 0x28, 0x96, 0xf2, 0xc8, 0x68, + 0x44, 0x9f, 0x33, 0x66, 0x62, 0x40, 0xdd, 0xa9, 0xb3, 0xf0, 0x56, 0xe3, 0x7a, 0xf4, 0x3a, 0x27, + 0x7f, 0x63, 0x73, 0x0a, 0x47, 0xd6, 0x7b, 0xa3, 0xad, 0xb7, 0x8d, 0x13, 0xfe, 0x06, 0xc3, 0xfa, + 0xd9, 0x54, 0x16, 0x4a, 0xc7, 0xe6, 0xe9, 0xe6, 0x03, 0xab, 0xdf, 0x59, 0x19, 0xce, 0x41, 0xbf, + 0x46, 0xed, 0x94, 0x5f, 0x34, 0xd8, 0xb3, 0xaa, 0x19, 0x74, 0xf6, 0x08, 0x06, 0x1f, 0xd2, 0x82, + 0xdf, 0x40, 0xdb, 0x26, 0x63, 0x36, 0x62, 0xab, 0x6a, 0x4f, 0x19, 0x56, 0xa9, 0xdd, 0x8a, 0x3e, + 0x43, 0x04, 
0x3a, 0x05, 0xcd, 0xcf, 0x2c, 0xa6, 0xc8, 0xd5, 0x72, 0x5d, 0xae, 0x8f, 0x60, 0x14, + 0xcb, 0xd3, 0xb5, 0xf5, 0xac, 0x87, 0x17, 0xdb, 0xdf, 0x56, 0x1f, 0xbf, 0x75, 0x9e, 0xfe, 0x59, + 0x30, 0x91, 0x1c, 0x8b, 0xc4, 0x97, 0x79, 0x12, 0x24, 0x54, 0xe8, 0x68, 0x02, 0x73, 0x85, 0x33, + 0x56, 0xbc, 0xfb, 0x5b, 0xff, 0x36, 0xc5, 0xbe, 0xad, 0xc1, 0x3f, 0xaf, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x7a, 0x51, 0x2e, 0x3c, 0xd5, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/audit_data.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/audit_data.pb.go new file mode 100644 index 000000000..3bd16a748 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/audit_data.pb.go @@ -0,0 +1,208 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/v1/audit_data.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/iam/v1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// App Engine admin service audit log. +type AuditData struct { + // Detailed information about methods that require it. Does not include + // simple Get, List or Delete methods because all significant information + // (resource name, number of returned elements for List operations) is already + // included in parent audit log message. 
+ // + // Types that are valid to be assigned to Method: + // *AuditData_UpdateService + // *AuditData_CreateVersion + Method isAuditData_Method `protobuf_oneof:"method"` +} + +func (m *AuditData) Reset() { *m = AuditData{} } +func (m *AuditData) String() string { return proto.CompactTextString(m) } +func (*AuditData) ProtoMessage() {} +func (*AuditData) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +type isAuditData_Method interface { + isAuditData_Method() +} + +type AuditData_UpdateService struct { + UpdateService *UpdateServiceMethod `protobuf:"bytes,1,opt,name=update_service,json=updateService,oneof"` +} +type AuditData_CreateVersion struct { + CreateVersion *CreateVersionMethod `protobuf:"bytes,2,opt,name=create_version,json=createVersion,oneof"` +} + +func (*AuditData_UpdateService) isAuditData_Method() {} +func (*AuditData_CreateVersion) isAuditData_Method() {} + +func (m *AuditData) GetMethod() isAuditData_Method { + if m != nil { + return m.Method + } + return nil +} + +func (m *AuditData) GetUpdateService() *UpdateServiceMethod { + if x, ok := m.GetMethod().(*AuditData_UpdateService); ok { + return x.UpdateService + } + return nil +} + +func (m *AuditData) GetCreateVersion() *CreateVersionMethod { + if x, ok := m.GetMethod().(*AuditData_CreateVersion); ok { + return x.CreateVersion + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AuditData) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AuditData_OneofMarshaler, _AuditData_OneofUnmarshaler, _AuditData_OneofSizer, []interface{}{ + (*AuditData_UpdateService)(nil), + (*AuditData_CreateVersion)(nil), + } +} + +func _AuditData_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AuditData) + // method + switch x := m.Method.(type) { + case *AuditData_UpdateService: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UpdateService); err != nil { + return err + } + case *AuditData_CreateVersion: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CreateVersion); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("AuditData.Method has unexpected type %T", x) + } + return nil +} + +func _AuditData_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AuditData) + switch tag { + case 1: // method.update_service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UpdateServiceMethod) + err := b.DecodeMessage(msg) + m.Method = &AuditData_UpdateService{msg} + return true, err + case 2: // method.create_version + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CreateVersionMethod) + err := b.DecodeMessage(msg) + m.Method = &AuditData_CreateVersion{msg} + return true, err + default: + return false, nil + } +} + +func _AuditData_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AuditData) + // method + switch x := m.Method.(type) { + case *AuditData_UpdateService: + s := proto.Size(x.UpdateService) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_CreateVersion: + s := proto.Size(x.CreateVersion) + n += proto.SizeVarint(2<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Detailed information about UpdateService call. +type UpdateServiceMethod struct { + // Update service request. + Request *UpdateServiceRequest `protobuf:"bytes,1,opt,name=request" json:"request,omitempty"` +} + +func (m *UpdateServiceMethod) Reset() { *m = UpdateServiceMethod{} } +func (m *UpdateServiceMethod) String() string { return proto.CompactTextString(m) } +func (*UpdateServiceMethod) ProtoMessage() {} +func (*UpdateServiceMethod) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *UpdateServiceMethod) GetRequest() *UpdateServiceRequest { + if m != nil { + return m.Request + } + return nil +} + +// Detailed information about CreateVersion call. +type CreateVersionMethod struct { + // Create version request. + Request *CreateVersionRequest `protobuf:"bytes,1,opt,name=request" json:"request,omitempty"` +} + +func (m *CreateVersionMethod) Reset() { *m = CreateVersionMethod{} } +func (m *CreateVersionMethod) String() string { return proto.CompactTextString(m) } +func (*CreateVersionMethod) ProtoMessage() {} +func (*CreateVersionMethod) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } + +func (m *CreateVersionMethod) GetRequest() *CreateVersionRequest { + if m != nil { + return m.Request + } + return nil +} + +func init() { + proto.RegisterType((*AuditData)(nil), "google.appengine.v1.AuditData") + proto.RegisterType((*UpdateServiceMethod)(nil), "google.appengine.v1.UpdateServiceMethod") + proto.RegisterType((*CreateVersionMethod)(nil), "google.appengine.v1.CreateVersionMethod") +} + +func init() { proto.RegisterFile("google/appengine/v1/audit_data.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 290 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xb1, 0x4e, 0xc3, 0x30, + 
0x10, 0x86, 0x09, 0x43, 0x01, 0x23, 0x3a, 0xa4, 0x03, 0x55, 0x07, 0x84, 0x0a, 0x43, 0x59, 0x1c, + 0x15, 0x46, 0x58, 0x48, 0x19, 0x58, 0x90, 0x4a, 0x10, 0x0c, 0x5d, 0xa2, 0x23, 0x39, 0x19, 0x4b, + 0x49, 0x6c, 0x1c, 0x27, 0x12, 0xcf, 0xc6, 0xcb, 0x21, 0xdb, 0x21, 0xb4, 0xc8, 0x82, 0x8e, 0xb9, + 0xfb, 0xfe, 0x2f, 0xbf, 0x74, 0x26, 0xe7, 0x4c, 0x08, 0x56, 0x60, 0x04, 0x52, 0x62, 0xc5, 0x78, + 0x85, 0x51, 0x3b, 0x8f, 0xa0, 0xc9, 0xb9, 0x4e, 0x73, 0xd0, 0x40, 0xa5, 0x12, 0x5a, 0x84, 0x23, + 0x47, 0xd1, 0x9e, 0xa2, 0xed, 0x7c, 0x72, 0xe6, 0x8d, 0xf6, 0x84, 0x4d, 0x4e, 0x4e, 0x3a, 0x88, + 0x43, 0x69, 0xd6, 0x1c, 0xca, 0x54, 0x8a, 0x82, 0x67, 0x1f, 0x6e, 0x3f, 0xfd, 0x0c, 0xc8, 0xc1, + 0xad, 0xf9, 0xdd, 0x1d, 0x68, 0x08, 0x1f, 0xc9, 0xb0, 0x91, 0x39, 0x68, 0x4c, 0x6b, 0x54, 0x2d, + 0xcf, 0x70, 0x1c, 0x9c, 0x06, 0xb3, 0xc3, 0xcb, 0x19, 0xf5, 0x14, 0xa0, 0xcf, 0x16, 0x7d, 0x72, + 0xe4, 0x03, 0xea, 0x37, 0x91, 0xdf, 0xef, 0x24, 0x47, 0xcd, 0xfa, 0xd8, 0x28, 0x33, 0x85, 0x46, + 0xd9, 0xa2, 0xaa, 0xb9, 0xa8, 0xc6, 0xbb, 0x7f, 0x28, 0x17, 0x16, 0x7d, 0x71, 0xe4, 0x8f, 0x32, + 0x5b, 0x1f, 0xc7, 0xfb, 0x64, 0x50, 0xda, 0xd5, 0x74, 0x45, 0x46, 0x9e, 0x12, 0xe1, 0x82, 0xec, + 0x29, 0x7c, 0x6f, 0xb0, 0xd6, 0x5d, 0xff, 0x8b, 0xff, 0xfb, 0x27, 0x2e, 0x90, 0x7c, 0x27, 0x8d, + 0xdb, 0xd3, 0x66, 0x5b, 0xf7, 0x46, 0xf4, 0xb7, 0x3b, 0xe6, 0xe4, 0x38, 0x13, 0xa5, 0x2f, 0x18, + 0x0f, 0xfb, 0x6b, 0x2c, 0xcd, 0x81, 0x96, 0xc1, 0xea, 0xa6, 0xc3, 0x98, 0x28, 0xa0, 0x62, 0x54, + 0x28, 0x16, 0x31, 0xac, 0xec, 0xf9, 0x22, 0xb7, 0x02, 0xc9, 0xeb, 0x8d, 0x67, 0x70, 0xdd, 0x7f, + 0xbc, 0x0e, 0x2c, 0x78, 0xf5, 0x15, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x56, 0x80, 0x49, 0x69, 0x02, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/deploy.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/deploy.pb.go new file mode 100644 index 000000000..46ce408d9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/deploy.pb.go @@ -0,0 +1,183 @@ +// 
Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/v1/deploy.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Code and application artifacts used to deploy a version to App Engine. +type Deployment struct { + // Manifest of the files stored in Google Cloud Storage that are included + // as part of this version. All files must be readable using the + // credentials supplied with this call. + Files map[string]*FileInfo `protobuf:"bytes,1,rep,name=files" json:"files,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // A Docker image that App Engine uses to run the version. + // Only applicable for instances in App Engine flexible environment. + Container *ContainerInfo `protobuf:"bytes,2,opt,name=container" json:"container,omitempty"` + // The zip file for this deployment, if this is a zip deployment. + Zip *ZipInfo `protobuf:"bytes,3,opt,name=zip" json:"zip,omitempty"` +} + +func (m *Deployment) Reset() { *m = Deployment{} } +func (m *Deployment) String() string { return proto.CompactTextString(m) } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *Deployment) GetFiles() map[string]*FileInfo { + if m != nil { + return m.Files + } + return nil +} + +func (m *Deployment) GetContainer() *ContainerInfo { + if m != nil { + return m.Container + } + return nil +} + +func (m *Deployment) GetZip() *ZipInfo { + if m != nil { + return m.Zip + } + return nil +} + +// Single source file that is part of the version to be deployed. Each source +// file that is deployed must be specified separately. +type FileInfo struct { + // URL source to use to fetch this file. 
Must be a URL to a resource in + // Google Cloud Storage in the form + // 'http(s)://storage.googleapis.com/\/\'. + SourceUrl string `protobuf:"bytes,1,opt,name=source_url,json=sourceUrl" json:"source_url,omitempty"` + // The SHA1 hash of the file, in hex. + Sha1Sum string `protobuf:"bytes,2,opt,name=sha1_sum,json=sha1Sum" json:"sha1_sum,omitempty"` + // The MIME type of the file. + // + // Defaults to the value from Google Cloud Storage. + MimeType string `protobuf:"bytes,3,opt,name=mime_type,json=mimeType" json:"mime_type,omitempty"` +} + +func (m *FileInfo) Reset() { *m = FileInfo{} } +func (m *FileInfo) String() string { return proto.CompactTextString(m) } +func (*FileInfo) ProtoMessage() {} +func (*FileInfo) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *FileInfo) GetSourceUrl() string { + if m != nil { + return m.SourceUrl + } + return "" +} + +func (m *FileInfo) GetSha1Sum() string { + if m != nil { + return m.Sha1Sum + } + return "" +} + +func (m *FileInfo) GetMimeType() string { + if m != nil { + return m.MimeType + } + return "" +} + +// Docker image that is used to start a VM container for the version you +// deploy. +type ContainerInfo struct { + // URI to the hosted container image in a Docker repository. The URI must be + // fully qualified and include a tag or digest. + // Examples: "gcr.io/my-project/image:tag" or "gcr.io/my-project/image@digest" + Image string `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` +} + +func (m *ContainerInfo) Reset() { *m = ContainerInfo{} } +func (m *ContainerInfo) String() string { return proto.CompactTextString(m) } +func (*ContainerInfo) ProtoMessage() {} +func (*ContainerInfo) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } + +func (m *ContainerInfo) GetImage() string { + if m != nil { + return m.Image + } + return "" +} + +type ZipInfo struct { + // URL of the zip file to deploy from. 
Must be a URL to a resource in + // Google Cloud Storage in the form + // 'http(s)://storage.googleapis.com/\/\'. + SourceUrl string `protobuf:"bytes,3,opt,name=source_url,json=sourceUrl" json:"source_url,omitempty"` + // An estimate of the number of files in a zip for a zip deployment. + // If set, must be greater than or equal to the actual number of files. + // Used for optimizing performance; if not provided, deployment may be slow. + FilesCount int32 `protobuf:"varint,4,opt,name=files_count,json=filesCount" json:"files_count,omitempty"` +} + +func (m *ZipInfo) Reset() { *m = ZipInfo{} } +func (m *ZipInfo) String() string { return proto.CompactTextString(m) } +func (*ZipInfo) ProtoMessage() {} +func (*ZipInfo) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } + +func (m *ZipInfo) GetSourceUrl() string { + if m != nil { + return m.SourceUrl + } + return "" +} + +func (m *ZipInfo) GetFilesCount() int32 { + if m != nil { + return m.FilesCount + } + return 0 +} + +func init() { + proto.RegisterType((*Deployment)(nil), "google.appengine.v1.Deployment") + proto.RegisterType((*FileInfo)(nil), "google.appengine.v1.FileInfo") + proto.RegisterType((*ContainerInfo)(nil), "google.appengine.v1.ContainerInfo") + proto.RegisterType((*ZipInfo)(nil), "google.appengine.v1.ZipInfo") +} + +func init() { proto.RegisterFile("google/appengine/v1/deploy.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 394 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xd1, 0xab, 0xd3, 0x30, + 0x14, 0xc6, 0xe9, 0x6a, 0xbd, 0xeb, 0x29, 0x82, 0x44, 0xc1, 0x7a, 0xbd, 0x17, 0x4b, 0x41, 0x28, + 0x3e, 0xa4, 0xec, 0xde, 0x17, 0x51, 0x1f, 0x2e, 0x9b, 0x0a, 0x7b, 0x1b, 0x55, 0x11, 0xf6, 0x52, + 0x62, 0xcd, 0x62, 0xb0, 0x4d, 0x42, 0x9b, 0x0e, 0xea, 0x7f, 0xe2, 0x7f, 0x2b, 0x49, 0xba, 0x8d, + 0x8d, 0xbe, 0xf5, 0x7c, 0xfd, 0x7d, 0x5f, 0x4e, 0x72, 0x0e, 0x24, 0x4c, 0x4a, 0x56, 0xd3, 0x9c, + 0x28, 0x45, 0x05, 0xe3, 
0x82, 0xe6, 0xfb, 0x45, 0xfe, 0x8b, 0xaa, 0x5a, 0x0e, 0x58, 0xb5, 0x52, + 0x4b, 0xf4, 0xcc, 0x11, 0xf8, 0x48, 0xe0, 0xfd, 0xe2, 0xfa, 0xe6, 0x68, 0xe3, 0x39, 0x11, 0x42, + 0x6a, 0xa2, 0xb9, 0x14, 0x9d, 0xb3, 0xa4, 0xff, 0x66, 0x00, 0x9f, 0x6c, 0x46, 0x43, 0x85, 0x46, + 0x0f, 0x10, 0xec, 0x78, 0x4d, 0xbb, 0xd8, 0x4b, 0xfc, 0x2c, 0xba, 0x7b, 0x8b, 0x27, 0x12, 0xf1, + 0x89, 0xc7, 0x5f, 0x0c, 0xfc, 0x59, 0xe8, 0x76, 0x28, 0x9c, 0x11, 0x3d, 0x40, 0x58, 0x49, 0xa1, + 0x09, 0x17, 0xb4, 0x8d, 0x67, 0x89, 0x97, 0x45, 0x77, 0xe9, 0x64, 0xca, 0xea, 0x40, 0xad, 0xc5, + 0x4e, 0x16, 0x27, 0x13, 0xc2, 0xe0, 0xff, 0xe5, 0x2a, 0xf6, 0xad, 0xf7, 0x66, 0xd2, 0xbb, 0xe5, + 0xca, 0xba, 0x0c, 0x78, 0xfd, 0x03, 0xe0, 0xd4, 0x06, 0x7a, 0x0a, 0xfe, 0x1f, 0x3a, 0xc4, 0x5e, + 0xe2, 0x65, 0x61, 0x61, 0x3e, 0xd1, 0x3d, 0x04, 0x7b, 0x52, 0xf7, 0x74, 0xec, 0xe6, 0x76, 0x32, + 0xd1, 0x24, 0xd8, 0x48, 0xc7, 0xbe, 0x9f, 0xbd, 0xf3, 0x52, 0x02, 0xf3, 0x83, 0x8c, 0x6e, 0x01, + 0x3a, 0xd9, 0xb7, 0x15, 0x2d, 0xfb, 0xb6, 0x1e, 0xd3, 0x43, 0xa7, 0x7c, 0x6f, 0x6b, 0xf4, 0x12, + 0xe6, 0xdd, 0x6f, 0xb2, 0x28, 0xbb, 0xbe, 0xb1, 0xc7, 0x84, 0xc5, 0x95, 0xa9, 0xbf, 0xf6, 0x0d, + 0x7a, 0x05, 0x61, 0xc3, 0x1b, 0x5a, 0xea, 0x41, 0x51, 0x7b, 0xa9, 0xb0, 0x98, 0x1b, 0xe1, 0xdb, + 0xa0, 0x68, 0xfa, 0x06, 0x9e, 0x9c, 0xbd, 0x03, 0x7a, 0x0e, 0x01, 0x6f, 0x08, 0xa3, 0xe3, 0x11, + 0xae, 0x48, 0xd7, 0x70, 0x35, 0x5e, 0xf9, 0xa2, 0x11, 0xff, 0xb2, 0x91, 0xd7, 0x10, 0xd9, 0x39, + 0x94, 0x95, 0xec, 0x85, 0x8e, 0x1f, 0x25, 0x5e, 0x16, 0x14, 0x60, 0xa5, 0x95, 0x51, 0x96, 0x3b, + 0x78, 0x51, 0xc9, 0x66, 0xea, 0x0d, 0x96, 0x91, 0x1b, 0xec, 0xc6, 0x2c, 0xc6, 0xc6, 0xdb, 0x7e, + 0x1c, 0x19, 0x26, 0x6b, 0x22, 0x18, 0x96, 0x2d, 0xcb, 0x19, 0x15, 0x76, 0x6d, 0x72, 0xf7, 0x8b, + 0x28, 0xde, 0x9d, 0xad, 0xe3, 0x87, 0x63, 0xf1, 0xf3, 0xb1, 0x05, 0xef, 0xff, 0x07, 0x00, 0x00, + 0xff, 0xff, 0x6e, 0xeb, 0x52, 0x5a, 0xb6, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/instance.pb.go 
b/vendor/google.golang.org/genproto/googleapis/appengine/v1/instance.pb.go new file mode 100644 index 000000000..d2948ac12 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/instance.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/v1/instance.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Availability of the instance. +type Instance_Availability int32 + +const ( + Instance_UNSPECIFIED Instance_Availability = 0 + Instance_RESIDENT Instance_Availability = 1 + Instance_DYNAMIC Instance_Availability = 2 +) + +var Instance_Availability_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "RESIDENT", + 2: "DYNAMIC", +} +var Instance_Availability_value = map[string]int32{ + "UNSPECIFIED": 0, + "RESIDENT": 1, + "DYNAMIC": 2, +} + +func (x Instance_Availability) String() string { + return proto.EnumName(Instance_Availability_name, int32(x)) +} +func (Instance_Availability) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 0} } + +// An Instance resource is the computing unit that App Engine uses to +// automatically scale an application. +type Instance struct { + // Full path to the Instance resource in the API. + // Example: `apps/myapp/services/default/versions/v1/instances/instance-1`. + // + // @OutputOnly + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Relative name of the instance within the version. + // Example: `instance-1`. + // + // @OutputOnly + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // App Engine release this instance is running on. 
+ // + // @OutputOnly + AppEngineRelease string `protobuf:"bytes,3,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"` + // Availability of the instance. + // + // @OutputOnly + Availability Instance_Availability `protobuf:"varint,4,opt,name=availability,enum=google.appengine.v1.Instance_Availability" json:"availability,omitempty"` + // Name of the virtual machine where this instance lives. Only applicable + // for instances in App Engine flexible environment. + // + // @OutputOnly + VmName string `protobuf:"bytes,5,opt,name=vm_name,json=vmName" json:"vm_name,omitempty"` + // Zone where the virtual machine is located. Only applicable for instances + // in App Engine flexible environment. + // + // @OutputOnly + VmZoneName string `protobuf:"bytes,6,opt,name=vm_zone_name,json=vmZoneName" json:"vm_zone_name,omitempty"` + // Virtual machine ID of this instance. Only applicable for instances in + // App Engine flexible environment. + // + // @OutputOnly + VmId string `protobuf:"bytes,7,opt,name=vm_id,json=vmId" json:"vm_id,omitempty"` + // Time that this instance was started. + // + // @OutputOnly + StartTime *google_protobuf2.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Number of requests since this instance was started. + // + // @OutputOnly + Requests int32 `protobuf:"varint,9,opt,name=requests" json:"requests,omitempty"` + // Number of errors since this instance was started. + // + // @OutputOnly + Errors int32 `protobuf:"varint,10,opt,name=errors" json:"errors,omitempty"` + // Average queries per second (QPS) over the last minute. + // + // @OutputOnly + Qps float32 `protobuf:"fixed32,11,opt,name=qps" json:"qps,omitempty"` + // Average latency (ms) over the last minute. + // + // @OutputOnly + AverageLatency int32 `protobuf:"varint,12,opt,name=average_latency,json=averageLatency" json:"average_latency,omitempty"` + // Total memory in use (bytes). 
+ // + // @OutputOnly + MemoryUsage int64 `protobuf:"varint,13,opt,name=memory_usage,json=memoryUsage" json:"memory_usage,omitempty"` + // Status of the virtual machine where this instance lives. Only applicable + // for instances in App Engine flexible environment. + // + // @OutputOnly + VmStatus string `protobuf:"bytes,14,opt,name=vm_status,json=vmStatus" json:"vm_status,omitempty"` + // Whether this instance is in debug mode. Only applicable for instances in + // App Engine flexible environment. + // + // @OutputOnly + VmDebugEnabled bool `protobuf:"varint,15,opt,name=vm_debug_enabled,json=vmDebugEnabled" json:"vm_debug_enabled,omitempty"` +} + +func (m *Instance) Reset() { *m = Instance{} } +func (m *Instance) String() string { return proto.CompactTextString(m) } +func (*Instance) ProtoMessage() {} +func (*Instance) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +func (m *Instance) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Instance) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Instance) GetAppEngineRelease() string { + if m != nil { + return m.AppEngineRelease + } + return "" +} + +func (m *Instance) GetAvailability() Instance_Availability { + if m != nil { + return m.Availability + } + return Instance_UNSPECIFIED +} + +func (m *Instance) GetVmName() string { + if m != nil { + return m.VmName + } + return "" +} + +func (m *Instance) GetVmZoneName() string { + if m != nil { + return m.VmZoneName + } + return "" +} + +func (m *Instance) GetVmId() string { + if m != nil { + return m.VmId + } + return "" +} + +func (m *Instance) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Instance) GetRequests() int32 { + if m != nil { + return m.Requests + } + return 0 +} + +func (m *Instance) GetErrors() int32 { + if m != nil { + return m.Errors + } + return 0 +} + +func (m *Instance) GetQps() float32 { + if m != nil 
{ + return m.Qps + } + return 0 +} + +func (m *Instance) GetAverageLatency() int32 { + if m != nil { + return m.AverageLatency + } + return 0 +} + +func (m *Instance) GetMemoryUsage() int64 { + if m != nil { + return m.MemoryUsage + } + return 0 +} + +func (m *Instance) GetVmStatus() string { + if m != nil { + return m.VmStatus + } + return "" +} + +func (m *Instance) GetVmDebugEnabled() bool { + if m != nil { + return m.VmDebugEnabled + } + return false +} + +func init() { + proto.RegisterType((*Instance)(nil), "google.appengine.v1.Instance") + proto.RegisterEnum("google.appengine.v1.Instance_Availability", Instance_Availability_name, Instance_Availability_value) +} + +func init() { proto.RegisterFile("google/appengine/v1/instance.proto", fileDescriptor5) } + +var fileDescriptor5 = []byte{ + // 521 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0xdb, 0x3c, + 0x14, 0x7e, 0x9d, 0xb6, 0xa9, 0x73, 0xe2, 0x26, 0x46, 0x85, 0xb7, 0x22, 0x1b, 0xcc, 0xcb, 0xcd, + 0xcc, 0x18, 0x36, 0xed, 0xae, 0xf6, 0x71, 0xd3, 0x36, 0x1e, 0x18, 0xb6, 0x10, 0x9c, 0xf6, 0x62, + 0xbd, 0x31, 0x4a, 0xac, 0x79, 0x02, 0x4b, 0x72, 0x2d, 0x45, 0x90, 0xfd, 0xc6, 0xfd, 0xa8, 0x61, + 0x39, 0x09, 0x2d, 0xf4, 0xce, 0xcf, 0xc7, 0x41, 0xcf, 0x79, 0x0e, 0x86, 0x69, 0x29, 0x65, 0x59, + 0xd1, 0x98, 0xd4, 0x35, 0x15, 0x25, 0x13, 0x34, 0x36, 0x97, 0x31, 0x13, 0x4a, 0x13, 0xb1, 0xa6, + 0x51, 0xdd, 0x48, 0x2d, 0xd1, 0x79, 0xe7, 0x89, 0x0e, 0x9e, 0xc8, 0x5c, 0x4e, 0x5e, 0x1f, 0x06, + 0x59, 0x4c, 0x84, 0x90, 0x9a, 0x68, 0x26, 0x85, 0xea, 0x46, 0x26, 0x6f, 0x76, 0xaa, 0x45, 0xab, + 0xcd, 0xaf, 0x58, 0x33, 0x4e, 0x95, 0x26, 0xbc, 0xee, 0x0c, 0xd3, 0xbf, 0xc7, 0xe0, 0xa6, 0xbb, + 0x67, 0x10, 0x82, 0x63, 0x41, 0x38, 0xc5, 0x4e, 0xe0, 0x84, 0x83, 0xcc, 0x7e, 0xa3, 0x11, 0xf4, + 0x58, 0x81, 0x7b, 0x96, 0xe9, 0xb1, 0x02, 0x7d, 0x00, 0x44, 0xea, 0x3a, 0xef, 0x02, 0xe4, 0x0d, + 0xad, 0x28, 0x51, 0x14, 0x1f, 0x59, 0xdd, 0x27, 0x75, 0x9d, 0x58, 
0x21, 0xeb, 0x78, 0x34, 0x07, + 0x8f, 0x18, 0xc2, 0x2a, 0xb2, 0x62, 0x15, 0xd3, 0x5b, 0x7c, 0x1c, 0x38, 0xe1, 0xe8, 0xea, 0x7d, + 0xf4, 0xc2, 0x26, 0xd1, 0x3e, 0x46, 0x74, 0xfd, 0x64, 0x22, 0x7b, 0x36, 0x8f, 0x2e, 0xe0, 0xd4, + 0xf0, 0xdc, 0x86, 0x3c, 0xb1, 0x4f, 0xf6, 0x0d, 0x9f, 0xb7, 0x31, 0x03, 0xf0, 0x0c, 0xcf, 0xff, + 0x48, 0x41, 0x3b, 0xb5, 0x6f, 0x55, 0x30, 0xfc, 0x41, 0x0a, 0x6a, 0x1d, 0xe7, 0x70, 0x62, 0x78, + 0xce, 0x0a, 0x7c, 0xda, 0x6d, 0x67, 0x78, 0x5a, 0xa0, 0x4f, 0x00, 0x4a, 0x93, 0x46, 0xe7, 0x6d, + 0x2f, 0xd8, 0x0d, 0x9c, 0x70, 0x78, 0x35, 0xd9, 0xa7, 0xdb, 0x97, 0x16, 0xdd, 0xed, 0x4b, 0xcb, + 0x06, 0xd6, 0xdd, 0x62, 0x34, 0x01, 0xb7, 0xa1, 0x8f, 0x1b, 0xaa, 0xb4, 0xc2, 0x83, 0xc0, 0x09, + 0x4f, 0xb2, 0x03, 0x46, 0xff, 0x43, 0x9f, 0x36, 0x8d, 0x6c, 0x14, 0x06, 0xab, 0xec, 0x10, 0xf2, + 0xe1, 0xe8, 0xb1, 0x56, 0x78, 0x18, 0x38, 0x61, 0x2f, 0x6b, 0x3f, 0xd1, 0x3b, 0x18, 0x13, 0x43, + 0x1b, 0x52, 0xd2, 0xbc, 0x22, 0x9a, 0x8a, 0xf5, 0x16, 0x7b, 0x76, 0x64, 0xb4, 0xa3, 0xbf, 0x77, + 0x2c, 0x7a, 0x0b, 0x1e, 0xa7, 0x5c, 0x36, 0xdb, 0x7c, 0xa3, 0x48, 0x49, 0xf1, 0x59, 0xe0, 0x84, + 0x47, 0xd9, 0xb0, 0xe3, 0xee, 0x5b, 0x0a, 0xbd, 0x82, 0x81, 0xe1, 0xb9, 0xd2, 0x44, 0x6f, 0x14, + 0x1e, 0xd9, 0x2d, 0x5d, 0xc3, 0x97, 0x16, 0xa3, 0x10, 0x7c, 0xc3, 0xf3, 0x82, 0xae, 0x36, 0x65, + 0x4e, 0x05, 0x59, 0x55, 0xb4, 0xc0, 0xe3, 0xc0, 0x09, 0xdd, 0x6c, 0x64, 0xf8, 0xac, 0xa5, 0x93, + 0x8e, 0x9d, 0x7e, 0x06, 0xef, 0xe9, 0x05, 0xd0, 0x18, 0x86, 0xf7, 0xf3, 0xe5, 0x22, 0xb9, 0x4d, + 0xbf, 0xa5, 0xc9, 0xcc, 0xff, 0x0f, 0x79, 0xe0, 0x66, 0xc9, 0x32, 0x9d, 0x25, 0xf3, 0x3b, 0xdf, + 0x41, 0x43, 0x38, 0x9d, 0xfd, 0x9c, 0x5f, 0xff, 0x48, 0x6f, 0xfd, 0xde, 0xcd, 0x6f, 0xb8, 0x58, + 0x4b, 0xfe, 0xd2, 0x79, 0x6f, 0xce, 0xf6, 0xf7, 0x5d, 0xb4, 0xb5, 0x2e, 0x9c, 0x87, 0xaf, 0x3b, + 0x57, 0x29, 0x2b, 0x22, 0xca, 0x48, 0x36, 0x65, 0x5c, 0x52, 0x61, 0x4b, 0x8f, 0x3b, 0x89, 0xd4, + 0x4c, 0x3d, 0xfb, 0x23, 0xbe, 0x1c, 0xc0, 0xaa, 0x6f, 0x8d, 0x1f, 0xff, 0x05, 0x00, 0x00, 0xff, + 0xff, 
0x97, 0xe7, 0x7d, 0x88, 0x39, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/location.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/location.pb.go new file mode 100644 index 000000000..83d2efd2b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/location.pb.go @@ -0,0 +1,71 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/v1/location.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/type/latlng" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Metadata for the given [google.cloud.location.Location][google.cloud.location.Location]. +type LocationMetadata struct { + // App Engine Standard Environment is available in the given location. + // + // @OutputOnly + StandardEnvironmentAvailable bool `protobuf:"varint,2,opt,name=standard_environment_available,json=standardEnvironmentAvailable" json:"standard_environment_available,omitempty"` + // App Engine Flexible Environment is available in the given location. 
+ // + // @OutputOnly + FlexibleEnvironmentAvailable bool `protobuf:"varint,4,opt,name=flexible_environment_available,json=flexibleEnvironmentAvailable" json:"flexible_environment_available,omitempty"` +} + +func (m *LocationMetadata) Reset() { *m = LocationMetadata{} } +func (m *LocationMetadata) String() string { return proto.CompactTextString(m) } +func (*LocationMetadata) ProtoMessage() {} +func (*LocationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +func (m *LocationMetadata) GetStandardEnvironmentAvailable() bool { + if m != nil { + return m.StandardEnvironmentAvailable + } + return false +} + +func (m *LocationMetadata) GetFlexibleEnvironmentAvailable() bool { + if m != nil { + return m.FlexibleEnvironmentAvailable + } + return false +} + +func init() { + proto.RegisterType((*LocationMetadata)(nil), "google.appengine.v1.LocationMetadata") +} + +func init() { proto.RegisterFile("google/appengine/v1/location.proto", fileDescriptor6) } + +var fileDescriptor6 = []byte{ + // 236 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0x41, 0x4b, 0xc3, 0x40, + 0x10, 0x85, 0x89, 0x88, 0x48, 0x40, 0x90, 0x7a, 0xb0, 0x94, 0x22, 0xd2, 0x93, 0xa7, 0x5d, 0x8a, + 0x47, 0xbd, 0x58, 0xf4, 0xa6, 0x50, 0x3c, 0x7a, 0x29, 0x93, 0x66, 0x1c, 0x17, 0xa6, 0x33, 0x4b, + 0x32, 0x04, 0xfd, 0x33, 0xfe, 0x56, 0x69, 0x36, 0x1b, 0x11, 0xea, 0x71, 0x79, 0xdf, 0xfb, 0xd8, + 0x37, 0xe5, 0x82, 0x54, 0x89, 0xd1, 0x43, 0x8c, 0x28, 0x14, 0x04, 0x7d, 0xb7, 0xf4, 0xac, 0x5b, + 0xb0, 0xa0, 0xe2, 0x62, 0xa3, 0xa6, 0x93, 0x8b, 0xc4, 0xb8, 0x91, 0x71, 0xdd, 0x72, 0x36, 0x1f, + 0x8b, 0xc1, 0x83, 0x88, 0x5a, 0xdf, 0x68, 0x53, 0x65, 0x36, 0x1d, 0x52, 0xfb, 0x8a, 0xe8, 0x19, + 0x8c, 0x85, 0x52, 0xb2, 0xf8, 0x2e, 0xca, 0xf3, 0xe7, 0xc1, 0xff, 0x82, 0x06, 0x35, 0x18, 0x4c, + 0x1e, 0xcb, 0xab, 0xd6, 0x40, 0x6a, 0x68, 0xea, 0x0d, 0x4a, 0x17, 0x1a, 0x95, 0x1d, 0x8a, 0x6d, + 0xa0, 0x83, 0xc0, 0x50, 0x31, 0x4e, 0x8f, 0xae, 
0x8b, 0x9b, 0xd3, 0xd7, 0x79, 0xa6, 0x9e, 0x7e, + 0xa1, 0x87, 0xcc, 0xec, 0x2d, 0xef, 0x8c, 0x9f, 0xa1, 0x62, 0xfc, 0xc7, 0x72, 0x9c, 0x2c, 0x99, + 0x3a, 0x64, 0x59, 0x7d, 0x94, 0x97, 0x5b, 0xdd, 0xb9, 0x03, 0x9b, 0x57, 0x67, 0xf9, 0xe3, 0xeb, + 0xfd, 0x94, 0x75, 0xf1, 0x76, 0x3f, 0x50, 0xa4, 0x0c, 0x42, 0x4e, 0x1b, 0xf2, 0x84, 0xd2, 0x0f, + 0xf5, 0x29, 0x82, 0x18, 0xda, 0x3f, 0xc7, 0xbd, 0x1b, 0x1f, 0xd5, 0x49, 0x0f, 0xde, 0xfe, 0x04, + 0x00, 0x00, 0xff, 0xff, 0x93, 0x9b, 0x7c, 0xf8, 0x84, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/operation.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/operation.pb.go new file mode 100644 index 000000000..2000f6fc3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/operation.pb.go @@ -0,0 +1,108 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/v1/operation.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Metadata for the given [google.longrunning.Operation][google.longrunning.Operation]. +type OperationMetadataV1 struct { + // API method that initiated this operation. Example: + // `google.appengine.v1.Versions.CreateVersion`. + // + // @OutputOnly + Method string `protobuf:"bytes,1,opt,name=method" json:"method,omitempty"` + // Time that this operation was created. + // + // @OutputOnly + InsertTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=insert_time,json=insertTime" json:"insert_time,omitempty"` + // Time that this operation completed. 
+ // + // @OutputOnly + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // User who requested this operation. + // + // @OutputOnly + User string `protobuf:"bytes,4,opt,name=user" json:"user,omitempty"` + // Name of the resource that this operation is acting on. Example: + // `apps/myapp/services/default`. + // + // @OutputOnly + Target string `protobuf:"bytes,5,opt,name=target" json:"target,omitempty"` +} + +func (m *OperationMetadataV1) Reset() { *m = OperationMetadataV1{} } +func (m *OperationMetadataV1) String() string { return proto.CompactTextString(m) } +func (*OperationMetadataV1) ProtoMessage() {} +func (*OperationMetadataV1) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } + +func (m *OperationMetadataV1) GetMethod() string { + if m != nil { + return m.Method + } + return "" +} + +func (m *OperationMetadataV1) GetInsertTime() *google_protobuf2.Timestamp { + if m != nil { + return m.InsertTime + } + return nil +} + +func (m *OperationMetadataV1) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *OperationMetadataV1) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *OperationMetadataV1) GetTarget() string { + if m != nil { + return m.Target + } + return "" +} + +func init() { + proto.RegisterType((*OperationMetadataV1)(nil), "google.appengine.v1.OperationMetadataV1") +} + +func init() { proto.RegisterFile("google/appengine/v1/operation.proto", fileDescriptor7) } + +var fileDescriptor7 = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0x41, 0x4b, 0x03, 0x31, + 0x10, 0x85, 0x59, 0xad, 0x55, 0x53, 0xf0, 0xb0, 0x05, 0x5d, 0x16, 0xc1, 0xa2, 0x97, 0x9e, 0x12, + 0x56, 0xf1, 0x54, 0x4f, 0xbd, 0x8b, 0xa5, 0x88, 0x07, 0x2f, 0x92, 0xba, 0x63, 0x0c, 0x74, 0x67, + 0x42, 0x32, 0xed, 0xbf, 0xf4, 0x3f, 0xc9, 0x26, 0xbb, 
0x0b, 0x82, 0xd0, 0x5b, 0x5e, 0xe6, 0x7d, + 0x79, 0x2f, 0x89, 0xb8, 0x33, 0x44, 0x66, 0x0b, 0x4a, 0x3b, 0x07, 0x68, 0x2c, 0x82, 0xda, 0x57, + 0x8a, 0x1c, 0x78, 0xcd, 0x96, 0x50, 0x3a, 0x4f, 0x4c, 0xf9, 0x34, 0x99, 0xe4, 0x60, 0x92, 0xfb, + 0xaa, 0xbc, 0x1e, 0x48, 0xab, 0x34, 0x22, 0x71, 0x24, 0x42, 0x42, 0xca, 0x9b, 0x6e, 0x1a, 0xd5, + 0x66, 0xf7, 0xa5, 0xd8, 0x36, 0x10, 0x58, 0x37, 0x2e, 0x19, 0x6e, 0x7f, 0x32, 0x31, 0x7d, 0xe9, + 0x73, 0x9e, 0x81, 0x75, 0xad, 0x59, 0xbf, 0x55, 0xf9, 0xa5, 0x18, 0x37, 0xc0, 0xdf, 0x54, 0x17, + 0xd9, 0x2c, 0x9b, 0x9f, 0xaf, 0x3b, 0x95, 0x2f, 0xc4, 0xc4, 0x62, 0x00, 0xcf, 0x1f, 0xed, 0x49, + 0xc5, 0xd1, 0x2c, 0x9b, 0x4f, 0xee, 0x4b, 0xd9, 0x35, 0xeb, 0x63, 0xe4, 0x6b, 0x1f, 0xb3, 0x16, + 0xc9, 0xde, 0x6e, 0xe4, 0x8f, 0xe2, 0x0c, 0xb0, 0x4e, 0xe4, 0xf1, 0x41, 0xf2, 0x14, 0xb0, 0x8e, + 0x58, 0x2e, 0x46, 0xbb, 0x00, 0xbe, 0x18, 0xc5, 0x26, 0x71, 0xdd, 0xf6, 0x63, 0xed, 0x0d, 0x70, + 0x71, 0x92, 0xfa, 0x25, 0xb5, 0xb4, 0xe2, 0xea, 0x93, 0x1a, 0xf9, 0xcf, 0x4b, 0x2d, 0x2f, 0x86, + 0x7b, 0xae, 0xda, 0xb0, 0x55, 0xf6, 0xfe, 0xd4, 0xd9, 0x0c, 0x6d, 0x35, 0x1a, 0x49, 0xde, 0x28, + 0x03, 0x18, 0xab, 0xa8, 0x34, 0xd2, 0xce, 0x86, 0x3f, 0x9f, 0xb2, 0x18, 0xc4, 0x66, 0x1c, 0x8d, + 0x0f, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x8e, 0xb2, 0x00, 0xbc, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/service.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/service.pb.go new file mode 100644 index 000000000..0ccbde779 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/service.pb.go @@ -0,0 +1,165 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/appengine/v1/service.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Available sharding mechanisms. +type TrafficSplit_ShardBy int32 + +const ( + // Diversion method unspecified. + TrafficSplit_UNSPECIFIED TrafficSplit_ShardBy = 0 + // Diversion based on a specially named cookie, "GOOGAPPUID." The cookie + // must be set by the application itself or no diversion will occur. + TrafficSplit_COOKIE TrafficSplit_ShardBy = 1 + // Diversion based on applying the modulus operation to a fingerprint + // of the IP address. + TrafficSplit_IP TrafficSplit_ShardBy = 2 +) + +var TrafficSplit_ShardBy_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "COOKIE", + 2: "IP", +} +var TrafficSplit_ShardBy_value = map[string]int32{ + "UNSPECIFIED": 0, + "COOKIE": 1, + "IP": 2, +} + +func (x TrafficSplit_ShardBy) String() string { + return proto.EnumName(TrafficSplit_ShardBy_name, int32(x)) +} +func (TrafficSplit_ShardBy) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{1, 0} } + +// A Service resource is a logical component of an application that can share +// state and communicate in a secure fashion with other services. +// For example, an application that handles customer requests might +// include separate services to handle tasks such as backend data +// analysis or API requests from mobile devices. Each service has a +// collection of versions that define a specific set of code used to +// implement the functionality of that service. +type Service struct { + // Full path to the Service resource in the API. + // Example: `apps/myapp/services/default`. + // + // @OutputOnly + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Relative name of the service within the application. + // Example: `default`. + // + // @OutputOnly + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Mapping that defines fractional HTTP traffic diversion to + // different versions within the service. 
+ Split *TrafficSplit `protobuf:"bytes,3,opt,name=split" json:"split,omitempty"` +} + +func (m *Service) Reset() { *m = Service{} } +func (m *Service) String() string { return proto.CompactTextString(m) } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } + +func (m *Service) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Service) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Service) GetSplit() *TrafficSplit { + if m != nil { + return m.Split + } + return nil +} + +// Traffic routing configuration for versions within a single service. Traffic +// splits define how traffic directed to the service is assigned to versions. +type TrafficSplit struct { + // Mechanism used to determine which version a request is sent to. + // The traffic selection algorithm will + // be stable for either type until allocations are changed. + ShardBy TrafficSplit_ShardBy `protobuf:"varint,1,opt,name=shard_by,json=shardBy,enum=google.appengine.v1.TrafficSplit_ShardBy" json:"shard_by,omitempty"` + // Mapping from version IDs within the service to fractional + // (0.000, 1] allocations of traffic for that version. Each version can + // be specified only once, but some versions in the service may not + // have any traffic allocation. Services that have traffic allocated + // cannot be deleted until either the service is deleted or + // their traffic allocation is removed. Allocations must sum to 1. + // Up to two decimal place precision is supported for IP-based splits and + // up to three decimal places is supported for cookie-based splits. 
+ Allocations map[string]float64 `protobuf:"bytes,2,rep,name=allocations" json:"allocations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` +} + +func (m *TrafficSplit) Reset() { *m = TrafficSplit{} } +func (m *TrafficSplit) String() string { return proto.CompactTextString(m) } +func (*TrafficSplit) ProtoMessage() {} +func (*TrafficSplit) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{1} } + +func (m *TrafficSplit) GetShardBy() TrafficSplit_ShardBy { + if m != nil { + return m.ShardBy + } + return TrafficSplit_UNSPECIFIED +} + +func (m *TrafficSplit) GetAllocations() map[string]float64 { + if m != nil { + return m.Allocations + } + return nil +} + +func init() { + proto.RegisterType((*Service)(nil), "google.appengine.v1.Service") + proto.RegisterType((*TrafficSplit)(nil), "google.appengine.v1.TrafficSplit") + proto.RegisterEnum("google.appengine.v1.TrafficSplit_ShardBy", TrafficSplit_ShardBy_name, TrafficSplit_ShardBy_value) +} + +func init() { proto.RegisterFile("google/appengine/v1/service.proto", fileDescriptor8) } + +var fileDescriptor8 = []byte{ + // 356 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x51, 0x8b, 0xda, 0x40, + 0x10, 0xc7, 0x9b, 0xb5, 0x6a, 0x3b, 0x11, 0x1b, 0xb6, 0x85, 0x06, 0xe9, 0x83, 0xfa, 0x64, 0x5f, + 0x36, 0x98, 0x3e, 0xb4, 0xb4, 0xa5, 0x50, 0x35, 0x85, 0x50, 0x38, 0x43, 0xe2, 0xbd, 0xdc, 0xcb, + 0xb1, 0xc6, 0x75, 0x6f, 0xb9, 0xb8, 0x1b, 0x92, 0x5c, 0x20, 0x5f, 0xe5, 0x3e, 0xed, 0x91, 0x6c, + 0xf0, 0xbc, 0x43, 0xf0, 0x6d, 0x66, 0xf2, 0xfb, 0xff, 0x67, 0x32, 0xb3, 0x30, 0xe1, 0x4a, 0xf1, + 0x84, 0x39, 0x34, 0x4d, 0x99, 0xe4, 0x42, 0x32, 0xa7, 0x9c, 0x3b, 0x39, 0xcb, 0x4a, 0x11, 0x33, + 0x92, 0x66, 0xaa, 0x50, 0xf8, 0xa3, 0x46, 0xc8, 0x11, 0x21, 0xe5, 0x7c, 0xf4, 0xe5, 0xa8, 0x13, + 0x0e, 0x95, 0x52, 0x15, 0xb4, 0x10, 0x4a, 0xe6, 0x5a, 0x32, 0xdd, 0x43, 0x3f, 0xd2, 0x1e, 0x18, + 0xc3, 0x5b, 0x49, 0x0f, 0xcc, 0x36, 0xc6, 
0xc6, 0xec, 0x7d, 0xd8, 0xc4, 0x78, 0x08, 0x48, 0xec, + 0x6c, 0xd4, 0x54, 0x90, 0xd8, 0xe1, 0xef, 0xd0, 0xcd, 0xd3, 0x44, 0x14, 0x76, 0x67, 0x6c, 0xcc, + 0x4c, 0x77, 0x42, 0xce, 0x74, 0x24, 0x9b, 0x8c, 0xee, 0xf7, 0x22, 0x8e, 0x6a, 0x30, 0xd4, 0xfc, + 0xf4, 0x11, 0xc1, 0xe0, 0xb4, 0x8e, 0x57, 0xf0, 0x2e, 0xbf, 0xa3, 0xd9, 0xee, 0x76, 0x5b, 0x35, + 0x1d, 0x87, 0xee, 0xd7, 0x8b, 0x66, 0x24, 0xaa, 0x15, 0x8b, 0x2a, 0xec, 0xe7, 0x3a, 0xc0, 0x1b, + 0x30, 0x69, 0x92, 0xa8, 0x58, 0xff, 0x93, 0x8d, 0xc6, 0x9d, 0x99, 0xe9, 0xba, 0x97, 0x8d, 0xfe, + 0x3e, 0x8b, 0x3c, 0x59, 0x64, 0x55, 0x78, 0x6a, 0x33, 0xfa, 0x03, 0xd6, 0x6b, 0x00, 0x5b, 0xd0, + 0xb9, 0x67, 0x55, 0xbb, 0x9c, 0x3a, 0xc4, 0x9f, 0xa0, 0x5b, 0xd2, 0xe4, 0x81, 0x35, 0xeb, 0x31, + 0x42, 0x9d, 0xfc, 0x44, 0x3f, 0x8c, 0x29, 0x81, 0x7e, 0x3b, 0x29, 0xfe, 0x00, 0xe6, 0xf5, 0x55, + 0x14, 0x78, 0x4b, 0xff, 0x9f, 0xef, 0xad, 0xac, 0x37, 0x18, 0xa0, 0xb7, 0x5c, 0xaf, 0xff, 0xfb, + 0x9e, 0x65, 0xe0, 0x1e, 0x20, 0x3f, 0xb0, 0xd0, 0x82, 0xc3, 0xe7, 0x58, 0x1d, 0xce, 0x4d, 0xbd, + 0x18, 0xb4, 0xd7, 0x09, 0xea, 0x6b, 0x05, 0xc6, 0xcd, 0xef, 0x16, 0xe2, 0x2a, 0xa1, 0x92, 0x13, + 0x95, 0x71, 0x87, 0x33, 0xd9, 0xdc, 0xd2, 0xd1, 0x9f, 0x68, 0x2a, 0xf2, 0x17, 0x8f, 0xe4, 0xd7, + 0x31, 0xd9, 0xf6, 0x1a, 0xf0, 0xdb, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xbc, 0x13, 0xf4, + 0x4c, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/appengine/v1/version.pb.go b/vendor/google.golang.org/genproto/googleapis/appengine/v1/version.pb.go new file mode 100644 index 000000000..21f86bf1e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/appengine/v1/version.pb.go @@ -0,0 +1,1072 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/appengine/v1/version.proto + +package appengine + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Available inbound services. +type InboundServiceType int32 + +const ( + // Not specified. + InboundServiceType_INBOUND_SERVICE_UNSPECIFIED InboundServiceType = 0 + // Allows an application to receive mail. + InboundServiceType_INBOUND_SERVICE_MAIL InboundServiceType = 1 + // Allows an application to receive email-bound notifications. + InboundServiceType_INBOUND_SERVICE_MAIL_BOUNCE InboundServiceType = 2 + // Allows an application to receive error stanzas. + InboundServiceType_INBOUND_SERVICE_XMPP_ERROR InboundServiceType = 3 + // Allows an application to receive instant messages. + InboundServiceType_INBOUND_SERVICE_XMPP_MESSAGE InboundServiceType = 4 + // Allows an application to receive user subscription POSTs. + InboundServiceType_INBOUND_SERVICE_XMPP_SUBSCRIBE InboundServiceType = 5 + // Allows an application to receive a user's chat presence. + InboundServiceType_INBOUND_SERVICE_XMPP_PRESENCE InboundServiceType = 6 + // Registers an application for notifications when a client connects or + // disconnects from a channel. + InboundServiceType_INBOUND_SERVICE_CHANNEL_PRESENCE InboundServiceType = 7 + // Enables warmup requests. 
+ InboundServiceType_INBOUND_SERVICE_WARMUP InboundServiceType = 9 +) + +var InboundServiceType_name = map[int32]string{ + 0: "INBOUND_SERVICE_UNSPECIFIED", + 1: "INBOUND_SERVICE_MAIL", + 2: "INBOUND_SERVICE_MAIL_BOUNCE", + 3: "INBOUND_SERVICE_XMPP_ERROR", + 4: "INBOUND_SERVICE_XMPP_MESSAGE", + 5: "INBOUND_SERVICE_XMPP_SUBSCRIBE", + 6: "INBOUND_SERVICE_XMPP_PRESENCE", + 7: "INBOUND_SERVICE_CHANNEL_PRESENCE", + 9: "INBOUND_SERVICE_WARMUP", +} +var InboundServiceType_value = map[string]int32{ + "INBOUND_SERVICE_UNSPECIFIED": 0, + "INBOUND_SERVICE_MAIL": 1, + "INBOUND_SERVICE_MAIL_BOUNCE": 2, + "INBOUND_SERVICE_XMPP_ERROR": 3, + "INBOUND_SERVICE_XMPP_MESSAGE": 4, + "INBOUND_SERVICE_XMPP_SUBSCRIBE": 5, + "INBOUND_SERVICE_XMPP_PRESENCE": 6, + "INBOUND_SERVICE_CHANNEL_PRESENCE": 7, + "INBOUND_SERVICE_WARMUP": 9, +} + +func (x InboundServiceType) String() string { + return proto.EnumName(InboundServiceType_name, int32(x)) +} +func (InboundServiceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor9, []int{0} } + +// Run states of a version. +type ServingStatus int32 + +const ( + // Not specified. + ServingStatus_SERVING_STATUS_UNSPECIFIED ServingStatus = 0 + // Currently serving. Instances are created according to the + // scaling settings of the version. + ServingStatus_SERVING ServingStatus = 1 + // Disabled. No instances will be created and the scaling + // settings are ignored until the state of the version changes + // to `SERVING`. 
+ ServingStatus_STOPPED ServingStatus = 2 +) + +var ServingStatus_name = map[int32]string{ + 0: "SERVING_STATUS_UNSPECIFIED", + 1: "SERVING", + 2: "STOPPED", +} +var ServingStatus_value = map[string]int32{ + "SERVING_STATUS_UNSPECIFIED": 0, + "SERVING": 1, + "STOPPED": 2, +} + +func (x ServingStatus) String() string { + return proto.EnumName(ServingStatus_name, int32(x)) +} +func (ServingStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor9, []int{1} } + +// A Version resource is a specific set of source code and configuration files +// that are deployed into a service. +type Version struct { + // Full path to the Version resource in the API. Example: + // `apps/myapp/services/default/versions/v1`. + // + // @OutputOnly + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Relative name of the version within the service. Example: `v1`. + // Version names can contain only lowercase letters, numbers, or hyphens. + // Reserved names: "default", "latest", and any name with the prefix "ah-". + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Controls how instances are created. + // + // Defaults to `AutomaticScaling`. + // + // Types that are valid to be assigned to Scaling: + // *Version_AutomaticScaling + // *Version_BasicScaling + // *Version_ManualScaling + Scaling isVersion_Scaling `protobuf_oneof:"scaling"` + // Before an application can receive email or XMPP messages, the application + // must be configured to enable the service. + InboundServices []InboundServiceType `protobuf:"varint,6,rep,packed,name=inbound_services,json=inboundServices,enum=google.appengine.v1.InboundServiceType" json:"inbound_services,omitempty"` + // Instance class that is used to run this version. Valid values are: + // * AutomaticScaling: `F1`, `F2`, `F4`, `F4_1G` + // * ManualScaling or BasicScaling: `B1`, `B2`, `B4`, `B8`, `B4_1G` + // + // Defaults to `F1` for AutomaticScaling and `B1` for ManualScaling or + // BasicScaling. 
+ InstanceClass string `protobuf:"bytes,7,opt,name=instance_class,json=instanceClass" json:"instance_class,omitempty"` + // Extra network settings. Only applicable for VM runtimes. + Network *Network `protobuf:"bytes,8,opt,name=network" json:"network,omitempty"` + // Machine resources for this version. Only applicable for VM runtimes. + Resources *Resources `protobuf:"bytes,9,opt,name=resources" json:"resources,omitempty"` + // Desired runtime. Example: `python27`. + Runtime string `protobuf:"bytes,10,opt,name=runtime" json:"runtime,omitempty"` + // Whether multiple requests can be dispatched to this version at once. + Threadsafe bool `protobuf:"varint,11,opt,name=threadsafe" json:"threadsafe,omitempty"` + // Whether to deploy this version in a container on a virtual machine. + Vm bool `protobuf:"varint,12,opt,name=vm" json:"vm,omitempty"` + // Metadata settings that are supplied to this version to enable + // beta runtime features. + BetaSettings map[string]string `protobuf:"bytes,13,rep,name=beta_settings,json=betaSettings" json:"beta_settings,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // App Engine execution environment for this version. + // + // Defaults to `standard`. + Env string `protobuf:"bytes,14,opt,name=env" json:"env,omitempty"` + // Current serving status of this version. Only the versions with a + // `SERVING` status create instances and can be billed. + // + // `SERVING_STATUS_UNSPECIFIED` is an invalid value. Defaults to `SERVING`. + ServingStatus ServingStatus `protobuf:"varint,15,opt,name=serving_status,json=servingStatus,enum=google.appengine.v1.ServingStatus" json:"serving_status,omitempty"` + // Email address of the user who created this version. + // + // @OutputOnly + CreatedBy string `protobuf:"bytes,16,opt,name=created_by,json=createdBy" json:"created_by,omitempty"` + // Time that this version was created. 
+ // + // @OutputOnly + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,17,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Total size in bytes of all the files that are included in this version + // and curerntly hosted on the App Engine disk. + // + // @OutputOnly + DiskUsageBytes int64 `protobuf:"varint,18,opt,name=disk_usage_bytes,json=diskUsageBytes" json:"disk_usage_bytes,omitempty"` + // An ordered list of URL-matching patterns that should be applied to incoming + // requests. The first matching URL handles the request and other request + // handlers are not attempted. + // + // Only returned in `GET` requests if `view=FULL` is set. + Handlers []*UrlMap `protobuf:"bytes,100,rep,name=handlers" json:"handlers,omitempty"` + // Custom static error pages. Limited to 10KB per page. + // + // Only returned in `GET` requests if `view=FULL` is set. + ErrorHandlers []*ErrorHandler `protobuf:"bytes,101,rep,name=error_handlers,json=errorHandlers" json:"error_handlers,omitempty"` + // Configuration for third-party Python runtime libraries that are required + // by the application. + // + // Only returned in `GET` requests if `view=FULL` is set. + Libraries []*Library `protobuf:"bytes,102,rep,name=libraries" json:"libraries,omitempty"` + // Serving configuration for + // [Google Cloud Endpoints](https://cloud.google.com/appengine/docs/python/endpoints/). + // + // Only returned in `GET` requests if `view=FULL` is set. + ApiConfig *ApiConfigHandler `protobuf:"bytes,103,opt,name=api_config,json=apiConfig" json:"api_config,omitempty"` + // Environment variables available to the application. + // + // Only returned in `GET` requests if `view=FULL` is set. + EnvVariables map[string]string `protobuf:"bytes,104,rep,name=env_variables,json=envVariables" json:"env_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Duration that static files should be cached by web proxies and browsers. 
+ // Only applicable if the corresponding + // [StaticFilesHandler](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#staticfileshandler) + // does not specify its own expiration time. + // + // Only returned in `GET` requests if `view=FULL` is set. + DefaultExpiration *google_protobuf1.Duration `protobuf:"bytes,105,opt,name=default_expiration,json=defaultExpiration" json:"default_expiration,omitempty"` + // Configures health checking for VM instances. Unhealthy instances are + // stopped and replaced with new instances. Only applicable for VM + // runtimes. + // + // Only returned in `GET` requests if `view=FULL` is set. + HealthCheck *HealthCheck `protobuf:"bytes,106,opt,name=health_check,json=healthCheck" json:"health_check,omitempty"` + // Files that match this pattern will not be built into this version. + // Only applicable for Go runtimes. + // + // Only returned in `GET` requests if `view=FULL` is set. + NobuildFilesRegex string `protobuf:"bytes,107,opt,name=nobuild_files_regex,json=nobuildFilesRegex" json:"nobuild_files_regex,omitempty"` + // Code and application artifacts that make up this version. + // + // Only returned in `GET` requests if `view=FULL` is set. + Deployment *Deployment `protobuf:"bytes,108,opt,name=deployment" json:"deployment,omitempty"` + // Serving URL for this version. 
Example: + // "https://myversion-dot-myservice-dot-myapp.appspot.com" + // + // @OutputOnly + VersionUrl string `protobuf:"bytes,109,opt,name=version_url,json=versionUrl" json:"version_url,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0} } + +type isVersion_Scaling interface { + isVersion_Scaling() +} + +type Version_AutomaticScaling struct { + AutomaticScaling *AutomaticScaling `protobuf:"bytes,3,opt,name=automatic_scaling,json=automaticScaling,oneof"` +} +type Version_BasicScaling struct { + BasicScaling *BasicScaling `protobuf:"bytes,4,opt,name=basic_scaling,json=basicScaling,oneof"` +} +type Version_ManualScaling struct { + ManualScaling *ManualScaling `protobuf:"bytes,5,opt,name=manual_scaling,json=manualScaling,oneof"` +} + +func (*Version_AutomaticScaling) isVersion_Scaling() {} +func (*Version_BasicScaling) isVersion_Scaling() {} +func (*Version_ManualScaling) isVersion_Scaling() {} + +func (m *Version) GetScaling() isVersion_Scaling { + if m != nil { + return m.Scaling + } + return nil +} + +func (m *Version) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Version) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Version) GetAutomaticScaling() *AutomaticScaling { + if x, ok := m.GetScaling().(*Version_AutomaticScaling); ok { + return x.AutomaticScaling + } + return nil +} + +func (m *Version) GetBasicScaling() *BasicScaling { + if x, ok := m.GetScaling().(*Version_BasicScaling); ok { + return x.BasicScaling + } + return nil +} + +func (m *Version) GetManualScaling() *ManualScaling { + if x, ok := m.GetScaling().(*Version_ManualScaling); ok { + return x.ManualScaling + } + return nil +} + +func (m *Version) GetInboundServices() []InboundServiceType { + if m != nil { + return m.InboundServices + } 
+ return nil +} + +func (m *Version) GetInstanceClass() string { + if m != nil { + return m.InstanceClass + } + return "" +} + +func (m *Version) GetNetwork() *Network { + if m != nil { + return m.Network + } + return nil +} + +func (m *Version) GetResources() *Resources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Version) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +func (m *Version) GetThreadsafe() bool { + if m != nil { + return m.Threadsafe + } + return false +} + +func (m *Version) GetVm() bool { + if m != nil { + return m.Vm + } + return false +} + +func (m *Version) GetBetaSettings() map[string]string { + if m != nil { + return m.BetaSettings + } + return nil +} + +func (m *Version) GetEnv() string { + if m != nil { + return m.Env + } + return "" +} + +func (m *Version) GetServingStatus() ServingStatus { + if m != nil { + return m.ServingStatus + } + return ServingStatus_SERVING_STATUS_UNSPECIFIED +} + +func (m *Version) GetCreatedBy() string { + if m != nil { + return m.CreatedBy + } + return "" +} + +func (m *Version) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Version) GetDiskUsageBytes() int64 { + if m != nil { + return m.DiskUsageBytes + } + return 0 +} + +func (m *Version) GetHandlers() []*UrlMap { + if m != nil { + return m.Handlers + } + return nil +} + +func (m *Version) GetErrorHandlers() []*ErrorHandler { + if m != nil { + return m.ErrorHandlers + } + return nil +} + +func (m *Version) GetLibraries() []*Library { + if m != nil { + return m.Libraries + } + return nil +} + +func (m *Version) GetApiConfig() *ApiConfigHandler { + if m != nil { + return m.ApiConfig + } + return nil +} + +func (m *Version) GetEnvVariables() map[string]string { + if m != nil { + return m.EnvVariables + } + return nil +} + +func (m *Version) GetDefaultExpiration() *google_protobuf1.Duration { + if m != nil { + return m.DefaultExpiration + } 
+ return nil +} + +func (m *Version) GetHealthCheck() *HealthCheck { + if m != nil { + return m.HealthCheck + } + return nil +} + +func (m *Version) GetNobuildFilesRegex() string { + if m != nil { + return m.NobuildFilesRegex + } + return "" +} + +func (m *Version) GetDeployment() *Deployment { + if m != nil { + return m.Deployment + } + return nil +} + +func (m *Version) GetVersionUrl() string { + if m != nil { + return m.VersionUrl + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Version) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Version_OneofMarshaler, _Version_OneofUnmarshaler, _Version_OneofSizer, []interface{}{ + (*Version_AutomaticScaling)(nil), + (*Version_BasicScaling)(nil), + (*Version_ManualScaling)(nil), + } +} + +func _Version_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Version) + // scaling + switch x := m.Scaling.(type) { + case *Version_AutomaticScaling: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AutomaticScaling); err != nil { + return err + } + case *Version_BasicScaling: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BasicScaling); err != nil { + return err + } + case *Version_ManualScaling: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ManualScaling); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Version.Scaling has unexpected type %T", x) + } + return nil +} + +func _Version_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Version) + switch tag { + case 3: // scaling.automatic_scaling + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AutomaticScaling) + err := b.DecodeMessage(msg) + m.Scaling = &Version_AutomaticScaling{msg} + 
return true, err + case 4: // scaling.basic_scaling + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BasicScaling) + err := b.DecodeMessage(msg) + m.Scaling = &Version_BasicScaling{msg} + return true, err + case 5: // scaling.manual_scaling + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ManualScaling) + err := b.DecodeMessage(msg) + m.Scaling = &Version_ManualScaling{msg} + return true, err + default: + return false, nil + } +} + +func _Version_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Version) + // scaling + switch x := m.Scaling.(type) { + case *Version_AutomaticScaling: + s := proto.Size(x.AutomaticScaling) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Version_BasicScaling: + s := proto.Size(x.BasicScaling) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Version_ManualScaling: + s := proto.Size(x.ManualScaling) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Automatic scaling is based on request rate, response latencies, and other +// application metrics. +type AutomaticScaling struct { + // Amount of time that the + // [Autoscaler](https://cloud.google.com/compute/docs/autoscaler/) + // should wait between changes to the number of virtual machines. + // Only applicable for VM runtimes. + CoolDownPeriod *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=cool_down_period,json=coolDownPeriod" json:"cool_down_period,omitempty"` + // Target scaling by CPU usage. 
+ CpuUtilization *CpuUtilization `protobuf:"bytes,2,opt,name=cpu_utilization,json=cpuUtilization" json:"cpu_utilization,omitempty"` + // Number of concurrent requests an automatic scaling instance can accept + // before the scheduler spawns a new instance. + // + // Defaults to a runtime-specific value. + MaxConcurrentRequests int32 `protobuf:"varint,3,opt,name=max_concurrent_requests,json=maxConcurrentRequests" json:"max_concurrent_requests,omitempty"` + // Maximum number of idle instances that should be maintained for this + // version. + MaxIdleInstances int32 `protobuf:"varint,4,opt,name=max_idle_instances,json=maxIdleInstances" json:"max_idle_instances,omitempty"` + // Maximum number of instances that should be started to handle requests. + MaxTotalInstances int32 `protobuf:"varint,5,opt,name=max_total_instances,json=maxTotalInstances" json:"max_total_instances,omitempty"` + // Maximum amount of time that a request should wait in the pending queue + // before starting a new instance to handle it. + MaxPendingLatency *google_protobuf1.Duration `protobuf:"bytes,6,opt,name=max_pending_latency,json=maxPendingLatency" json:"max_pending_latency,omitempty"` + // Minimum number of idle instances that should be maintained for + // this version. Only applicable for the default version of a service. + MinIdleInstances int32 `protobuf:"varint,7,opt,name=min_idle_instances,json=minIdleInstances" json:"min_idle_instances,omitempty"` + // Minimum number of instances that should be maintained for this version. + MinTotalInstances int32 `protobuf:"varint,8,opt,name=min_total_instances,json=minTotalInstances" json:"min_total_instances,omitempty"` + // Minimum amount of time a request should wait in the pending queue before + // starting a new instance to handle it. + MinPendingLatency *google_protobuf1.Duration `protobuf:"bytes,9,opt,name=min_pending_latency,json=minPendingLatency" json:"min_pending_latency,omitempty"` + // Target scaling by request utilization. 
+ RequestUtilization *RequestUtilization `protobuf:"bytes,10,opt,name=request_utilization,json=requestUtilization" json:"request_utilization,omitempty"` + // Target scaling by disk usage. + DiskUtilization *DiskUtilization `protobuf:"bytes,11,opt,name=disk_utilization,json=diskUtilization" json:"disk_utilization,omitempty"` + // Target scaling by network usage. + NetworkUtilization *NetworkUtilization `protobuf:"bytes,12,opt,name=network_utilization,json=networkUtilization" json:"network_utilization,omitempty"` +} + +func (m *AutomaticScaling) Reset() { *m = AutomaticScaling{} } +func (m *AutomaticScaling) String() string { return proto.CompactTextString(m) } +func (*AutomaticScaling) ProtoMessage() {} +func (*AutomaticScaling) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{1} } + +func (m *AutomaticScaling) GetCoolDownPeriod() *google_protobuf1.Duration { + if m != nil { + return m.CoolDownPeriod + } + return nil +} + +func (m *AutomaticScaling) GetCpuUtilization() *CpuUtilization { + if m != nil { + return m.CpuUtilization + } + return nil +} + +func (m *AutomaticScaling) GetMaxConcurrentRequests() int32 { + if m != nil { + return m.MaxConcurrentRequests + } + return 0 +} + +func (m *AutomaticScaling) GetMaxIdleInstances() int32 { + if m != nil { + return m.MaxIdleInstances + } + return 0 +} + +func (m *AutomaticScaling) GetMaxTotalInstances() int32 { + if m != nil { + return m.MaxTotalInstances + } + return 0 +} + +func (m *AutomaticScaling) GetMaxPendingLatency() *google_protobuf1.Duration { + if m != nil { + return m.MaxPendingLatency + } + return nil +} + +func (m *AutomaticScaling) GetMinIdleInstances() int32 { + if m != nil { + return m.MinIdleInstances + } + return 0 +} + +func (m *AutomaticScaling) GetMinTotalInstances() int32 { + if m != nil { + return m.MinTotalInstances + } + return 0 +} + +func (m *AutomaticScaling) GetMinPendingLatency() *google_protobuf1.Duration { + if m != nil { + return m.MinPendingLatency + } + return nil +} + 
+func (m *AutomaticScaling) GetRequestUtilization() *RequestUtilization { + if m != nil { + return m.RequestUtilization + } + return nil +} + +func (m *AutomaticScaling) GetDiskUtilization() *DiskUtilization { + if m != nil { + return m.DiskUtilization + } + return nil +} + +func (m *AutomaticScaling) GetNetworkUtilization() *NetworkUtilization { + if m != nil { + return m.NetworkUtilization + } + return nil +} + +// A service with basic scaling will create an instance when the application +// receives a request. The instance will be turned down when the app becomes +// idle. Basic scaling is ideal for work that is intermittent or driven by +// user activity. +type BasicScaling struct { + // Duration of time after the last request that an instance must wait before + // the instance is shut down. + IdleTimeout *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=idle_timeout,json=idleTimeout" json:"idle_timeout,omitempty"` + // Maximum number of instances to create for this version. + MaxInstances int32 `protobuf:"varint,2,opt,name=max_instances,json=maxInstances" json:"max_instances,omitempty"` +} + +func (m *BasicScaling) Reset() { *m = BasicScaling{} } +func (m *BasicScaling) String() string { return proto.CompactTextString(m) } +func (*BasicScaling) ProtoMessage() {} +func (*BasicScaling) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{2} } + +func (m *BasicScaling) GetIdleTimeout() *google_protobuf1.Duration { + if m != nil { + return m.IdleTimeout + } + return nil +} + +func (m *BasicScaling) GetMaxInstances() int32 { + if m != nil { + return m.MaxInstances + } + return 0 +} + +// A service with manual scaling runs continuously, allowing you to perform +// complex initialization and rely on the state of its memory over time. +type ManualScaling struct { + // Number of instances to assign to the service at the start. 
This number + // can later be altered by using the + // [Modules API](https://cloud.google.com/appengine/docs/python/modules/functions) + // `set_num_instances()` function. + Instances int32 `protobuf:"varint,1,opt,name=instances" json:"instances,omitempty"` +} + +func (m *ManualScaling) Reset() { *m = ManualScaling{} } +func (m *ManualScaling) String() string { return proto.CompactTextString(m) } +func (*ManualScaling) ProtoMessage() {} +func (*ManualScaling) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{3} } + +func (m *ManualScaling) GetInstances() int32 { + if m != nil { + return m.Instances + } + return 0 +} + +// Target scaling by CPU usage. +type CpuUtilization struct { + // Period of time over which CPU utilization is calculated. + AggregationWindowLength *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=aggregation_window_length,json=aggregationWindowLength" json:"aggregation_window_length,omitempty"` + // Target CPU utilization ratio to maintain when scaling. Must be between 0 + // and 1. + TargetUtilization float64 `protobuf:"fixed64,2,opt,name=target_utilization,json=targetUtilization" json:"target_utilization,omitempty"` +} + +func (m *CpuUtilization) Reset() { *m = CpuUtilization{} } +func (m *CpuUtilization) String() string { return proto.CompactTextString(m) } +func (*CpuUtilization) ProtoMessage() {} +func (*CpuUtilization) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{4} } + +func (m *CpuUtilization) GetAggregationWindowLength() *google_protobuf1.Duration { + if m != nil { + return m.AggregationWindowLength + } + return nil +} + +func (m *CpuUtilization) GetTargetUtilization() float64 { + if m != nil { + return m.TargetUtilization + } + return 0 +} + +// Target scaling by request utilization. Only applicable for VM runtimes. +type RequestUtilization struct { + // Target requests per second. 
+ TargetRequestCountPerSecond int32 `protobuf:"varint,1,opt,name=target_request_count_per_second,json=targetRequestCountPerSecond" json:"target_request_count_per_second,omitempty"` + // Target number of concurrent requests. + TargetConcurrentRequests int32 `protobuf:"varint,2,opt,name=target_concurrent_requests,json=targetConcurrentRequests" json:"target_concurrent_requests,omitempty"` +} + +func (m *RequestUtilization) Reset() { *m = RequestUtilization{} } +func (m *RequestUtilization) String() string { return proto.CompactTextString(m) } +func (*RequestUtilization) ProtoMessage() {} +func (*RequestUtilization) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{5} } + +func (m *RequestUtilization) GetTargetRequestCountPerSecond() int32 { + if m != nil { + return m.TargetRequestCountPerSecond + } + return 0 +} + +func (m *RequestUtilization) GetTargetConcurrentRequests() int32 { + if m != nil { + return m.TargetConcurrentRequests + } + return 0 +} + +// Target scaling by disk usage. Only applicable for VM runtimes. +type DiskUtilization struct { + // Target bytes written per second. + TargetWriteBytesPerSecond int32 `protobuf:"varint,14,opt,name=target_write_bytes_per_second,json=targetWriteBytesPerSecond" json:"target_write_bytes_per_second,omitempty"` + // Target ops written per second. + TargetWriteOpsPerSecond int32 `protobuf:"varint,15,opt,name=target_write_ops_per_second,json=targetWriteOpsPerSecond" json:"target_write_ops_per_second,omitempty"` + // Target bytes read per second. + TargetReadBytesPerSecond int32 `protobuf:"varint,16,opt,name=target_read_bytes_per_second,json=targetReadBytesPerSecond" json:"target_read_bytes_per_second,omitempty"` + // Target ops read per seconds. 
+ TargetReadOpsPerSecond int32 `protobuf:"varint,17,opt,name=target_read_ops_per_second,json=targetReadOpsPerSecond" json:"target_read_ops_per_second,omitempty"` +} + +func (m *DiskUtilization) Reset() { *m = DiskUtilization{} } +func (m *DiskUtilization) String() string { return proto.CompactTextString(m) } +func (*DiskUtilization) ProtoMessage() {} +func (*DiskUtilization) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{6} } + +func (m *DiskUtilization) GetTargetWriteBytesPerSecond() int32 { + if m != nil { + return m.TargetWriteBytesPerSecond + } + return 0 +} + +func (m *DiskUtilization) GetTargetWriteOpsPerSecond() int32 { + if m != nil { + return m.TargetWriteOpsPerSecond + } + return 0 +} + +func (m *DiskUtilization) GetTargetReadBytesPerSecond() int32 { + if m != nil { + return m.TargetReadBytesPerSecond + } + return 0 +} + +func (m *DiskUtilization) GetTargetReadOpsPerSecond() int32 { + if m != nil { + return m.TargetReadOpsPerSecond + } + return 0 +} + +// Target scaling by network usage. Only applicable for VM runtimes. +type NetworkUtilization struct { + // Target bytes sent per second. + TargetSentBytesPerSecond int32 `protobuf:"varint,1,opt,name=target_sent_bytes_per_second,json=targetSentBytesPerSecond" json:"target_sent_bytes_per_second,omitempty"` + // Target packets sent per second. + TargetSentPacketsPerSecond int32 `protobuf:"varint,11,opt,name=target_sent_packets_per_second,json=targetSentPacketsPerSecond" json:"target_sent_packets_per_second,omitempty"` + // Target bytes received per second. + TargetReceivedBytesPerSecond int32 `protobuf:"varint,12,opt,name=target_received_bytes_per_second,json=targetReceivedBytesPerSecond" json:"target_received_bytes_per_second,omitempty"` + // Target packets received per second. 
+ TargetReceivedPacketsPerSecond int32 `protobuf:"varint,13,opt,name=target_received_packets_per_second,json=targetReceivedPacketsPerSecond" json:"target_received_packets_per_second,omitempty"` +} + +func (m *NetworkUtilization) Reset() { *m = NetworkUtilization{} } +func (m *NetworkUtilization) String() string { return proto.CompactTextString(m) } +func (*NetworkUtilization) ProtoMessage() {} +func (*NetworkUtilization) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{7} } + +func (m *NetworkUtilization) GetTargetSentBytesPerSecond() int32 { + if m != nil { + return m.TargetSentBytesPerSecond + } + return 0 +} + +func (m *NetworkUtilization) GetTargetSentPacketsPerSecond() int32 { + if m != nil { + return m.TargetSentPacketsPerSecond + } + return 0 +} + +func (m *NetworkUtilization) GetTargetReceivedBytesPerSecond() int32 { + if m != nil { + return m.TargetReceivedBytesPerSecond + } + return 0 +} + +func (m *NetworkUtilization) GetTargetReceivedPacketsPerSecond() int32 { + if m != nil { + return m.TargetReceivedPacketsPerSecond + } + return 0 +} + +// Extra network settings. Only applicable for VM runtimes. +type Network struct { + // List of ports, or port pairs, to forward from the virtual machine to the + // application container. + ForwardedPorts []string `protobuf:"bytes,1,rep,name=forwarded_ports,json=forwardedPorts" json:"forwarded_ports,omitempty"` + // Tag to apply to the VM instance during creation. + InstanceTag string `protobuf:"bytes,2,opt,name=instance_tag,json=instanceTag" json:"instance_tag,omitempty"` + // Google Cloud Platform network where the virtual machines are created. + // Specify the short name, not the resource path. + // + // Defaults to `default`. 
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *Network) Reset() { *m = Network{} } +func (m *Network) String() string { return proto.CompactTextString(m) } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{8} } + +func (m *Network) GetForwardedPorts() []string { + if m != nil { + return m.ForwardedPorts + } + return nil +} + +func (m *Network) GetInstanceTag() string { + if m != nil { + return m.InstanceTag + } + return "" +} + +func (m *Network) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Machine resources for a version. +type Resources struct { + // Number of CPU cores needed. + Cpu float64 `protobuf:"fixed64,1,opt,name=cpu" json:"cpu,omitempty"` + // Disk size (GB) needed. + DiskGb float64 `protobuf:"fixed64,2,opt,name=disk_gb,json=diskGb" json:"disk_gb,omitempty"` + // Memory (GB) needed. + MemoryGb float64 `protobuf:"fixed64,3,opt,name=memory_gb,json=memoryGb" json:"memory_gb,omitempty"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{9} } + +func (m *Resources) GetCpu() float64 { + if m != nil { + return m.Cpu + } + return 0 +} + +func (m *Resources) GetDiskGb() float64 { + if m != nil { + return m.DiskGb + } + return 0 +} + +func (m *Resources) GetMemoryGb() float64 { + if m != nil { + return m.MemoryGb + } + return 0 +} + +func init() { + proto.RegisterType((*Version)(nil), "google.appengine.v1.Version") + proto.RegisterType((*AutomaticScaling)(nil), "google.appengine.v1.AutomaticScaling") + proto.RegisterType((*BasicScaling)(nil), "google.appengine.v1.BasicScaling") + proto.RegisterType((*ManualScaling)(nil), "google.appengine.v1.ManualScaling") + proto.RegisterType((*CpuUtilization)(nil), "google.appengine.v1.CpuUtilization") + 
proto.RegisterType((*RequestUtilization)(nil), "google.appengine.v1.RequestUtilization") + proto.RegisterType((*DiskUtilization)(nil), "google.appengine.v1.DiskUtilization") + proto.RegisterType((*NetworkUtilization)(nil), "google.appengine.v1.NetworkUtilization") + proto.RegisterType((*Network)(nil), "google.appengine.v1.Network") + proto.RegisterType((*Resources)(nil), "google.appengine.v1.Resources") + proto.RegisterEnum("google.appengine.v1.InboundServiceType", InboundServiceType_name, InboundServiceType_value) + proto.RegisterEnum("google.appengine.v1.ServingStatus", ServingStatus_name, ServingStatus_value) +} + +func init() { proto.RegisterFile("google/appengine/v1/version.proto", fileDescriptor9) } + +var fileDescriptor9 = []byte{ + // 1767 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0x5d, 0x73, 0xdb, 0xc6, + 0x15, 0x0d, 0x29, 0x4b, 0x14, 0x2f, 0x3f, 0x04, 0xad, 0xd3, 0x08, 0x96, 0x64, 0x89, 0x66, 0x92, + 0xb1, 0x26, 0xd3, 0x50, 0x13, 0x77, 0x26, 0xed, 0x38, 0x9e, 0xa6, 0x22, 0x45, 0x5b, 0x6c, 0xf5, + 0xc1, 0x01, 0x48, 0x3b, 0xd3, 0x17, 0xcc, 0x12, 0x58, 0x41, 0x5b, 0x01, 0x0b, 0x64, 0xb1, 0xa0, + 0xc4, 0xfe, 0x87, 0x3e, 0xb6, 0xd3, 0x1f, 0xd1, 0xc7, 0xbe, 0xf5, 0xcf, 0x75, 0x76, 0x01, 0x90, + 0x20, 0x89, 0xd8, 0xed, 0x1b, 0xf7, 0xee, 0x39, 0x67, 0x0f, 0xee, 0xde, 0xbd, 0x0b, 0x10, 0x5e, + 0xb8, 0x41, 0xe0, 0x7a, 0xe4, 0x14, 0x87, 0x21, 0x61, 0x2e, 0x65, 0xe4, 0x74, 0xfa, 0xdd, 0xe9, + 0x94, 0xf0, 0x88, 0x06, 0xac, 0x13, 0xf2, 0x40, 0x04, 0xe8, 0x69, 0x02, 0xe9, 0xcc, 0x21, 0x9d, + 0xe9, 0x77, 0xfb, 0x87, 0x73, 0x1e, 0x3d, 0xc5, 0x8c, 0x05, 0x02, 0x0b, 0x1a, 0xb0, 0x28, 0xa1, + 0xec, 0xb7, 0x8b, 0x54, 0x71, 0x18, 0x5a, 0x33, 0xec, 0x7b, 0x29, 0xa6, 0x55, 0x84, 0x71, 0x48, + 0xe8, 0x05, 0xb3, 0x14, 0x71, 0x94, 0x22, 0xd4, 0x68, 0x12, 0xdf, 0x9e, 0x3a, 0x31, 0x57, 0xcb, + 0xa4, 0xf3, 0xc7, 0xab, 0xf3, 0x82, 0xfa, 0x24, 0x12, 0xd8, 0x0f, 0x13, 0x40, 0xfb, 0xef, 0x0d, + 0xa8, 0xbc, 0x4f, 0x9e, 
0x05, 0x21, 0x78, 0xc2, 0xb0, 0x4f, 0xf4, 0x52, 0xab, 0x74, 0x52, 0x35, + 0xd4, 0x6f, 0xd4, 0x84, 0x32, 0x75, 0xf4, 0xb2, 0x8a, 0x94, 0xa9, 0x83, 0x46, 0xb0, 0x8b, 0x63, + 0x11, 0xf8, 0x58, 0x50, 0xdb, 0x8a, 0x6c, 0xec, 0x51, 0xe6, 0xea, 0x1b, 0xad, 0xd2, 0x49, 0xed, + 0xd5, 0xd7, 0x9d, 0x82, 0x2c, 0x74, 0xce, 0x32, 0xb4, 0x99, 0x80, 0x2f, 0x3e, 0x33, 0x34, 0xbc, + 0x12, 0x43, 0x17, 0xd0, 0x98, 0xe0, 0x28, 0xa7, 0xf8, 0x44, 0x29, 0xbe, 0x28, 0x54, 0xec, 0x4a, + 0xe4, 0x42, 0xad, 0x3e, 0xc9, 0x8d, 0xd1, 0x9f, 0xa0, 0xe9, 0x63, 0x16, 0x63, 0x6f, 0x2e, 0xb5, + 0xa9, 0xa4, 0xda, 0x85, 0x52, 0x57, 0x0a, 0xba, 0xd0, 0x6a, 0xf8, 0xf9, 0x00, 0x32, 0x40, 0xa3, + 0x6c, 0x12, 0xc4, 0xcc, 0xb1, 0x22, 0xc2, 0xa7, 0xd4, 0x26, 0x91, 0xbe, 0xd5, 0xda, 0x38, 0x69, + 0xbe, 0x7a, 0x59, 0x28, 0x37, 0x48, 0xc0, 0x66, 0x82, 0x1d, 0xcd, 0x42, 0x62, 0xec, 0xd0, 0xa5, + 0x58, 0x84, 0xbe, 0x86, 0x26, 0x65, 0x91, 0xc0, 0xcc, 0x26, 0x96, 0xed, 0xe1, 0x28, 0xd2, 0x2b, + 0x2a, 0xb9, 0x8d, 0x2c, 0xda, 0x93, 0x41, 0xf4, 0x3d, 0x54, 0x18, 0x11, 0x0f, 0x01, 0xbf, 0xd7, + 0xb7, 0xd5, 0x03, 0x1c, 0x16, 0xae, 0x78, 0x9d, 0x60, 0x8c, 0x0c, 0x8c, 0xde, 0x40, 0x95, 0x93, + 0x28, 0x88, 0xb9, 0xf4, 0x5a, 0x55, 0xcc, 0xa3, 0x42, 0xa6, 0x91, 0xa1, 0x8c, 0x05, 0x01, 0xe9, + 0x50, 0xe1, 0x31, 0x93, 0x35, 0xa2, 0x83, 0x72, 0x95, 0x0d, 0xd1, 0x11, 0x80, 0xb8, 0xe3, 0x04, + 0x3b, 0x11, 0xbe, 0x25, 0x7a, 0xad, 0x55, 0x3a, 0xd9, 0x36, 0x72, 0x11, 0x59, 0x27, 0x53, 0x5f, + 0xaf, 0xab, 0x78, 0x79, 0xea, 0x23, 0x13, 0x1a, 0x13, 0x22, 0xb0, 0x15, 0x11, 0x21, 0x28, 0x73, + 0x23, 0xbd, 0xd1, 0xda, 0x38, 0xa9, 0xbd, 0xea, 0x14, 0x7a, 0x49, 0x0b, 0xb0, 0xd3, 0x25, 0x02, + 0x9b, 0x29, 0xa1, 0xcf, 0x04, 0x9f, 0x19, 0xf5, 0x49, 0x2e, 0x84, 0x34, 0xd8, 0x20, 0x6c, 0xaa, + 0x37, 0x95, 0x35, 0xf9, 0x13, 0x0d, 0xa0, 0xa9, 0x76, 0x86, 0xb9, 0x56, 0x24, 0xb0, 0x88, 0x23, + 0x7d, 0xa7, 0x55, 0x3a, 0x69, 0xfe, 0xc2, 0x76, 0x9b, 0x09, 0xd4, 0x54, 0x48, 0xa3, 0x11, 0xe5, + 0x87, 0xe8, 0x39, 0x80, 0xcd, 0x09, 0x16, 0xc4, 0xb1, 0x26, 
0x33, 0x5d, 0x53, 0x6b, 0x54, 0xd3, + 0x48, 0x77, 0x86, 0x7e, 0x80, 0x5a, 0x32, 0xb0, 0x54, 0x7a, 0x76, 0x55, 0x6a, 0xf7, 0xb3, 0x65, + 0xb2, 0xf3, 0xd5, 0x19, 0x65, 0xe7, 0xcb, 0x48, 0xd5, 0x64, 0x00, 0x9d, 0x80, 0xe6, 0xd0, 0xe8, + 0xde, 0x8a, 0x23, 0xec, 0x12, 0x6b, 0x32, 0x13, 0x24, 0xd2, 0x51, 0xab, 0x74, 0xb2, 0x61, 0x34, + 0x65, 0x7c, 0x2c, 0xc3, 0x5d, 0x19, 0x45, 0xbf, 0x85, 0xed, 0x3b, 0xcc, 0x1c, 0x8f, 0xf0, 0x48, + 0x77, 0x54, 0xca, 0x0e, 0x0a, 0x1f, 0x65, 0xcc, 0xbd, 0x2b, 0x1c, 0x1a, 0x73, 0x30, 0xba, 0x80, + 0x26, 0xe1, 0x3c, 0xe0, 0xd6, 0x9c, 0x4e, 0x14, 0xbd, 0xf8, 0x0c, 0xf5, 0x25, 0xf4, 0x22, 0x41, + 0x1a, 0x0d, 0x92, 0x1b, 0x45, 0xe8, 0x35, 0x54, 0x3d, 0x3a, 0xe1, 0x98, 0x53, 0x12, 0xe9, 0xb7, + 0x4a, 0xa4, 0xb8, 0xf8, 0x2e, 0x15, 0x6a, 0x66, 0x2c, 0xe0, 0xe8, 0x1c, 0x00, 0x87, 0xd4, 0xb2, + 0x03, 0x76, 0x4b, 0x5d, 0xdd, 0xfd, 0x58, 0x5f, 0x08, 0x69, 0x4f, 0xa1, 0x32, 0x17, 0x55, 0x9c, + 0x45, 0x64, 0xf1, 0x10, 0x36, 0xb5, 0xa6, 0x98, 0x53, 0x3c, 0xf1, 0x48, 0xa4, 0xdf, 0xfd, 0x0f, + 0xc5, 0xd3, 0x67, 0xd3, 0xf7, 0x19, 0x21, 0x2d, 0x1e, 0x92, 0x0b, 0xa1, 0x0b, 0x40, 0x0e, 0xb9, + 0xc5, 0xb1, 0x27, 0x2c, 0xf2, 0x18, 0xd2, 0xa4, 0x4d, 0xea, 0x54, 0x59, 0x7c, 0xb6, 0xb6, 0x8f, + 0xe7, 0x69, 0x1f, 0x35, 0x76, 0x53, 0x52, 0x7f, 0xce, 0x41, 0x3d, 0xa8, 0xdf, 0x11, 0xec, 0x89, + 0x3b, 0xcb, 0xbe, 0x23, 0xf6, 0xbd, 0xfe, 0x17, 0xa5, 0xd1, 0x2a, 0x74, 0x77, 0xa1, 0x80, 0x3d, + 0x89, 0x33, 0x6a, 0x77, 0x8b, 0x01, 0xea, 0xc0, 0x53, 0x16, 0x4c, 0x62, 0xea, 0x39, 0xd6, 0x2d, + 0xf5, 0x48, 0x64, 0x71, 0xe2, 0x92, 0x47, 0xfd, 0x5e, 0xd5, 0xdd, 0x6e, 0x3a, 0xf5, 0x56, 0xce, + 0x18, 0x72, 0x02, 0xfd, 0x08, 0x90, 0x74, 0x7e, 0x9f, 0x30, 0xa1, 0x7b, 0x6a, 0xc9, 0xe3, 0xc2, + 0x25, 0xcf, 0xe7, 0x30, 0x23, 0x47, 0x41, 0xc7, 0x50, 0x4b, 0x2f, 0x2d, 0x2b, 0xe6, 0x9e, 0xee, + 0xab, 0x85, 0x20, 0x0d, 0x8d, 0xb9, 0xb7, 0xff, 0x23, 0xec, 0xae, 0x1d, 0x40, 0x79, 0xe4, 0xee, + 0xc9, 0x2c, 0xbd, 0x12, 0xe4, 0x4f, 0xf4, 0x39, 0x6c, 0x4e, 0xb1, 0x17, 0x93, 0xf4, 0x52, 0x48, + 
0x06, 0xaf, 0xcb, 0xbf, 0x2b, 0x49, 0x81, 0xb5, 0x4d, 0xf8, 0x7f, 0x04, 0xba, 0x55, 0xa8, 0xa4, + 0x5d, 0xbb, 0xfd, 0xef, 0x2d, 0xd0, 0x56, 0xaf, 0x0e, 0xd4, 0x03, 0xcd, 0x0e, 0x02, 0xcf, 0x72, + 0x82, 0x07, 0x66, 0x85, 0x84, 0xd3, 0xc0, 0x51, 0xc2, 0x1f, 0xdd, 0xc0, 0xa6, 0xa4, 0x9c, 0x07, + 0x0f, 0x6c, 0xa8, 0x08, 0xe8, 0x12, 0x76, 0xec, 0x30, 0xb6, 0x62, 0x41, 0x3d, 0xfa, 0xd7, 0xa4, + 0x08, 0xca, 0x4a, 0xe3, 0xcb, 0xc2, 0x6c, 0xf6, 0xc2, 0x78, 0xbc, 0x80, 0x1a, 0x4d, 0x7b, 0x69, + 0x8c, 0xbe, 0x87, 0x3d, 0x1f, 0x3f, 0xca, 0x82, 0xb7, 0x63, 0xce, 0x09, 0x13, 0x16, 0x27, 0x3f, + 0xc7, 0x24, 0x12, 0x91, 0xba, 0x15, 0x37, 0x8d, 0x5f, 0xf9, 0xf8, 0xb1, 0x37, 0x9f, 0x35, 0xd2, + 0x49, 0xf4, 0x6b, 0x40, 0x92, 0x47, 0x1d, 0x8f, 0x58, 0x59, 0xe7, 0x8f, 0xd4, 0xb5, 0xb7, 0x69, + 0x68, 0x3e, 0x7e, 0x1c, 0x38, 0x1e, 0x19, 0x64, 0x71, 0x59, 0x2c, 0x12, 0x2d, 0x02, 0x81, 0xbd, + 0x1c, 0x7c, 0x53, 0xc1, 0x77, 0x7d, 0xfc, 0x38, 0x92, 0x33, 0x0b, 0xfc, 0x20, 0xc1, 0x87, 0x84, + 0x39, 0xb2, 0x35, 0x7a, 0x58, 0x10, 0x66, 0xcf, 0xf4, 0xad, 0x4f, 0x16, 0xbb, 0x8f, 0x1f, 0x87, + 0x09, 0xe9, 0x32, 0xe1, 0x28, 0xa3, 0x94, 0xad, 0x1a, 0xad, 0xa4, 0x46, 0x29, 0x5b, 0x37, 0x4a, + 0xd9, 0x9a, 0xd1, 0xed, 0xd4, 0x28, 0x65, 0x05, 0x46, 0x29, 0x5b, 0x33, 0x5a, 0xfd, 0xb4, 0x51, + 0xca, 0x56, 0x8c, 0xfe, 0x04, 0x4f, 0xd3, 0xd4, 0x2f, 0xed, 0x2d, 0x28, 0xa9, 0x97, 0xbf, 0x70, + 0x07, 0x2a, 0x7c, 0x7e, 0x7f, 0x11, 0x5f, 0x8b, 0xa1, 0x9b, 0xac, 0x7b, 0xe7, 0x64, 0x6b, 0x4a, + 0xf6, 0xab, 0xe2, 0x03, 0x28, 0x5b, 0x7a, 0x4e, 0x73, 0xc7, 0x59, 0x0e, 0x48, 0xab, 0xe9, 0x7d, + 0xbd, 0xa4, 0x59, 0xff, 0x88, 0xd5, 0xf4, 0xa2, 0x5f, 0xb2, 0xca, 0xd6, 0x62, 0xed, 0x9f, 0xa1, + 0x9e, 0x7f, 0x3d, 0x42, 0x6f, 0xa0, 0xae, 0x76, 0x4e, 0xde, 0x59, 0x41, 0x2c, 0x3e, 0x7d, 0x5a, + 0x6a, 0x12, 0x3e, 0x4a, 0xd0, 0xe8, 0x4b, 0x68, 0xa8, 0x22, 0x9d, 0xef, 0x63, 0x59, 0xed, 0x63, + 0x5d, 0xd6, 0x67, 0x16, 0x6b, 0x7f, 0x0b, 0x8d, 0xa5, 0xd7, 0x28, 0x74, 0x08, 0xd5, 0x05, 0xa3, + 0xa4, 0x18, 0x8b, 0x40, 0xfb, 0x1f, 
0x25, 0x68, 0x2e, 0x9f, 0x29, 0x34, 0x86, 0x67, 0xd8, 0x75, + 0x39, 0x71, 0xd5, 0xd0, 0x7a, 0xa0, 0xcc, 0x09, 0x1e, 0x2c, 0x8f, 0x30, 0x57, 0xdc, 0x7d, 0xda, + 0xf1, 0x5e, 0x8e, 0xfb, 0x41, 0x51, 0x2f, 0x15, 0x13, 0x7d, 0x0b, 0x48, 0x60, 0xee, 0x12, 0xb1, + 0x76, 0xd6, 0x4b, 0xc6, 0x6e, 0x32, 0x93, 0x4f, 0xdd, 0x3f, 0x4b, 0x80, 0xd6, 0x0b, 0x02, 0x9d, + 0xc3, 0x71, 0xaa, 0x92, 0x55, 0x97, 0x1d, 0xc4, 0x4c, 0xc8, 0xf6, 0x63, 0x45, 0xc4, 0x0e, 0x98, + 0x93, 0x3e, 0xe3, 0x41, 0x02, 0x4b, 0x25, 0x7a, 0x12, 0x34, 0x24, 0xdc, 0x54, 0x10, 0xf4, 0x06, + 0xf6, 0x53, 0x95, 0xa2, 0x4e, 0x91, 0xa4, 0x55, 0x4f, 0x10, 0xeb, 0xcd, 0xa2, 0xfd, 0xb7, 0x32, + 0xec, 0xac, 0x14, 0x15, 0xfa, 0x03, 0x3c, 0x4f, 0x15, 0x1f, 0x38, 0x15, 0xe9, 0x4b, 0x45, 0xde, + 0x55, 0x53, 0x89, 0x3e, 0x4b, 0x40, 0x1f, 0x24, 0x46, 0xbd, 0x61, 0xe4, 0x3d, 0x1d, 0x2c, 0x29, + 0x04, 0xe1, 0x12, 0x7f, 0x47, 0xf1, 0xf7, 0x72, 0xfc, 0x9b, 0x30, 0xc7, 0xfe, 0x3d, 0x1c, 0xce, + 0xf3, 0x82, 0x9d, 0xf5, 0xe5, 0xb5, 0xfc, 0x33, 0x19, 0x04, 0x3b, 0x2b, 0xab, 0xbf, 0x9e, 0x67, + 0x44, 0xf1, 0x57, 0x16, 0xdf, 0x55, 0xec, 0x2f, 0x16, 0xec, 0xfc, 0xda, 0xed, 0x7f, 0x95, 0x01, + 0xad, 0x1f, 0x88, 0x9c, 0xa5, 0x48, 0xa6, 0x77, 0xcd, 0x52, 0x29, 0x6f, 0xc9, 0x24, 0x4c, 0xac, + 0x58, 0xea, 0xc2, 0x51, 0x9e, 0x1f, 0x62, 0xfb, 0x9e, 0x88, 0x25, 0x85, 0x9a, 0x52, 0xd8, 0x5f, + 0x28, 0x0c, 0x13, 0xcc, 0x42, 0xe3, 0x2d, 0xb4, 0xe6, 0x8f, 0x65, 0x13, 0x3a, 0x25, 0x05, 0xa9, + 0xa9, 0x2b, 0x95, 0xc3, 0xec, 0xe1, 0x12, 0xd8, 0x8a, 0x97, 0x3f, 0x42, 0x7b, 0x55, 0xa7, 0xc0, + 0x4f, 0x43, 0x29, 0x1d, 0x2d, 0x2b, 0xad, 0x7a, 0x6a, 0x53, 0xa8, 0xa4, 0xd9, 0x42, 0x2f, 0x61, + 0xe7, 0x36, 0xe0, 0x0f, 0x98, 0x3b, 0x52, 0x30, 0xe0, 0x42, 0x9e, 0xd0, 0x8d, 0x93, 0xaa, 0xd1, + 0x9c, 0x87, 0x87, 0x32, 0x8a, 0x5e, 0x40, 0x7d, 0xfe, 0x99, 0x22, 0xb0, 0x9b, 0xde, 0xd5, 0xb5, + 0x2c, 0x36, 0xc2, 0xee, 0xfc, 0x73, 0x71, 0x63, 0xf1, 0xb9, 0xd8, 0x36, 0xa1, 0x3a, 0xff, 0xb0, + 0x90, 0x57, 0xbf, 0x1d, 0xc6, 0x2a, 0xed, 0x25, 0x43, 0xfe, 0x44, 0x7b, 
0x50, 0x51, 0x9d, 0xd4, + 0x9d, 0xa4, 0xe7, 0x70, 0x4b, 0x0e, 0xdf, 0x4d, 0xd0, 0x01, 0x54, 0x7d, 0xe2, 0x07, 0x7c, 0x26, + 0xa7, 0x36, 0xd4, 0xd4, 0x76, 0x12, 0x78, 0x37, 0xf9, 0xe6, 0x3f, 0x65, 0x40, 0xeb, 0x9f, 0x56, + 0xe8, 0x18, 0x0e, 0x06, 0xd7, 0xdd, 0x9b, 0xf1, 0xf5, 0xb9, 0x65, 0xf6, 0x8d, 0xf7, 0x83, 0x5e, + 0xdf, 0x1a, 0x5f, 0x9b, 0xc3, 0x7e, 0x6f, 0xf0, 0x76, 0xd0, 0x3f, 0xd7, 0x3e, 0x43, 0x3a, 0x7c, + 0xbe, 0x0a, 0xb8, 0x3a, 0x1b, 0x5c, 0x6a, 0xa5, 0x22, 0xaa, 0x9c, 0xb1, 0x64, 0xa8, 0xd7, 0xd7, + 0xca, 0xe8, 0x08, 0xf6, 0x57, 0x01, 0x3f, 0x5d, 0x0d, 0x87, 0x56, 0xdf, 0x30, 0x6e, 0x0c, 0x6d, + 0x03, 0xb5, 0xe0, 0xb0, 0x70, 0xfe, 0xaa, 0x6f, 0x9a, 0x67, 0xef, 0xfa, 0xda, 0x13, 0xd4, 0x86, + 0xa3, 0x42, 0x84, 0x39, 0xee, 0x9a, 0x3d, 0x63, 0xd0, 0xed, 0x6b, 0x9b, 0xe8, 0x05, 0x3c, 0x2f, + 0xc4, 0x0c, 0x8d, 0xbe, 0xd9, 0x97, 0x46, 0xb6, 0xd0, 0x57, 0xd0, 0x5a, 0x85, 0xf4, 0x2e, 0xce, + 0xae, 0xaf, 0xfb, 0x97, 0x0b, 0x54, 0x05, 0xed, 0xc3, 0x17, 0xab, 0xa8, 0x0f, 0x67, 0xc6, 0xd5, + 0x78, 0xa8, 0x55, 0xbf, 0x19, 0x40, 0x63, 0xe9, 0xbb, 0x47, 0x3e, 0x9b, 0x02, 0x5d, 0xbf, 0xb3, + 0xcc, 0xd1, 0xd9, 0x68, 0x6c, 0xae, 0xa4, 0xad, 0x06, 0x95, 0x74, 0x5e, 0x2b, 0xa9, 0xc1, 0xe8, + 0x66, 0x38, 0xec, 0x9f, 0x6b, 0xe5, 0xae, 0x0b, 0x7b, 0x76, 0xe0, 0x17, 0xdd, 0x4f, 0xdd, 0x7a, + 0xfa, 0x1a, 0x3e, 0x94, 0xfd, 0x79, 0x58, 0xfa, 0xf3, 0x9b, 0x14, 0xe4, 0x06, 0x1e, 0x66, 0x6e, + 0x27, 0xe0, 0xee, 0xa9, 0x4b, 0x98, 0xea, 0xde, 0xa7, 0xc9, 0x14, 0x0e, 0x69, 0xb4, 0xf4, 0xcf, + 0xc6, 0x0f, 0xf3, 0xc1, 0x64, 0x4b, 0x01, 0x7f, 0xf3, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2f, + 0xe1, 0xd6, 0xfa, 0x7b, 0x11, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1/embedded_assistant.pb.go b/vendor/google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1/embedded_assistant.pb.go new file mode 100644 index 000000000..f3917faf9 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1/embedded_assistant.pb.go @@ -0,0 +1,969 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/assistant/embedded/v1alpha1/embedded_assistant.proto + +/* +Package embedded is a generated protocol buffer package. + +It is generated from these files: + google/assistant/embedded/v1alpha1/embedded_assistant.proto + +It has these top-level messages: + ConverseConfig + AudioInConfig + AudioOutConfig + ConverseState + AudioOut + ConverseResult + ConverseRequest + ConverseResponse +*/ +package embedded + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Audio encoding of the data sent in the audio message. +// Audio must be one-channel (mono). The only language supported is "en-US". +type AudioInConfig_Encoding int32 + +const ( + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][]. + AudioInConfig_ENCODING_UNSPECIFIED AudioInConfig_Encoding = 0 + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + // This encoding includes no header, only the raw audio bytes. 
+ AudioInConfig_LINEAR16 AudioInConfig_Encoding = 1 + // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio + // Codec) is the recommended encoding because it is + // lossless--therefore recognition is not compromised--and + // requires only about half the bandwidth of `LINEAR16`. This encoding + // includes the `FLAC` stream header followed by audio data. It supports + // 16-bit and 24-bit samples, however, not all fields in `STREAMINFO` are + // supported. + AudioInConfig_FLAC AudioInConfig_Encoding = 2 +) + +var AudioInConfig_Encoding_name = map[int32]string{ + 0: "ENCODING_UNSPECIFIED", + 1: "LINEAR16", + 2: "FLAC", +} +var AudioInConfig_Encoding_value = map[string]int32{ + "ENCODING_UNSPECIFIED": 0, + "LINEAR16": 1, + "FLAC": 2, +} + +func (x AudioInConfig_Encoding) String() string { + return proto.EnumName(AudioInConfig_Encoding_name, int32(x)) +} +func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// Audio encoding of the data returned in the audio message. All encodings are +// raw audio bytes with no header, except as indicated below. +type AudioOutConfig_Encoding int32 + +const ( + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][]. + AudioOutConfig_ENCODING_UNSPECIFIED AudioOutConfig_Encoding = 0 + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + AudioOutConfig_LINEAR16 AudioOutConfig_Encoding = 1 + // MP3 audio encoding. The sample rate is encoded in the payload. + AudioOutConfig_MP3 AudioOutConfig_Encoding = 2 + // Opus-encoded audio wrapped in an ogg container. The result will be a + // file which can be played natively on Android and in some browsers (such + // as Chrome). The quality of the encoding is considerably higher than MP3 + // while using the same bitrate. The sample rate is encoded in the payload. 
+ AudioOutConfig_OPUS_IN_OGG AudioOutConfig_Encoding = 3 +) + +var AudioOutConfig_Encoding_name = map[int32]string{ + 0: "ENCODING_UNSPECIFIED", + 1: "LINEAR16", + 2: "MP3", + 3: "OPUS_IN_OGG", +} +var AudioOutConfig_Encoding_value = map[string]int32{ + "ENCODING_UNSPECIFIED": 0, + "LINEAR16": 1, + "MP3": 2, + "OPUS_IN_OGG": 3, +} + +func (x AudioOutConfig_Encoding) String() string { + return proto.EnumName(AudioOutConfig_Encoding_name, int32(x)) +} +func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// Possible states of the microphone after a `Converse` RPC completes. +type ConverseResult_MicrophoneMode int32 + +const ( + // No mode specified. + ConverseResult_MICROPHONE_MODE_UNSPECIFIED ConverseResult_MicrophoneMode = 0 + // The service is not expecting a follow-on question from the user. + // The microphone should remain off until the user re-activates it. + ConverseResult_CLOSE_MICROPHONE ConverseResult_MicrophoneMode = 1 + // The service is expecting a follow-on question from the user. The + // microphone should be re-opened when the `AudioOut` playback completes + // (by starting a new `Converse` RPC call to send the new audio). + ConverseResult_DIALOG_FOLLOW_ON ConverseResult_MicrophoneMode = 2 +) + +var ConverseResult_MicrophoneMode_name = map[int32]string{ + 0: "MICROPHONE_MODE_UNSPECIFIED", + 1: "CLOSE_MICROPHONE", + 2: "DIALOG_FOLLOW_ON", +} +var ConverseResult_MicrophoneMode_value = map[string]int32{ + "MICROPHONE_MODE_UNSPECIFIED": 0, + "CLOSE_MICROPHONE": 1, + "DIALOG_FOLLOW_ON": 2, +} + +func (x ConverseResult_MicrophoneMode) String() string { + return proto.EnumName(ConverseResult_MicrophoneMode_name, int32(x)) +} +func (ConverseResult_MicrophoneMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 0} +} + +// Indicates the type of event. +type ConverseResponse_EventType int32 + +const ( + // No event specified. 
+ ConverseResponse_EVENT_TYPE_UNSPECIFIED ConverseResponse_EventType = 0 + // This event indicates that the server has detected the end of the user's + // speech utterance and expects no additional speech. Therefore, the server + // will not process additional audio (although it may subsequently return + // additional results). The client should stop sending additional audio + // data, half-close the gRPC connection, and wait for any additional results + // until the server closes the gRPC connection. + ConverseResponse_END_OF_UTTERANCE ConverseResponse_EventType = 1 +) + +var ConverseResponse_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNSPECIFIED", + 1: "END_OF_UTTERANCE", +} +var ConverseResponse_EventType_value = map[string]int32{ + "EVENT_TYPE_UNSPECIFIED": 0, + "END_OF_UTTERANCE": 1, +} + +func (x ConverseResponse_EventType) String() string { + return proto.EnumName(ConverseResponse_EventType_name, int32(x)) +} +func (ConverseResponse_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{7, 0} +} + +// Specifies how to process the `ConverseRequest` messages. +type ConverseConfig struct { + // *Required* Specifies how to process the subsequent incoming audio. + AudioInConfig *AudioInConfig `protobuf:"bytes,1,opt,name=audio_in_config,json=audioInConfig" json:"audio_in_config,omitempty"` + // *Required* Specifies how to format the audio that will be returned. + AudioOutConfig *AudioOutConfig `protobuf:"bytes,2,opt,name=audio_out_config,json=audioOutConfig" json:"audio_out_config,omitempty"` + // *Required* Represents the current dialog state. 
+ ConverseState *ConverseState `protobuf:"bytes,3,opt,name=converse_state,json=converseState" json:"converse_state,omitempty"` +} + +func (m *ConverseConfig) Reset() { *m = ConverseConfig{} } +func (m *ConverseConfig) String() string { return proto.CompactTextString(m) } +func (*ConverseConfig) ProtoMessage() {} +func (*ConverseConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ConverseConfig) GetAudioInConfig() *AudioInConfig { + if m != nil { + return m.AudioInConfig + } + return nil +} + +func (m *ConverseConfig) GetAudioOutConfig() *AudioOutConfig { + if m != nil { + return m.AudioOutConfig + } + return nil +} + +func (m *ConverseConfig) GetConverseState() *ConverseState { + if m != nil { + return m.ConverseState + } + return nil +} + +// Specifies how to process the `audio_in` data that will be provided in +// subsequent requests. For recommended settings, see the Google Assistant SDK +// [best practices](https://developers.google.com/assistant/sdk/develop/grpc/best-practices/audio). +type AudioInConfig struct { + // *Required* Encoding of audio data sent in all `audio_in` messages. + Encoding AudioInConfig_Encoding `protobuf:"varint,1,opt,name=encoding,enum=google.assistant.embedded.v1alpha1.AudioInConfig_Encoding" json:"encoding,omitempty"` + // *Required* Sample rate (in Hertz) of the audio data sent in all `audio_in` + // messages. Valid values are from 16000-24000, but 16000 is optimal. + // For best results, set the sampling rate of the audio source to 16000 Hz. + // If that's not possible, use the native sample rate of the audio source + // (instead of re-sampling). 
+ SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"` +} + +func (m *AudioInConfig) Reset() { *m = AudioInConfig{} } +func (m *AudioInConfig) String() string { return proto.CompactTextString(m) } +func (*AudioInConfig) ProtoMessage() {} +func (*AudioInConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *AudioInConfig) GetEncoding() AudioInConfig_Encoding { + if m != nil { + return m.Encoding + } + return AudioInConfig_ENCODING_UNSPECIFIED +} + +func (m *AudioInConfig) GetSampleRateHertz() int32 { + if m != nil { + return m.SampleRateHertz + } + return 0 +} + +// Specifies the desired format for the server to use when it returns +// `audio_out` messages. +type AudioOutConfig struct { + // *Required* The encoding of audio data to be returned in all `audio_out` + // messages. + Encoding AudioOutConfig_Encoding `protobuf:"varint,1,opt,name=encoding,enum=google.assistant.embedded.v1alpha1.AudioOutConfig_Encoding" json:"encoding,omitempty"` + // *Required* The sample rate in Hertz of the audio data returned in + // `audio_out` messages. Valid values are: 16000-24000. + SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"` + // *Required* Current volume setting of the device's audio output. + // Valid values are 1 to 100 (corresponding to 1% to 100%). 
+ VolumePercentage int32 `protobuf:"varint,3,opt,name=volume_percentage,json=volumePercentage" json:"volume_percentage,omitempty"` +} + +func (m *AudioOutConfig) Reset() { *m = AudioOutConfig{} } +func (m *AudioOutConfig) String() string { return proto.CompactTextString(m) } +func (*AudioOutConfig) ProtoMessage() {} +func (*AudioOutConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding { + if m != nil { + return m.Encoding + } + return AudioOutConfig_ENCODING_UNSPECIFIED +} + +func (m *AudioOutConfig) GetSampleRateHertz() int32 { + if m != nil { + return m.SampleRateHertz + } + return 0 +} + +func (m *AudioOutConfig) GetVolumePercentage() int32 { + if m != nil { + return m.VolumePercentage + } + return 0 +} + +// Provides information about the current dialog state. +type ConverseState struct { + // *Required* The `conversation_state` value returned in the prior + // `ConverseResponse`. Omit (do not set the field) if there was no prior + // `ConverseResponse`. If there was a prior `ConverseResponse`, do not omit + // this field; doing so will end that conversation (and this new request will + // start a new conversation). + ConversationState []byte `protobuf:"bytes,1,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"` +} + +func (m *ConverseState) Reset() { *m = ConverseState{} } +func (m *ConverseState) String() string { return proto.CompactTextString(m) } +func (*ConverseState) ProtoMessage() {} +func (*ConverseState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ConverseState) GetConversationState() []byte { + if m != nil { + return m.ConversationState + } + return nil +} + +// The audio containing the assistant's response to the query. Sequential chunks +// of audio data are received in sequential `ConverseResponse` messages. 
+type AudioOut struct { + // *Output-only* The audio data containing the assistant's response to the + // query. Sequential chunks of audio data are received in sequential + // `ConverseResponse` messages. + AudioData []byte `protobuf:"bytes,1,opt,name=audio_data,json=audioData,proto3" json:"audio_data,omitempty"` +} + +func (m *AudioOut) Reset() { *m = AudioOut{} } +func (m *AudioOut) String() string { return proto.CompactTextString(m) } +func (*AudioOut) ProtoMessage() {} +func (*AudioOut) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *AudioOut) GetAudioData() []byte { + if m != nil { + return m.AudioData + } + return nil +} + +// The semantic result for the user's spoken query. +type ConverseResult struct { + // *Output-only* The recognized transcript of what the user said. + SpokenRequestText string `protobuf:"bytes,1,opt,name=spoken_request_text,json=spokenRequestText" json:"spoken_request_text,omitempty"` + // *Output-only* The text of the assistant's spoken response. This is only + // returned for an IFTTT action. + SpokenResponseText string `protobuf:"bytes,2,opt,name=spoken_response_text,json=spokenResponseText" json:"spoken_response_text,omitempty"` + // *Output-only* State information for subsequent `ConverseRequest`. This + // value should be saved in the client and returned in the + // `conversation_state` with the next `ConverseRequest`. (The client does not + // need to interpret or otherwise use this value.) There is no need to save + // this information across device restarts. + ConversationState []byte `protobuf:"bytes,3,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"` + // *Output-only* Specifies the mode of the microphone after this `Converse` + // RPC is processed. 
+ MicrophoneMode ConverseResult_MicrophoneMode `protobuf:"varint,4,opt,name=microphone_mode,json=microphoneMode,enum=google.assistant.embedded.v1alpha1.ConverseResult_MicrophoneMode" json:"microphone_mode,omitempty"` + // *Output-only* Updated volume level. The value will be 0 or omitted + // (indicating no change) unless a voice command such as "Increase the volume" + // or "Set volume level 4" was recognized, in which case the value will be + // between 1 and 100 (corresponding to the new volume level of 1% to 100%). + // Typically, a client should use this volume level when playing the + // `audio_out` data, and retain this value as the current volume level and + // supply it in the `AudioOutConfig` of the next `ConverseRequest`. (Some + // clients may also implement other ways to allow the current volume level to + // be changed, for example, by providing a knob that the user can turn.) + VolumePercentage int32 `protobuf:"varint,5,opt,name=volume_percentage,json=volumePercentage" json:"volume_percentage,omitempty"` +} + +func (m *ConverseResult) Reset() { *m = ConverseResult{} } +func (m *ConverseResult) String() string { return proto.CompactTextString(m) } +func (*ConverseResult) ProtoMessage() {} +func (*ConverseResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ConverseResult) GetSpokenRequestText() string { + if m != nil { + return m.SpokenRequestText + } + return "" +} + +func (m *ConverseResult) GetSpokenResponseText() string { + if m != nil { + return m.SpokenResponseText + } + return "" +} + +func (m *ConverseResult) GetConversationState() []byte { + if m != nil { + return m.ConversationState + } + return nil +} + +func (m *ConverseResult) GetMicrophoneMode() ConverseResult_MicrophoneMode { + if m != nil { + return m.MicrophoneMode + } + return ConverseResult_MICROPHONE_MODE_UNSPECIFIED +} + +func (m *ConverseResult) GetVolumePercentage() int32 { + if m != nil { + return m.VolumePercentage + } + return 0 +} + +// The 
top-level message sent by the client. Clients must send at least two, and +// typically numerous `ConverseRequest` messages. The first message must +// contain a `config` message and must not contain `audio_in` data. All +// subsequent messages must contain `audio_in` data and must not contain a +// `config` message. +type ConverseRequest struct { + // Exactly one of these fields must be specified in each `ConverseRequest`. + // + // Types that are valid to be assigned to ConverseRequest: + // *ConverseRequest_Config + // *ConverseRequest_AudioIn + ConverseRequest isConverseRequest_ConverseRequest `protobuf_oneof:"converse_request"` +} + +func (m *ConverseRequest) Reset() { *m = ConverseRequest{} } +func (m *ConverseRequest) String() string { return proto.CompactTextString(m) } +func (*ConverseRequest) ProtoMessage() {} +func (*ConverseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type isConverseRequest_ConverseRequest interface { + isConverseRequest_ConverseRequest() +} + +type ConverseRequest_Config struct { + Config *ConverseConfig `protobuf:"bytes,1,opt,name=config,oneof"` +} +type ConverseRequest_AudioIn struct { + AudioIn []byte `protobuf:"bytes,2,opt,name=audio_in,json=audioIn,proto3,oneof"` +} + +func (*ConverseRequest_Config) isConverseRequest_ConverseRequest() {} +func (*ConverseRequest_AudioIn) isConverseRequest_ConverseRequest() {} + +func (m *ConverseRequest) GetConverseRequest() isConverseRequest_ConverseRequest { + if m != nil { + return m.ConverseRequest + } + return nil +} + +func (m *ConverseRequest) GetConfig() *ConverseConfig { + if x, ok := m.GetConverseRequest().(*ConverseRequest_Config); ok { + return x.Config + } + return nil +} + +func (m *ConverseRequest) GetAudioIn() []byte { + if x, ok := m.GetConverseRequest().(*ConverseRequest_AudioIn); ok { + return x.AudioIn + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ConverseRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConverseRequest_OneofMarshaler, _ConverseRequest_OneofUnmarshaler, _ConverseRequest_OneofSizer, []interface{}{ + (*ConverseRequest_Config)(nil), + (*ConverseRequest_AudioIn)(nil), + } +} + +func _ConverseRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConverseRequest) + // converse_request + switch x := m.ConverseRequest.(type) { + case *ConverseRequest_Config: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case *ConverseRequest_AudioIn: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.AudioIn) + case nil: + default: + return fmt.Errorf("ConverseRequest.ConverseRequest has unexpected type %T", x) + } + return nil +} + +func _ConverseRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConverseRequest) + switch tag { + case 1: // converse_request.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ConverseConfig) + err := b.DecodeMessage(msg) + m.ConverseRequest = &ConverseRequest_Config{msg} + return true, err + case 2: // converse_request.audio_in + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ConverseRequest = &ConverseRequest_AudioIn{x} + return true, err + default: + return false, nil + } +} + +func _ConverseRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConverseRequest) + // converse_request + switch x := m.ConverseRequest.(type) { + case *ConverseRequest_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ConverseRequest_AudioIn: + n += proto.SizeVarint(2<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AudioIn))) + n += len(x.AudioIn) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The top-level message received by the client. A series of one or more +// `ConverseResponse` messages are streamed back to the client. +type ConverseResponse struct { + // Exactly one of these fields will be populated in each `ConverseResponse`. + // + // Types that are valid to be assigned to ConverseResponse: + // *ConverseResponse_Error + // *ConverseResponse_EventType_ + // *ConverseResponse_AudioOut + // *ConverseResponse_Result + ConverseResponse isConverseResponse_ConverseResponse `protobuf_oneof:"converse_response"` +} + +func (m *ConverseResponse) Reset() { *m = ConverseResponse{} } +func (m *ConverseResponse) String() string { return proto.CompactTextString(m) } +func (*ConverseResponse) ProtoMessage() {} +func (*ConverseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isConverseResponse_ConverseResponse interface { + isConverseResponse_ConverseResponse() +} + +type ConverseResponse_Error struct { + Error *google_rpc.Status `protobuf:"bytes,1,opt,name=error,oneof"` +} +type ConverseResponse_EventType_ struct { + EventType ConverseResponse_EventType `protobuf:"varint,2,opt,name=event_type,json=eventType,enum=google.assistant.embedded.v1alpha1.ConverseResponse_EventType,oneof"` +} +type ConverseResponse_AudioOut struct { + AudioOut *AudioOut `protobuf:"bytes,3,opt,name=audio_out,json=audioOut,oneof"` +} +type ConverseResponse_Result struct { + Result *ConverseResult `protobuf:"bytes,5,opt,name=result,oneof"` +} + +func (*ConverseResponse_Error) isConverseResponse_ConverseResponse() {} +func (*ConverseResponse_EventType_) isConverseResponse_ConverseResponse() {} +func (*ConverseResponse_AudioOut) isConverseResponse_ConverseResponse() {} +func (*ConverseResponse_Result) isConverseResponse_ConverseResponse() {} + +func (m 
*ConverseResponse) GetConverseResponse() isConverseResponse_ConverseResponse { + if m != nil { + return m.ConverseResponse + } + return nil +} + +func (m *ConverseResponse) GetError() *google_rpc.Status { + if x, ok := m.GetConverseResponse().(*ConverseResponse_Error); ok { + return x.Error + } + return nil +} + +func (m *ConverseResponse) GetEventType() ConverseResponse_EventType { + if x, ok := m.GetConverseResponse().(*ConverseResponse_EventType_); ok { + return x.EventType + } + return ConverseResponse_EVENT_TYPE_UNSPECIFIED +} + +func (m *ConverseResponse) GetAudioOut() *AudioOut { + if x, ok := m.GetConverseResponse().(*ConverseResponse_AudioOut); ok { + return x.AudioOut + } + return nil +} + +func (m *ConverseResponse) GetResult() *ConverseResult { + if x, ok := m.GetConverseResponse().(*ConverseResponse_Result); ok { + return x.Result + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ConverseResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConverseResponse_OneofMarshaler, _ConverseResponse_OneofUnmarshaler, _ConverseResponse_OneofSizer, []interface{}{ + (*ConverseResponse_Error)(nil), + (*ConverseResponse_EventType_)(nil), + (*ConverseResponse_AudioOut)(nil), + (*ConverseResponse_Result)(nil), + } +} + +func _ConverseResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConverseResponse) + // converse_response + switch x := m.ConverseResponse.(type) { + case *ConverseResponse_Error: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case *ConverseResponse_EventType_: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.EventType)) + case *ConverseResponse_AudioOut: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AudioOut); err != nil 
{ + return err + } + case *ConverseResponse_Result: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Result); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ConverseResponse.ConverseResponse has unexpected type %T", x) + } + return nil +} + +func _ConverseResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConverseResponse) + switch tag { + case 1: // converse_response.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_rpc.Status) + err := b.DecodeMessage(msg) + m.ConverseResponse = &ConverseResponse_Error{msg} + return true, err + case 2: // converse_response.event_type + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ConverseResponse = &ConverseResponse_EventType_{ConverseResponse_EventType(x)} + return true, err + case 3: // converse_response.audio_out + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AudioOut) + err := b.DecodeMessage(msg) + m.ConverseResponse = &ConverseResponse_AudioOut{msg} + return true, err + case 5: // converse_response.result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ConverseResult) + err := b.DecodeMessage(msg) + m.ConverseResponse = &ConverseResponse_Result{msg} + return true, err + default: + return false, nil + } +} + +func _ConverseResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConverseResponse) + // converse_response + switch x := m.ConverseResponse.(type) { + case *ConverseResponse_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ConverseResponse_EventType_: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.EventType)) + case *ConverseResponse_AudioOut: + s := proto.Size(x.AudioOut) + n += 
proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ConverseResponse_Result: + s := proto.Size(x.Result) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*ConverseConfig)(nil), "google.assistant.embedded.v1alpha1.ConverseConfig") + proto.RegisterType((*AudioInConfig)(nil), "google.assistant.embedded.v1alpha1.AudioInConfig") + proto.RegisterType((*AudioOutConfig)(nil), "google.assistant.embedded.v1alpha1.AudioOutConfig") + proto.RegisterType((*ConverseState)(nil), "google.assistant.embedded.v1alpha1.ConverseState") + proto.RegisterType((*AudioOut)(nil), "google.assistant.embedded.v1alpha1.AudioOut") + proto.RegisterType((*ConverseResult)(nil), "google.assistant.embedded.v1alpha1.ConverseResult") + proto.RegisterType((*ConverseRequest)(nil), "google.assistant.embedded.v1alpha1.ConverseRequest") + proto.RegisterType((*ConverseResponse)(nil), "google.assistant.embedded.v1alpha1.ConverseResponse") + proto.RegisterEnum("google.assistant.embedded.v1alpha1.AudioInConfig_Encoding", AudioInConfig_Encoding_name, AudioInConfig_Encoding_value) + proto.RegisterEnum("google.assistant.embedded.v1alpha1.AudioOutConfig_Encoding", AudioOutConfig_Encoding_name, AudioOutConfig_Encoding_value) + proto.RegisterEnum("google.assistant.embedded.v1alpha1.ConverseResult_MicrophoneMode", ConverseResult_MicrophoneMode_name, ConverseResult_MicrophoneMode_value) + proto.RegisterEnum("google.assistant.embedded.v1alpha1.ConverseResponse_EventType", ConverseResponse_EventType_name, ConverseResponse_EventType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for EmbeddedAssistant service + +type EmbeddedAssistantClient interface { + // Initiates or continues a conversation with the embedded assistant service. + // Each call performs one round-trip, sending an audio request to the service + // and receiving the audio response. Uses bidirectional streaming to receive + // results, such as the `END_OF_UTTERANCE` event, while sending audio. + // + // A conversation is one or more gRPC connections, each consisting of several + // streamed requests and responses. + // For example, the user says *Add to my shopping list* and the assistant + // responds *What do you want to add?*. The sequence of streamed requests and + // responses in the first gRPC message could be: + // + // * ConverseRequest.config + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseResponse.event_type.END_OF_UTTERANCE + // * ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // + // The user then says *bagels* and the assistant responds + // *OK, I've added bagels to your shopping list*. This is sent as another gRPC + // connection call to the `Converse` method, again with streamed requests and + // responses, such as: + // + // * ConverseRequest.config + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseResponse.event_type.END_OF_UTTERANCE + // * ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // + // Although the precise order of responses is not guaranteed, sequential + // ConverseResponse.audio_out messages will always contain sequential portions + // of audio. 
+ Converse(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_ConverseClient, error) +} + +type embeddedAssistantClient struct { + cc *grpc.ClientConn +} + +func NewEmbeddedAssistantClient(cc *grpc.ClientConn) EmbeddedAssistantClient { + return &embeddedAssistantClient{cc} +} + +func (c *embeddedAssistantClient) Converse(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_ConverseClient, error) { + stream, err := grpc.NewClientStream(ctx, &_EmbeddedAssistant_serviceDesc.Streams[0], c.cc, "/google.assistant.embedded.v1alpha1.EmbeddedAssistant/Converse", opts...) + if err != nil { + return nil, err + } + x := &embeddedAssistantConverseClient{stream} + return x, nil +} + +type EmbeddedAssistant_ConverseClient interface { + Send(*ConverseRequest) error + Recv() (*ConverseResponse, error) + grpc.ClientStream +} + +type embeddedAssistantConverseClient struct { + grpc.ClientStream +} + +func (x *embeddedAssistantConverseClient) Send(m *ConverseRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *embeddedAssistantConverseClient) Recv() (*ConverseResponse, error) { + m := new(ConverseResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for EmbeddedAssistant service + +type EmbeddedAssistantServer interface { + // Initiates or continues a conversation with the embedded assistant service. + // Each call performs one round-trip, sending an audio request to the service + // and receiving the audio response. Uses bidirectional streaming to receive + // results, such as the `END_OF_UTTERANCE` event, while sending audio. + // + // A conversation is one or more gRPC connections, each consisting of several + // streamed requests and responses. + // For example, the user says *Add to my shopping list* and the assistant + // responds *What do you want to add?*. 
The sequence of streamed requests and + // responses in the first gRPC message could be: + // + // * ConverseRequest.config + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseResponse.event_type.END_OF_UTTERANCE + // * ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // + // The user then says *bagels* and the assistant responds + // *OK, I've added bagels to your shopping list*. This is sent as another gRPC + // connection call to the `Converse` method, again with streamed requests and + // responses, such as: + // + // * ConverseRequest.config + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseRequest.audio_in + // * ConverseResponse.event_type.END_OF_UTTERANCE + // * ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // * ConverseResponse.audio_out + // + // Although the precise order of responses is not guaranteed, sequential + // ConverseResponse.audio_out messages will always contain sequential portions + // of audio. 
+ Converse(EmbeddedAssistant_ConverseServer) error +} + +func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer) { + s.RegisterService(&_EmbeddedAssistant_serviceDesc, srv) +} + +func _EmbeddedAssistant_Converse_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(EmbeddedAssistantServer).Converse(&embeddedAssistantConverseServer{stream}) +} + +type EmbeddedAssistant_ConverseServer interface { + Send(*ConverseResponse) error + Recv() (*ConverseRequest, error) + grpc.ServerStream +} + +type embeddedAssistantConverseServer struct { + grpc.ServerStream +} + +func (x *embeddedAssistantConverseServer) Send(m *ConverseResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *embeddedAssistantConverseServer) Recv() (*ConverseRequest, error) { + m := new(ConverseRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _EmbeddedAssistant_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.assistant.embedded.v1alpha1.EmbeddedAssistant", + HandlerType: (*EmbeddedAssistantServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Converse", + Handler: _EmbeddedAssistant_Converse_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/assistant/embedded/v1alpha1/embedded_assistant.proto", +} + +func init() { + proto.RegisterFile("google/assistant/embedded/v1alpha1/embedded_assistant.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 892 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x51, 0x73, 0xdb, 0x44, + 0x10, 0xb6, 0xec, 0xa6, 0xb5, 0xb7, 0x89, 0x2c, 0x5f, 0x33, 0x90, 0x49, 0x61, 0x60, 0xf4, 0xc0, + 0x94, 0x02, 0x72, 0xe3, 0x30, 0x3c, 0x10, 0xe8, 0x8c, 0x63, 0x2b, 0xb1, 0xc1, 0x96, 0x3c, 0x67, + 0xa7, 0xa5, 0x0c, 0xcc, 0xcd, 0x55, 0x3e, 0x1c, 0x81, 0x7d, 0x27, 0xa4, 0x73, 0xa6, 0xe1, 0x07, + 0xf0, 0xd8, 0xe1, 0x95, 0x67, 
0x7e, 0x11, 0xff, 0x88, 0xd1, 0x9d, 0xa4, 0xd8, 0x90, 0x42, 0x1c, + 0x1e, 0x6f, 0xf7, 0xbe, 0x4f, 0xbb, 0xdf, 0x7e, 0xb7, 0x23, 0x38, 0x9a, 0x09, 0x31, 0x9b, 0xb3, + 0x26, 0x4d, 0x92, 0x30, 0x91, 0x94, 0xcb, 0x26, 0x5b, 0xbc, 0x64, 0xd3, 0x29, 0x9b, 0x36, 0x2f, + 0x0e, 0xe8, 0x3c, 0x3a, 0xa7, 0x07, 0x45, 0x84, 0x14, 0x97, 0x9c, 0x28, 0x16, 0x52, 0x20, 0x5b, + 0x83, 0x9d, 0xab, 0x78, 0x7e, 0xd5, 0xc9, 0xc1, 0xfb, 0xef, 0xe4, 0x1f, 0x88, 0xc2, 0x26, 0xe5, + 0x5c, 0x48, 0x2a, 0x43, 0xc1, 0x13, 0xcd, 0xb0, 0xff, 0x76, 0x96, 0x8d, 0xa3, 0xa0, 0x99, 0x48, + 0x2a, 0x97, 0x59, 0xc2, 0xfe, 0xa3, 0x0c, 0x66, 0x47, 0xf0, 0x0b, 0x16, 0x27, 0xac, 0x23, 0xf8, + 0x0f, 0xe1, 0x0c, 0xbd, 0x80, 0x3a, 0x5d, 0x4e, 0x43, 0x41, 0x42, 0x4e, 0x02, 0x15, 0xda, 0x33, + 0xde, 0x37, 0x1e, 0xdd, 0x6f, 0x1d, 0x38, 0xff, 0x5d, 0x87, 0xd3, 0x4e, 0xa1, 0x7d, 0xae, 0xb9, + 0xf0, 0x0e, 0x5d, 0x3d, 0xa2, 0xef, 0xc0, 0xd2, 0xd4, 0x62, 0x29, 0x73, 0xee, 0xb2, 0xe2, 0x6e, + 0xdd, 0x98, 0xdb, 0x5f, 0xca, 0x8c, 0xdc, 0xa4, 0x6b, 0x67, 0xf4, 0x0d, 0x98, 0x41, 0xd6, 0x0a, + 0x49, 0x9b, 0x64, 0x7b, 0x95, 0x9b, 0xd7, 0x9d, 0x8b, 0x30, 0x4e, 0x81, 0x78, 0x27, 0x58, 0x3d, + 0xda, 0x7f, 0x1a, 0xb0, 0xb3, 0xd6, 0x18, 0x7a, 0x06, 0x55, 0xc6, 0x03, 0x31, 0x0d, 0xb9, 0x56, + 0xc7, 0x6c, 0x7d, 0xbe, 0xb1, 0x3a, 0x8e, 0x9b, 0x31, 0xe0, 0x82, 0x0b, 0x3d, 0x86, 0x46, 0x42, + 0x17, 0xd1, 0x9c, 0x91, 0x98, 0x4a, 0x46, 0xce, 0x59, 0x2c, 0x7f, 0x51, 0x12, 0x6d, 0xe1, 0xba, + 0x4e, 0x60, 0x2a, 0x59, 0x2f, 0x0d, 0xdb, 0x5f, 0x40, 0x35, 0x67, 0x40, 0x7b, 0xb0, 0xeb, 0x7a, + 0x1d, 0xbf, 0xdb, 0xf7, 0x4e, 0xc9, 0x99, 0x37, 0x1e, 0xb9, 0x9d, 0xfe, 0x49, 0xdf, 0xed, 0x5a, + 0x25, 0xb4, 0x0d, 0xd5, 0x41, 0xdf, 0x73, 0xdb, 0xf8, 0xe0, 0x33, 0xcb, 0x40, 0x55, 0xb8, 0x73, + 0x32, 0x68, 0x77, 0xac, 0xb2, 0xfd, 0x5b, 0x19, 0xcc, 0x75, 0x41, 0xd1, 0xf3, 0x7f, 0x34, 0x75, + 0xb4, 0xf9, 0x58, 0xfe, 0x67, 0x57, 0xe8, 0x23, 0x68, 0x5c, 0x88, 0xf9, 0x72, 0xc1, 0x48, 0xc4, + 0xe2, 0x80, 0x71, 0x49, 0x67, 0x7a, 0x90, 0x5b, 0xd8, 0xd2, 0x89, 
0x51, 0x11, 0xb7, 0x07, 0xb7, + 0x90, 0xe0, 0x1e, 0x54, 0x86, 0xa3, 0x43, 0xab, 0x8c, 0xea, 0x70, 0xdf, 0x1f, 0x9d, 0x8d, 0x49, + 0xdf, 0x23, 0xfe, 0xe9, 0xa9, 0x55, 0xb1, 0x9f, 0xc2, 0xce, 0x9a, 0x0d, 0xd0, 0x27, 0x80, 0x32, + 0x23, 0xa8, 0xd7, 0x94, 0xb9, 0x2a, 0x95, 0x66, 0x1b, 0x37, 0x56, 0x33, 0xda, 0x26, 0x1f, 0x42, + 0x35, 0xd7, 0x02, 0xbd, 0x0b, 0xa0, 0xad, 0x3e, 0xa5, 0x92, 0x66, 0x90, 0x9a, 0x8a, 0x74, 0xa9, + 0xa4, 0xf6, 0xef, 0x95, 0xab, 0x77, 0x87, 0x59, 0xb2, 0x9c, 0x4b, 0xe4, 0xc0, 0x83, 0x24, 0x12, + 0x3f, 0x31, 0x4e, 0x62, 0xf6, 0xf3, 0x92, 0x25, 0x92, 0x48, 0xf6, 0x4a, 0x2a, 0x68, 0x0d, 0x37, + 0x74, 0x0a, 0xeb, 0xcc, 0x84, 0xbd, 0x92, 0xe8, 0x09, 0xec, 0x16, 0xf7, 0x93, 0x48, 0xf0, 0x84, + 0x69, 0x40, 0x59, 0x01, 0x50, 0x0e, 0xd0, 0x29, 0x85, 0xb8, 0xbe, 0x9d, 0xca, 0x1b, 0xda, 0x41, + 0x3f, 0x42, 0x7d, 0x11, 0x06, 0xb1, 0x88, 0xce, 0x05, 0x67, 0x64, 0x21, 0xa6, 0x6c, 0xef, 0x8e, + 0x72, 0x45, 0x7b, 0x93, 0x07, 0xa5, 0xbb, 0x73, 0x86, 0x05, 0xd3, 0x50, 0x4c, 0x19, 0x36, 0x17, + 0x6b, 0xe7, 0xeb, 0xa7, 0xbe, 0xf5, 0x86, 0xa9, 0x7f, 0x0f, 0xe6, 0x3a, 0x1d, 0x7a, 0x0f, 0x1e, + 0x0e, 0xfb, 0x1d, 0xec, 0x8f, 0x7a, 0xbe, 0xe7, 0x92, 0xa1, 0xdf, 0x75, 0xff, 0x66, 0x81, 0x5d, + 0xb0, 0x3a, 0x03, 0x7f, 0xec, 0x92, 0xab, 0x6b, 0x96, 0x91, 0x46, 0xbb, 0xfd, 0xf6, 0xc0, 0x3f, + 0x25, 0x27, 0xfe, 0x60, 0xe0, 0x3f, 0x27, 0xbe, 0x97, 0xbe, 0x0c, 0x03, 0xea, 0x57, 0xd5, 0x2b, + 0xc1, 0xd1, 0x00, 0xee, 0xae, 0xed, 0xc2, 0xd6, 0x26, 0x12, 0xe8, 0x87, 0xd1, 0x2b, 0xe1, 0x8c, + 0x03, 0x3d, 0x84, 0x6a, 0xbe, 0x62, 0xd5, 0xb8, 0xb6, 0x7b, 0x25, 0x7c, 0x2f, 0x5b, 0x95, 0xc7, + 0x08, 0xac, 0x62, 0x8d, 0x65, 0x4e, 0xb0, 0x5f, 0x57, 0xc0, 0x5a, 0x11, 0x54, 0x8d, 0x14, 0x3d, + 0x86, 0x2d, 0x16, 0xc7, 0x22, 0xce, 0x4a, 0x42, 0x79, 0x49, 0x71, 0x14, 0x38, 0x63, 0xb5, 0xe4, + 0x7b, 0x25, 0xac, 0xaf, 0x20, 0x02, 0xc0, 0x2e, 0x18, 0x97, 0x44, 0x5e, 0x46, 0x4c, 0x7d, 0xd3, + 0x6c, 0x3d, 0xdd, 0x70, 0x8c, 0xea, 0xab, 0x8e, 0x9b, 0xd2, 0x4c, 0x2e, 0x23, 0xd6, 0x2b, 0xe1, + 0x1a, 
0xcb, 0x0f, 0xe8, 0x6b, 0xa8, 0x15, 0xab, 0x3d, 0xdb, 0xbb, 0x1f, 0x6f, 0xb2, 0x3c, 0x7a, + 0x25, 0x5c, 0xcd, 0xf7, 0x79, 0xaa, 0x76, 0xac, 0x6c, 0xa3, 0x2c, 0xb0, 0xa1, 0xda, 0xda, 0x70, + 0xa9, 0xda, 0x9a, 0xc3, 0xfe, 0x12, 0x6a, 0x45, 0xd1, 0x68, 0x1f, 0xde, 0x72, 0x9f, 0xb9, 0xde, + 0x84, 0x4c, 0x5e, 0x8c, 0xae, 0x31, 0x89, 0xeb, 0x75, 0x89, 0x7f, 0x42, 0xce, 0x26, 0x13, 0x17, + 0xb7, 0xbd, 0x8e, 0x6b, 0x19, 0xc7, 0x0f, 0xa0, 0xb1, 0x32, 0x0f, 0xad, 0x42, 0xeb, 0xb5, 0x01, + 0x0d, 0x37, 0x2b, 0xa1, 0x9d, 0x17, 0x85, 0x2e, 0xa1, 0x9a, 0x57, 0x81, 0x0e, 0x37, 0xab, 0x59, + 0xcd, 0x79, 0xff, 0xd3, 0xdb, 0x8c, 0xe4, 0x91, 0xf1, 0xc4, 0x38, 0xfe, 0xd5, 0x80, 0x0f, 0x02, + 0xb1, 0xb8, 0x01, 0xfe, 0xd8, 0x2c, 0x0a, 0x1e, 0xa5, 0xff, 0x00, 0x23, 0xe3, 0xdb, 0xaf, 0x32, + 0xd4, 0x4c, 0xcc, 0x29, 0x9f, 0x39, 0x22, 0x9e, 0x35, 0x67, 0x8c, 0xab, 0x3f, 0x84, 0xa6, 0x4e, + 0xd1, 0x28, 0x4c, 0xfe, 0xed, 0xe7, 0xe5, 0x28, 0x8f, 0xbc, 0xbc, 0xab, 0x60, 0x87, 0x7f, 0x05, + 0x00, 0x00, 0xff, 0xff, 0xec, 0x7a, 0x68, 0xfa, 0xf2, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_data.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_data.pb.go new file mode 100644 index 000000000..90fbe3020 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_data.pb.go @@ -0,0 +1,252 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto + +/* +Package cluster is a generated protocol buffer package. 
+ +It is generated from these files: + google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto + google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto + google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto + +It has these top-level messages: + Zone + Cluster + ListZonesRequest + ListZonesResponse + GetClusterRequest + ListClustersRequest + ListClustersResponse + CreateClusterRequest + CreateClusterMetadata + UpdateClusterMetadata + DeleteClusterRequest + UndeleteClusterRequest + UndeleteClusterMetadata + V2OperationMetadata +*/ +package cluster + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StorageType int32 + +const ( + // The storage type used is unspecified. + StorageType_STORAGE_UNSPECIFIED StorageType = 0 + // Data will be stored in SSD, providing low and consistent latencies. + StorageType_STORAGE_SSD StorageType = 1 + // Data will be stored in HDD, providing high and less predictable + // latencies. 
+ StorageType_STORAGE_HDD StorageType = 2 +) + +var StorageType_name = map[int32]string{ + 0: "STORAGE_UNSPECIFIED", + 1: "STORAGE_SSD", + 2: "STORAGE_HDD", +} +var StorageType_value = map[string]int32{ + "STORAGE_UNSPECIFIED": 0, + "STORAGE_SSD": 1, + "STORAGE_HDD": 2, +} + +func (x StorageType) String() string { + return proto.EnumName(StorageType_name, int32(x)) +} +func (StorageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Possible states of a zone. +type Zone_Status int32 + +const ( + // The state of the zone is unknown or unspecified. + Zone_UNKNOWN Zone_Status = 0 + // The zone is in a good state. + Zone_OK Zone_Status = 1 + // The zone is down for planned maintenance. + Zone_PLANNED_MAINTENANCE Zone_Status = 2 + // The zone is down for emergency or unplanned maintenance. + Zone_EMERGENCY_MAINENANCE Zone_Status = 3 +) + +var Zone_Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "OK", + 2: "PLANNED_MAINTENANCE", + 3: "EMERGENCY_MAINENANCE", +} +var Zone_Status_value = map[string]int32{ + "UNKNOWN": 0, + "OK": 1, + "PLANNED_MAINTENANCE": 2, + "EMERGENCY_MAINENANCE": 3, +} + +func (x Zone_Status) String() string { + return proto.EnumName(Zone_Status_name, int32(x)) +} +func (Zone_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// A physical location in which a particular project can allocate Cloud BigTable +// resources. +type Zone struct { + // A permanent unique identifier for the zone. + // Values are of the form projects//zones/[a-z][-a-z0-9]* + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of this zone as it appears in UIs. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // The current state of this zone. 
+ Status Zone_Status `protobuf:"varint,3,opt,name=status,enum=google.bigtable.admin.cluster.v1.Zone_Status" json:"status,omitempty"` +} + +func (m *Zone) Reset() { *m = Zone{} } +func (m *Zone) String() string { return proto.CompactTextString(m) } +func (*Zone) ProtoMessage() {} +func (*Zone) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Zone) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Zone) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Zone) GetStatus() Zone_Status { + if m != nil { + return m.Status + } + return Zone_UNKNOWN +} + +// An isolated set of Cloud BigTable resources on which tables can be hosted. +type Cluster struct { + // A permanent unique identifier for the cluster. For technical reasons, the + // zone in which the cluster resides is included here. + // Values are of the form + // projects//zones//clusters/[a-z][-a-z0-9]* + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The operation currently running on the cluster, if any. + // This cannot be set directly, only through CreateCluster, UpdateCluster, + // or UndeleteCluster. Calls to these methods will be rejected if + // "current_operation" is already set. + CurrentOperation *google_longrunning.Operation `protobuf:"bytes,3,opt,name=current_operation,json=currentOperation" json:"current_operation,omitempty"` + // The descriptive name for this cluster as it appears in UIs. + // Must be unique per zone. + DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // The number of serve nodes allocated to this cluster. + ServeNodes int32 `protobuf:"varint,5,opt,name=serve_nodes,json=serveNodes" json:"serve_nodes,omitempty"` + // What storage type to use for tables in this cluster. Only configurable at + // cluster creation time. If unspecified, STORAGE_SSD will be used. 
+ DefaultStorageType StorageType `protobuf:"varint,8,opt,name=default_storage_type,json=defaultStorageType,enum=google.bigtable.admin.cluster.v1.StorageType" json:"default_storage_type,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Cluster) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cluster) GetCurrentOperation() *google_longrunning.Operation { + if m != nil { + return m.CurrentOperation + } + return nil +} + +func (m *Cluster) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Cluster) GetServeNodes() int32 { + if m != nil { + return m.ServeNodes + } + return 0 +} + +func (m *Cluster) GetDefaultStorageType() StorageType { + if m != nil { + return m.DefaultStorageType + } + return StorageType_STORAGE_UNSPECIFIED +} + +func init() { + proto.RegisterType((*Zone)(nil), "google.bigtable.admin.cluster.v1.Zone") + proto.RegisterType((*Cluster)(nil), "google.bigtable.admin.cluster.v1.Cluster") + proto.RegisterEnum("google.bigtable.admin.cluster.v1.StorageType", StorageType_name, StorageType_value) + proto.RegisterEnum("google.bigtable.admin.cluster.v1.Zone_Status", Zone_Status_name, Zone_Status_value) +} + +func init() { + proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 493 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xd1, 0x6e, 0xd3, 0x3c, + 0x1c, 0xc5, 0x97, 0xae, 0xeb, 0xbe, 0xcf, 0x41, 0x10, 0xcc, 0x24, 0xa2, 0x09, 0xb4, 0x52, 0xb8, + 0xa8, 0x90, 0x70, 0xb4, 0x71, 0x09, 0x37, 0x6d, 0x63, 0xba, 0x32, 0xe6, 0x56, 0x49, 0x27, 0xc4, + 0x6e, 0x2c, 0xb7, 0xf5, 0xac, 0x48, 0xa9, 0x1d, 0xc5, 0x4e, 0xa5, 0x3e, 0x03, 
0x12, 0x8f, 0xc7, + 0xf3, 0xa0, 0x3a, 0x6e, 0x55, 0x34, 0xd0, 0xb8, 0xb3, 0xcf, 0x39, 0x3f, 0xbb, 0xff, 0x53, 0x07, + 0x7c, 0x14, 0x4a, 0x89, 0x9c, 0x47, 0xb3, 0x4c, 0x18, 0x36, 0xcb, 0x79, 0xc4, 0x16, 0xcb, 0x4c, + 0x46, 0xf3, 0xbc, 0xd2, 0x86, 0x97, 0xd1, 0xea, 0x7c, 0xe7, 0x50, 0xa7, 0xd1, 0x05, 0x33, 0x0c, + 0x15, 0xa5, 0x32, 0x0a, 0xb6, 0x6b, 0x1a, 0x6d, 0x33, 0xc8, 0xd2, 0xc8, 0x25, 0xd1, 0xea, 0xfc, + 0xf4, 0x85, 0x3b, 0x9f, 0x15, 0x59, 0xc4, 0xa4, 0x54, 0x86, 0x99, 0x4c, 0x49, 0x5d, 0xf3, 0xa7, + 0xaf, 0x9d, 0x9b, 0x2b, 0x29, 0xca, 0x4a, 0xca, 0x4c, 0x8a, 0x48, 0x15, 0xbc, 0xfc, 0x2d, 0x74, + 0xe6, 0x42, 0x76, 0x37, 0xab, 0xee, 0x22, 0x93, 0x2d, 0xb9, 0x36, 0x6c, 0x59, 0xd4, 0x81, 0xce, + 0x4f, 0x0f, 0x34, 0x6f, 0x95, 0xe4, 0x10, 0x82, 0xa6, 0x64, 0x4b, 0x1e, 0x7a, 0x6d, 0xaf, 0xfb, + 0x7f, 0x62, 0xd7, 0xf0, 0x15, 0x78, 0xb4, 0xc8, 0x74, 0x91, 0xb3, 0x35, 0xb5, 0x5e, 0xc3, 0x7a, + 0xbe, 0xd3, 0xc8, 0x26, 0x82, 0x41, 0x4b, 0x1b, 0x66, 0x2a, 0x1d, 0x1e, 0xb6, 0xbd, 0xee, 0xe3, + 0x8b, 0x77, 0xe8, 0xa1, 0xb1, 0xd0, 0xe6, 0x3a, 0x94, 0x5a, 0x28, 0x71, 0x70, 0x67, 0x02, 0x5a, + 0xb5, 0x02, 0x7d, 0x70, 0x7c, 0x43, 0xae, 0xc8, 0xf8, 0x2b, 0x09, 0x0e, 0x60, 0x0b, 0x34, 0xc6, + 0x57, 0x81, 0x07, 0x9f, 0x83, 0x67, 0x93, 0x2f, 0x3d, 0x42, 0x70, 0x4c, 0xaf, 0x7b, 0x23, 0x32, + 0xc5, 0xa4, 0x47, 0x06, 0x38, 0x68, 0xc0, 0x10, 0x9c, 0xe0, 0x6b, 0x9c, 0x0c, 0x31, 0x19, 0x7c, + 0xb3, 0x96, 0x73, 0x0e, 0x3b, 0x3f, 0x1a, 0xe0, 0x78, 0x50, 0x5f, 0xfa, 0xc7, 0xd9, 0x3e, 0x83, + 0xa7, 0xf3, 0xaa, 0x2c, 0xb9, 0x34, 0x74, 0xd7, 0x9a, 0x9d, 0xc1, 0xbf, 0x78, 0xb9, 0x9d, 0x61, + 0xaf, 0x5a, 0x34, 0xde, 0x86, 0x92, 0xc0, 0x71, 0x3b, 0xe5, 0x5e, 0x4f, 0xcd, 0xfb, 0x3d, 0x9d, + 0x01, 0x5f, 0xf3, 0x72, 0xc5, 0xa9, 0x54, 0x0b, 0xae, 0xc3, 0xa3, 0xb6, 0xd7, 0x3d, 0x4a, 0x80, + 0x95, 0xc8, 0x46, 0x81, 0x14, 0x9c, 0x2c, 0xf8, 0x1d, 0xab, 0x72, 0x43, 0xb5, 0x51, 0x25, 0x13, + 0x9c, 0x9a, 0x75, 0xc1, 0xc3, 0xff, 0xfe, 0xb5, 0xd6, 0xb4, 0xa6, 0xa6, 0xeb, 0x82, 0x27, 0xd0, + 0x1d, 0xb5, 0xa7, 
0xbd, 0xbd, 0x04, 0xfe, 0xde, 0x76, 0x53, 0x69, 0x3a, 0x1d, 0x27, 0xbd, 0x21, + 0xa6, 0x37, 0x24, 0x9d, 0xe0, 0xc1, 0xe8, 0xd3, 0x08, 0xc7, 0xc1, 0x01, 0x7c, 0x02, 0xfc, 0xad, + 0x91, 0xa6, 0x71, 0xe0, 0xed, 0x0b, 0x97, 0x71, 0x1c, 0x34, 0xfa, 0xdf, 0x3d, 0xf0, 0x66, 0xae, + 0x96, 0x0f, 0xfe, 0xa4, 0x7e, 0xd8, 0x77, 0x96, 0xfb, 0x23, 0x62, 0x66, 0xd8, 0x64, 0xf3, 0xec, + 0x26, 0xde, 0xed, 0xd0, 0xd1, 0x42, 0xe5, 0x4c, 0x0a, 0xa4, 0x4a, 0x11, 0x09, 0x2e, 0xed, 0xa3, + 0x8c, 0x6a, 0x8b, 0x15, 0x99, 0xfe, 0xfb, 0xb7, 0xf5, 0xc1, 0x2d, 0x67, 0x2d, 0xcb, 0xbc, 0xff, + 0x15, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x27, 0x25, 0xa6, 0x8e, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_service.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_service.pb.go new file mode 100644 index 000000000..bfc7f2fc7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_service.pb.go @@ -0,0 +1,472 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto + +package cluster + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for BigtableClusterService service + +type BigtableClusterServiceClient interface { + // Lists the supported zones for the given project. + ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) + // Gets information about a particular cluster. + GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Lists all clusters in the given project, along with any zones for which + // cluster information could not be retrieved. + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Creates a cluster and begins preparing it to begin serving. The returned + // cluster embeds as its "current_operation" a long-running operation which + // can be used to track the progress of turning up the new cluster. + // Immediately upon completion of this request: + // * The cluster will be readable via the API, with all requested attributes + // but no allocated resources. + // Until completion of the embedded operation: + // * Cancelling the operation will render the cluster immediately unreadable + // via the API. + // * All other attempts to modify or delete the cluster will be rejected. + // Upon completion of the embedded operation: + // * Billing for all successfully-allocated resources will begin (some types + // may have lower than the requested levels). + // * New tables can be created in the cluster. + // * The cluster's allocated resource levels will be readable via the API. + // The embedded operation's "metadata" field type is + // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. 
+ CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Updates a cluster, and begins allocating or releasing resources as + // requested. The returned cluster embeds as its "current_operation" a + // long-running operation which can be used to track the progress of updating + // the cluster. + // Immediately upon completion of this request: + // * For resource types where a decrease in the cluster's allocation has been + // requested, billing will be based on the newly-requested level. + // Until completion of the embedded operation: + // * Cancelling the operation will set its metadata's "cancelled_at_time", + // and begin restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, after which + // point it will terminate with a CANCELLED status. + // * All other attempts to modify or delete the cluster will be rejected. + // * Reading the cluster via the API will continue to give the pre-request + // resource levels. + // Upon completion of the embedded operation: + // * Billing will begin for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources will be available for serving the cluster's + // tables. + // * The cluster's new resource levels will be readable via the API. + // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*Cluster, error) + // Marks a cluster and all of its tables for permanent deletion in 7 days. + // Immediately upon completion of the request: + // * Billing will cease for all of the cluster's reserved resources. + // * The cluster's "delete_time" field will be set 7 days in the future. 
+ // Soon afterward: + // * All tables within the cluster will become unavailable. + // Prior to the cluster's "delete_time": + // * The cluster can be recovered with a call to UndeleteCluster. + // * All other attempts to modify or delete the cluster will be rejected. + // At the cluster's "delete_time": + // * The cluster and *all of its tables* will immediately and irrevocably + // disappear from the API, and their data will be permanently deleted. + DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Cancels the scheduled deletion of an cluster and begins preparing it to + // resume serving. The returned operation will also be embedded as the + // cluster's "current_operation". + // Immediately upon completion of this request: + // * The cluster's "delete_time" field will be unset, protecting it from + // automatic deletion. + // Until completion of the returned operation: + // * The operation cannot be cancelled. + // Upon completion of the returned operation: + // * Billing for the cluster's resources will resume. + // * All tables within the cluster will be available. + // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. 
+ UndeleteCluster(ctx context.Context, in *UndeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type bigtableClusterServiceClient struct { + cc *grpc.ClientConn +} + +func NewBigtableClusterServiceClient(cc *grpc.ClientConn) BigtableClusterServiceClient { + return &bigtableClusterServiceClient{cc} +} + +func (c *bigtableClusterServiceClient) ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) { + out := new(ListZonesResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListZones", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/GetCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/CreateCluster", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/DeleteCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClusterServiceClient) UndeleteCluster(ctx context.Context, in *UndeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UndeleteCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BigtableClusterService service + +type BigtableClusterServiceServer interface { + // Lists the supported zones for the given project. + ListZones(context.Context, *ListZonesRequest) (*ListZonesResponse, error) + // Gets information about a particular cluster. + GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) + // Lists all clusters in the given project, along with any zones for which + // cluster information could not be retrieved. + ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Creates a cluster and begins preparing it to begin serving. The returned + // cluster embeds as its "current_operation" a long-running operation which + // can be used to track the progress of turning up the new cluster. 
+ // Immediately upon completion of this request: + // * The cluster will be readable via the API, with all requested attributes + // but no allocated resources. + // Until completion of the embedded operation: + // * Cancelling the operation will render the cluster immediately unreadable + // via the API. + // * All other attempts to modify or delete the cluster will be rejected. + // Upon completion of the embedded operation: + // * Billing for all successfully-allocated resources will begin (some types + // may have lower than the requested levels). + // * New tables can be created in the cluster. + // * The cluster's allocated resource levels will be readable via the API. + // The embedded operation's "metadata" field type is + // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + CreateCluster(context.Context, *CreateClusterRequest) (*Cluster, error) + // Updates a cluster, and begins allocating or releasing resources as + // requested. The returned cluster embeds as its "current_operation" a + // long-running operation which can be used to track the progress of updating + // the cluster. + // Immediately upon completion of this request: + // * For resource types where a decrease in the cluster's allocation has been + // requested, billing will be based on the newly-requested level. + // Until completion of the embedded operation: + // * Cancelling the operation will set its metadata's "cancelled_at_time", + // and begin restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, after which + // point it will terminate with a CANCELLED status. + // * All other attempts to modify or delete the cluster will be rejected. + // * Reading the cluster via the API will continue to give the pre-request + // resource levels. 
+ // Upon completion of the embedded operation: + // * Billing will begin for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources will be available for serving the cluster's + // tables. + // * The cluster's new resource levels will be readable via the API. + // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + UpdateCluster(context.Context, *Cluster) (*Cluster, error) + // Marks a cluster and all of its tables for permanent deletion in 7 days. + // Immediately upon completion of the request: + // * Billing will cease for all of the cluster's reserved resources. + // * The cluster's "delete_time" field will be set 7 days in the future. + // Soon afterward: + // * All tables within the cluster will become unavailable. + // Prior to the cluster's "delete_time": + // * The cluster can be recovered with a call to UndeleteCluster. + // * All other attempts to modify or delete the cluster will be rejected. + // At the cluster's "delete_time": + // * The cluster and *all of its tables* will immediately and irrevocably + // disappear from the API, and their data will be permanently deleted. + DeleteCluster(context.Context, *DeleteClusterRequest) (*google_protobuf2.Empty, error) + // Cancels the scheduled deletion of an cluster and begins preparing it to + // resume serving. The returned operation will also be embedded as the + // cluster's "current_operation". + // Immediately upon completion of this request: + // * The cluster's "delete_time" field will be unset, protecting it from + // automatic deletion. + // Until completion of the returned operation: + // * The operation cannot be cancelled. + // Upon completion of the returned operation: + // * Billing for the cluster's resources will resume. 
+ // * All tables within the cluster will be available. + // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + UndeleteCluster(context.Context, *UndeleteClusterRequest) (*google_longrunning.Operation, error) +} + +func RegisterBigtableClusterServiceServer(s *grpc.Server, srv BigtableClusterServiceServer) { + s.RegisterService(&_BigtableClusterService_serviceDesc, srv) +} + +func _BigtableClusterService_ListZones_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListZonesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableClusterServiceServer).ListZones(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListZones", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableClusterServiceServer).ListZones(ctx, req.(*ListZonesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableClusterService_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableClusterServiceServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.cluster.v1.BigtableClusterService/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableClusterServiceServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_BigtableClusterService_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableClusterServiceServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableClusterServiceServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableClusterService_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableClusterServiceServer).CreateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.cluster.v1.BigtableClusterService/CreateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableClusterServiceServer).CreateCluster(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableClusterService_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Cluster) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableClusterServiceServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.cluster.v1.BigtableClusterService/UpdateCluster", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(BigtableClusterServiceServer).UpdateCluster(ctx, req.(*Cluster)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableClusterService_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableClusterServiceServer).DeleteCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.cluster.v1.BigtableClusterService/DeleteCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableClusterServiceServer).DeleteCluster(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableClusterService_UndeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UndeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableClusterServiceServer).UndeleteCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.cluster.v1.BigtableClusterService/UndeleteCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableClusterServiceServer).UndeleteCluster(ctx, req.(*UndeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BigtableClusterService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.admin.cluster.v1.BigtableClusterService", + HandlerType: (*BigtableClusterServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListZones", + Handler: _BigtableClusterService_ListZones_Handler, + }, + { + MethodName: "GetCluster", + 
Handler: _BigtableClusterService_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _BigtableClusterService_ListClusters_Handler, + }, + { + MethodName: "CreateCluster", + Handler: _BigtableClusterService_CreateCluster_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _BigtableClusterService_UpdateCluster_Handler, + }, + { + MethodName: "DeleteCluster", + Handler: _BigtableClusterService_DeleteCluster_Handler, + }, + { + MethodName: "UndeleteCluster", + Handler: _BigtableClusterService_UndeleteCluster_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto", +} + +func init() { + proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto", fileDescriptor1) +} + +var fileDescriptor1 = []byte{ + // 515 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6b, 0x14, 0x31, + 0x18, 0xc6, 0x89, 0x07, 0xa1, 0xc1, 0x45, 0xc8, 0xa1, 0x87, 0x6d, 0x0b, 0x32, 0x15, 0xb1, 0x23, + 0x26, 0x6e, 0x17, 0xc5, 0xbf, 0x08, 0x5b, 0xa5, 0x1e, 0x04, 0x8b, 0xd2, 0x4b, 0x2f, 0x4b, 0x76, + 0xe7, 0x35, 0x8c, 0xcc, 0x24, 0x31, 0xc9, 0x2c, 0xa8, 0xf4, 0xe2, 0xcd, 0x93, 0x88, 0x27, 0x3d, + 0x78, 0xeb, 0xdd, 0xef, 0xe2, 0x57, 0xf0, 0x83, 0xc8, 0x64, 0x92, 0xb5, 0x2b, 0x6b, 0x77, 0xa6, + 0xb7, 0x99, 0xc9, 0xfb, 0xbc, 0xcf, 0x6f, 0x9e, 0x24, 0x2f, 0x7e, 0x2c, 0x94, 0x12, 0x05, 0xb0, + 0x49, 0x2e, 0x1c, 0x9f, 0x14, 0xc0, 0x78, 0x56, 0xe6, 0x92, 0x4d, 0x8b, 0xca, 0x3a, 0x30, 0x6c, + 0x36, 0x98, 0xaf, 0x8c, 0xc3, 0xb7, 0xb1, 0x05, 0x33, 0xcb, 0xa7, 0x40, 0xb5, 0x51, 0x4e, 0x91, + 0x2b, 0x4d, 0x03, 0x1a, 0xcb, 0xa8, 0x6f, 0x40, 0x43, 0x31, 0x9d, 0x0d, 0xfa, 0x9b, 0xc1, 0x82, + 0xeb, 0x9c, 0x71, 0x29, 0x95, 0xe3, 0x2e, 0x57, 0xd2, 0x36, 0xfa, 0xfe, 0xc3, 0xee, 0x00, 0x19, + 0x77, 0x3c, 0xa8, 0x9f, 0x9d, 0x1b, 0x7f, 0x5c, 0x82, 0xb5, 0x5c, 0x40, 0xe4, 0xd8, 0x0e, 0x9d, + 0x0a, 0x25, 0x85, 0xa9, 0xa4, 0xcc, 0xa5, 
0x60, 0x4a, 0x83, 0x59, 0x80, 0xdd, 0x08, 0x45, 0xfe, + 0x6d, 0x52, 0xbd, 0x66, 0x50, 0x6a, 0xf7, 0xae, 0x59, 0xdc, 0xfd, 0xb4, 0x86, 0xd7, 0x47, 0xc1, + 0x6d, 0xaf, 0x31, 0x7b, 0xd5, 0x78, 0x91, 0x6f, 0x08, 0xaf, 0x3d, 0xcf, 0xad, 0x3b, 0x52, 0x12, + 0x2c, 0xd9, 0xa5, 0xab, 0x32, 0xa3, 0xf3, 0xe2, 0x97, 0xf0, 0xb6, 0x02, 0xeb, 0xfa, 0xc3, 0x4e, + 0x1a, 0xab, 0x95, 0xb4, 0x90, 0x6c, 0x7f, 0xfc, 0xf5, 0xfb, 0xeb, 0x85, 0x2d, 0xb2, 0x51, 0x07, + 0xf1, 0x41, 0xf2, 0x12, 0x1e, 0x69, 0xa3, 0xde, 0xc0, 0xd4, 0x59, 0x96, 0x1e, 0xb3, 0xf7, 0x9e, + 0xe6, 0x07, 0xc2, 0x78, 0x1f, 0x5c, 0x20, 0x26, 0x2d, 0x8c, 0xfe, 0x56, 0x47, 0xba, 0x9d, 0xd5, + 0xa2, 0xa0, 0x48, 0x6e, 0x79, 0xa6, 0x94, 0x5c, 0x5f, 0xc6, 0xd4, 0x20, 0xb1, 0x34, 0x6e, 0x60, + 0x8d, 0x49, 0x7e, 0x22, 0x7c, 0xa9, 0xfe, 0xb7, 0xd0, 0xc1, 0x92, 0xdb, 0xed, 0xb2, 0x88, 0xf5, + 0x11, 0xf2, 0x4e, 0x57, 0x59, 0x48, 0x71, 0xe0, 0x89, 0x6f, 0x90, 0x9d, 0xe5, 0x29, 0x72, 0x21, + 0x0c, 0x08, 0xee, 0x20, 0x9b, 0x53, 0x93, 0x13, 0x84, 0x7b, 0x7b, 0x06, 0xb8, 0x8b, 0x07, 0x81, + 0xb4, 0x30, 0x5f, 0x10, 0x9c, 0x23, 0xd9, 0xc0, 0x99, 0x5c, 0x3b, 0x2b, 0xd9, 0xe3, 0x39, 0xe4, + 0x7d, 0x94, 0x92, 0xef, 0x08, 0xf7, 0x0e, 0x75, 0x76, 0x8a, 0xb3, 0xbd, 0x5f, 0x17, 0xb4, 0xa1, + 0x47, 0xbb, 0xd9, 0x6f, 0xbd, 0xe9, 0x35, 0xdc, 0x17, 0x84, 0x7b, 0x4f, 0xa0, 0x80, 0x4e, 0x21, + 0x2e, 0x08, 0x62, 0x88, 0xeb, 0x51, 0x17, 0xef, 0x2d, 0x7d, 0x5a, 0xdf, 0xdb, 0x78, 0x16, 0xd3, + 0xf6, 0x67, 0xf1, 0x04, 0xe1, 0xcb, 0x87, 0x32, 0x5b, 0xa0, 0xba, 0xbb, 0x9a, 0xea, 0x1f, 0x49, + 0xe4, 0xda, 0x8a, 0xca, 0x53, 0x43, 0x87, 0xbe, 0x88, 0x43, 0x27, 0xb9, 0xe7, 0xf1, 0x86, 0xc9, + 0xa0, 0x75, 0x6a, 0x55, 0xf0, 0x19, 0x7d, 0x46, 0xf8, 0xea, 0x54, 0x95, 0x2b, 0xc9, 0x46, 0x9b, + 0xcb, 0x27, 0x96, 0x3d, 0xa8, 0x93, 0x3a, 0x40, 0x47, 0xfb, 0xa1, 0x83, 0x50, 0x05, 0x97, 0x82, + 0x2a, 0x23, 0x98, 0x00, 0xe9, 0x73, 0x64, 0xcd, 0x12, 0xd7, 0xb9, 0xfd, 0xff, 0xfc, 0x7d, 0x10, + 0x1e, 0x27, 0x17, 0xbd, 0x66, 0xf8, 0x27, 0x00, 0x00, 0xff, 0xff, 0x50, 0x92, 
0x91, 0x86, 0x71, + 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.pb.go new file mode 100644 index 000000000..1f993aa20 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.pb.go @@ -0,0 +1,376 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto + +package cluster + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request message for BigtableClusterService.ListZones. +type ListZonesRequest struct { + // The unique name of the project for which a list of supported zones is + // requested. + // Values are of the form projects/ + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ListZonesRequest) Reset() { *m = ListZonesRequest{} } +func (m *ListZonesRequest) String() string { return proto.CompactTextString(m) } +func (*ListZonesRequest) ProtoMessage() {} +func (*ListZonesRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *ListZonesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Response message for BigtableClusterService.ListZones. +type ListZonesResponse struct { + // The list of requested zones. 
+ Zones []*Zone `protobuf:"bytes,1,rep,name=zones" json:"zones,omitempty"` +} + +func (m *ListZonesResponse) Reset() { *m = ListZonesResponse{} } +func (m *ListZonesResponse) String() string { return proto.CompactTextString(m) } +func (*ListZonesResponse) ProtoMessage() {} +func (*ListZonesResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ListZonesResponse) GetZones() []*Zone { + if m != nil { + return m.Zones + } + return nil +} + +// Request message for BigtableClusterService.GetCluster. +type GetClusterRequest struct { + // The unique name of the requested cluster. + // Values are of the form projects//zones//clusters/ + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *GetClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for BigtableClusterService.ListClusters. +type ListClustersRequest struct { + // The unique name of the project for which a list of clusters is requested. + // Values are of the form projects/ + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *ListClustersRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Response message for BigtableClusterService.ListClusters. +type ListClustersResponse struct { + // The list of requested Clusters. 
+ Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` + // The zones for which clusters could not be retrieved. + FailedZones []*Zone `protobuf:"bytes,2,rep,name=failed_zones,json=failedZones" json:"failed_zones,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetFailedZones() []*Zone { + if m != nil { + return m.FailedZones + } + return nil +} + +// Request message for BigtableClusterService.CreateCluster. +type CreateClusterRequest struct { + // The unique name of the zone in which to create the cluster. + // Values are of the form projects//zones/ + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The id to be used when referring to the new cluster within its zone, + // e.g. just the "test-cluster" section of the full name + // "projects//zones//clusters/test-cluster". + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The cluster to create. + // The "name", "delete_time", and "current_operation" fields must be left + // blank. 
+ Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *CreateClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateClusterRequest) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +// Metadata type for the operation returned by +// BigtableClusterService.CreateCluster. +type CreateClusterMetadata struct { + // The request which prompted the creation of this operation. + OriginalRequest *CreateClusterRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"` + // The time at which original_request was received. + RequestTime *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"` + // The time at which this operation failed or was completed successfully. 
+ FinishTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"` +} + +func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} } +func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateClusterMetadata) ProtoMessage() {} +func (*CreateClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest { + if m != nil { + return m.OriginalRequest + } + return nil +} + +func (m *CreateClusterMetadata) GetRequestTime() *google_protobuf3.Timestamp { + if m != nil { + return m.RequestTime + } + return nil +} + +func (m *CreateClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp { + if m != nil { + return m.FinishTime + } + return nil +} + +// Metadata type for the operation returned by +// BigtableClusterService.UpdateCluster. +type UpdateClusterMetadata struct { + // The request which prompted the creation of this operation. + OriginalRequest *Cluster `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"` + // The time at which original_request was received. + RequestTime *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"` + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + CancelTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime" json:"cancel_time,omitempty"` + // The time at which this operation failed or was completed successfully. 
+ FinishTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"` +} + +func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} } +func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterMetadata) ProtoMessage() {} +func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *UpdateClusterMetadata) GetOriginalRequest() *Cluster { + if m != nil { + return m.OriginalRequest + } + return nil +} + +func (m *UpdateClusterMetadata) GetRequestTime() *google_protobuf3.Timestamp { + if m != nil { + return m.RequestTime + } + return nil +} + +func (m *UpdateClusterMetadata) GetCancelTime() *google_protobuf3.Timestamp { + if m != nil { + return m.CancelTime + } + return nil +} + +func (m *UpdateClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp { + if m != nil { + return m.FinishTime + } + return nil +} + +// Request message for BigtableClusterService.DeleteCluster. +type DeleteClusterRequest struct { + // The unique name of the cluster to be deleted. + // Values are of the form projects//zones//clusters/ + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +func (m *DeleteClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for BigtableClusterService.UndeleteCluster. +type UndeleteClusterRequest struct { + // The unique name of the cluster to be un-deleted. 
+ // Values are of the form projects//zones//clusters/ + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *UndeleteClusterRequest) Reset() { *m = UndeleteClusterRequest{} } +func (m *UndeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UndeleteClusterRequest) ProtoMessage() {} +func (*UndeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +func (m *UndeleteClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Metadata type for the operation returned by +// BigtableClusterService.UndeleteCluster. +type UndeleteClusterMetadata struct { + // The time at which the original request was received. + RequestTime *google_protobuf3.Timestamp `protobuf:"bytes,1,opt,name=request_time,json=requestTime" json:"request_time,omitempty"` + // The time at which this operation failed or was completed successfully. + FinishTime *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"` +} + +func (m *UndeleteClusterMetadata) Reset() { *m = UndeleteClusterMetadata{} } +func (m *UndeleteClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*UndeleteClusterMetadata) ProtoMessage() {} +func (*UndeleteClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{10} } + +func (m *UndeleteClusterMetadata) GetRequestTime() *google_protobuf3.Timestamp { + if m != nil { + return m.RequestTime + } + return nil +} + +func (m *UndeleteClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp { + if m != nil { + return m.FinishTime + } + return nil +} + +// Metadata type for operations initiated by the V2 BigtableAdmin service. +// More complete information for such operations is available via the V2 API. 
+type V2OperationMetadata struct { +} + +func (m *V2OperationMetadata) Reset() { *m = V2OperationMetadata{} } +func (m *V2OperationMetadata) String() string { return proto.CompactTextString(m) } +func (*V2OperationMetadata) ProtoMessage() {} +func (*V2OperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } + +func init() { + proto.RegisterType((*ListZonesRequest)(nil), "google.bigtable.admin.cluster.v1.ListZonesRequest") + proto.RegisterType((*ListZonesResponse)(nil), "google.bigtable.admin.cluster.v1.ListZonesResponse") + proto.RegisterType((*GetClusterRequest)(nil), "google.bigtable.admin.cluster.v1.GetClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "google.bigtable.admin.cluster.v1.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "google.bigtable.admin.cluster.v1.ListClustersResponse") + proto.RegisterType((*CreateClusterRequest)(nil), "google.bigtable.admin.cluster.v1.CreateClusterRequest") + proto.RegisterType((*CreateClusterMetadata)(nil), "google.bigtable.admin.cluster.v1.CreateClusterMetadata") + proto.RegisterType((*UpdateClusterMetadata)(nil), "google.bigtable.admin.cluster.v1.UpdateClusterMetadata") + proto.RegisterType((*DeleteClusterRequest)(nil), "google.bigtable.admin.cluster.v1.DeleteClusterRequest") + proto.RegisterType((*UndeleteClusterRequest)(nil), "google.bigtable.admin.cluster.v1.UndeleteClusterRequest") + proto.RegisterType((*UndeleteClusterMetadata)(nil), "google.bigtable.admin.cluster.v1.UndeleteClusterMetadata") + proto.RegisterType((*V2OperationMetadata)(nil), "google.bigtable.admin.cluster.v1.V2OperationMetadata") +} + +func init() { + proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto", fileDescriptor2) +} + +var fileDescriptor2 = []byte{ + // 541 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xd5, 0x26, 0xe5, 0xa3, 0xe3, 
0x4a, 0xb4, 0x6e, 0x02, 0x51, 0x24, 0x44, 0x64, 0x50, 0x69, + 0x11, 0xb2, 0xd5, 0x20, 0x71, 0x69, 0xb9, 0x24, 0xa0, 0x52, 0x89, 0x8a, 0x12, 0x5a, 0x0e, 0xbd, + 0x58, 0x9b, 0x78, 0x62, 0x56, 0xb2, 0x77, 0x8d, 0x77, 0x93, 0x03, 0x3f, 0x82, 0x1b, 0xfc, 0x04, + 0xc4, 0x2f, 0xe4, 0x8c, 0xec, 0xdd, 0x8d, 0x68, 0x95, 0xd6, 0xb1, 0x10, 0xb7, 0xdd, 0x99, 0xf7, + 0x66, 0xde, 0x9b, 0x1d, 0x69, 0xe1, 0x6d, 0x2c, 0x44, 0x9c, 0x60, 0x30, 0x66, 0xb1, 0xa2, 0xe3, + 0x04, 0x03, 0x1a, 0xa5, 0x8c, 0x07, 0x93, 0x64, 0x26, 0x15, 0xe6, 0xc1, 0x7c, 0x7f, 0x91, 0x09, + 0x4d, 0x2c, 0x94, 0x98, 0xcf, 0xd9, 0x04, 0xc3, 0x14, 0xa5, 0xa4, 0x31, 0x4a, 0x3f, 0xcb, 0x85, + 0x12, 0x6e, 0x4f, 0x57, 0xf2, 0x2d, 0xde, 0x2f, 0x2b, 0xf9, 0x86, 0xe5, 0xcf, 0xf7, 0xbb, 0x87, + 0xf5, 0x7b, 0x45, 0x54, 0x51, 0x5d, 0xbf, 0xfb, 0xc8, 0xb0, 0xcb, 0xdb, 0x78, 0x36, 0x0d, 0x14, + 0x4b, 0x51, 0x2a, 0x9a, 0x66, 0x1a, 0xe0, 0xed, 0xc0, 0xe6, 0x3b, 0x26, 0xd5, 0x85, 0xe0, 0x28, + 0x47, 0xf8, 0x65, 0x86, 0x52, 0xb9, 0x2e, 0xac, 0x71, 0x9a, 0x62, 0x87, 0xf4, 0xc8, 0xee, 0xfa, + 0xa8, 0x3c, 0x7b, 0x1f, 0x60, 0xeb, 0x2f, 0x9c, 0xcc, 0x04, 0x97, 0xe8, 0x1e, 0xc2, 0xad, 0xaf, + 0x45, 0xa0, 0x43, 0x7a, 0xcd, 0x5d, 0xa7, 0xbf, 0xe3, 0x57, 0xb9, 0xf1, 0x0b, 0xfe, 0x48, 0x93, + 0xbc, 0xa7, 0xb0, 0x75, 0x84, 0x6a, 0xa8, 0x93, 0x37, 0xf5, 0xde, 0x83, 0xed, 0xa2, 0xb7, 0x41, + 0xde, 0x28, 0xf3, 0x17, 0x81, 0xd6, 0x65, 0xac, 0x91, 0xfa, 0x06, 0xee, 0x1a, 0x19, 0x56, 0xed, + 0x5e, 0xb5, 0x5a, 0xab, 0x6d, 0x41, 0x75, 0x8f, 0x61, 0x63, 0x4a, 0x59, 0x82, 0x51, 0xa8, 0x8d, + 0x37, 0x6a, 0x19, 0x77, 0x34, 0xb7, 0x1c, 0xa2, 0xf7, 0x8d, 0x40, 0x6b, 0x98, 0x23, 0x55, 0x58, + 0x3d, 0x02, 0xf7, 0x21, 0x80, 0x7d, 0x5d, 0x16, 0x75, 0x1a, 0x65, 0x66, 0xdd, 0x44, 0x8e, 0x23, + 0x77, 0x08, 0x77, 0xcc, 0xa5, 0xd3, 0xec, 0x91, 0x7a, 0xe6, 0x2c, 0xd3, 0xfb, 0x4d, 0xa0, 0x7d, + 0x49, 0xd0, 0x09, 0x2a, 0x5a, 0xec, 0x92, 0x4b, 0x61, 0x53, 0xe4, 0x2c, 0x66, 0x9c, 0x26, 0x61, + 0xae, 0x55, 0x96, 0xea, 0x9c, 0xfe, 0xcb, 0x15, 0xfa, 0x2c, 0xf1, 0x38, 
0xba, 0x67, 0xeb, 0x59, + 0xd3, 0xaf, 0x60, 0xc3, 0x54, 0x0e, 0x8b, 0x15, 0x2d, 0x2d, 0x3a, 0xfd, 0xae, 0x2d, 0x6f, 0xf7, + 0xd7, 0x3f, 0xb3, 0xfb, 0x3b, 0x72, 0x0c, 0xbe, 0x88, 0xb8, 0x07, 0xe0, 0x4c, 0x19, 0x67, 0xf2, + 0xb3, 0x66, 0x37, 0x2b, 0xd9, 0xa0, 0xe1, 0x45, 0xc0, 0xfb, 0xd9, 0x80, 0xf6, 0x79, 0x16, 0x2d, + 0x31, 0x7e, 0x76, 0xad, 0xf1, 0x1a, 0x03, 0xfe, 0x0f, 0x5e, 0x27, 0x94, 0x4f, 0x30, 0x59, 0xd9, + 0xab, 0x86, 0x2f, 0x1b, 0xd4, 0x5a, 0xad, 0x41, 0x3d, 0x83, 0xd6, 0x6b, 0x4c, 0x70, 0x95, 0x8d, + 0xf5, 0x9e, 0xc3, 0xfd, 0x73, 0x1e, 0xad, 0x8a, 0xfe, 0x4e, 0xe0, 0xc1, 0x15, 0xf8, 0xe2, 0x11, + 0xae, 0x8e, 0x8b, 0xfc, 0xd3, 0x6a, 0x34, 0x6a, 0x39, 0x6e, 0xc3, 0xf6, 0xa7, 0xfe, 0xfb, 0x0c, + 0x73, 0xaa, 0x98, 0xe0, 0x56, 0xd2, 0xe0, 0x07, 0x81, 0x27, 0x13, 0x91, 0x56, 0xee, 0xc0, 0xe0, + 0xf1, 0xc0, 0xa4, 0x8c, 0xa9, 0x8f, 0xfa, 0x1b, 0x38, 0x31, 0xbf, 0xc0, 0x69, 0xd1, 0xfd, 0x94, + 0x5c, 0x1c, 0x99, 0x42, 0xb1, 0x48, 0x28, 0x8f, 0x7d, 0x91, 0xc7, 0x41, 0x8c, 0xbc, 0xd4, 0x16, + 0xe8, 0x14, 0xcd, 0x98, 0xbc, 0xfe, 0x0f, 0x38, 0x30, 0xc7, 0xf1, 0xed, 0x92, 0xf3, 0xe2, 0x4f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x75, 0x68, 0x13, 0xa2, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_data.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_data.pb.go new file mode 100644 index 000000000..2c6183d8e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_data.pb.go @@ -0,0 +1,451 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/table/v1/bigtable_table_data.proto + +/* +Package table is a generated protocol buffer package. 
+ +It is generated from these files: + google/bigtable/admin/table/v1/bigtable_table_data.proto + google/bigtable/admin/table/v1/bigtable_table_service.proto + google/bigtable/admin/table/v1/bigtable_table_service_messages.proto + +It has these top-level messages: + Table + ColumnFamily + GcRule + CreateTableRequest + ListTablesRequest + ListTablesResponse + GetTableRequest + DeleteTableRequest + RenameTableRequest + CreateColumnFamilyRequest + DeleteColumnFamilyRequest + BulkDeleteRowsRequest +*/ +package table + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Table_TimestampGranularity int32 + +const ( + Table_MILLIS Table_TimestampGranularity = 0 +) + +var Table_TimestampGranularity_name = map[int32]string{ + 0: "MILLIS", +} +var Table_TimestampGranularity_value = map[string]int32{ + "MILLIS": 0, +} + +func (x Table_TimestampGranularity) String() string { + return proto.EnumName(Table_TimestampGranularity_name, int32(x)) +} +func (Table_TimestampGranularity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 0} +} + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. 
+type Table struct { + // A unique identifier of the form + // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If this Table is in the process of being created, the Operation used to + // track its progress. As long as this operation is present, the Table will + // not accept any Table Admin or Read/Write requests. + CurrentOperation *google_longrunning.Operation `protobuf:"bytes,2,opt,name=current_operation,json=currentOperation" json:"current_operation,omitempty"` + // The column families configured for this table, mapped by column family id. + ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // Cannot be changed once the table is created. 
+ Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,enum=google.bigtable.admin.table.v1.Table_TimestampGranularity" json:"granularity,omitempty"` +} + +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} +func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Table) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Table) GetCurrentOperation() *google_longrunning.Operation { + if m != nil { + return m.CurrentOperation + } + return nil +} + +func (m *Table) GetColumnFamilies() map[string]*ColumnFamily { + if m != nil { + return m.ColumnFamilies + } + return nil +} + +func (m *Table) GetGranularity() Table_TimestampGranularity { + if m != nil { + return m.Granularity + } + return Table_MILLIS +} + +// A set of columns within a table which share a common configuration. +type ColumnFamily struct { + // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ + // The last segment is the same as the "name" field in + // google.bigtable.v1.Family. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Garbage collection expression specified by the following grammar: + // GC = EXPR + // | "" ; + // EXPR = EXPR, "||", EXPR (* lowest precedence *) + // | EXPR, "&&", EXPR + // | "(", EXPR, ")" (* highest precedence *) + // | PROP ; + // PROP = "version() >", NUM32 + // | "age() >", NUM64, [ UNIT ] ; + // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) + // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) + // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) + // GC expressions can be up to 500 characters in length + // + // The different types of PROP are defined as follows: + // version() - cell index, counting from most recent and starting at 1 + // age() - age of the cell (current time minus cell timestamp) + // + // Example: "version() > 3 || (age() > 3d && version() > 1)" + // drop cells beyond the most recent three, and drop cells older than three + // days unless they're the most recent cell in the row/column + // + // Garbage collection executes opportunistically in the background, and so + // it's possible for reads to return a cell even if it matches the active GC + // expression for its family. + GcExpression string `protobuf:"bytes,2,opt,name=gc_expression,json=gcExpression" json:"gc_expression,omitempty"` + // Garbage collection rule specified as a protobuf. + // Supersedes `gc_expression`. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. 
+ GcRule *GcRule `protobuf:"bytes,3,opt,name=gc_rule,json=gcRule" json:"gc_rule,omitempty"` +} + +func (m *ColumnFamily) Reset() { *m = ColumnFamily{} } +func (m *ColumnFamily) String() string { return proto.CompactTextString(m) } +func (*ColumnFamily) ProtoMessage() {} +func (*ColumnFamily) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ColumnFamily) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ColumnFamily) GetGcExpression() string { + if m != nil { + return m.GcExpression + } + return "" +} + +func (m *ColumnFamily) GetGcRule() *GcRule { + if m != nil { + return m.GcRule + } + return nil +} + +// Rule for determining which cells to delete during garbage collection. +type GcRule struct { + // Types that are valid to be assigned to Rule: + // *GcRule_MaxNumVersions + // *GcRule_MaxAge + // *GcRule_Intersection_ + // *GcRule_Union_ + Rule isGcRule_Rule `protobuf_oneof:"rule"` +} + +func (m *GcRule) Reset() { *m = GcRule{} } +func (m *GcRule) String() string { return proto.CompactTextString(m) } +func (*GcRule) ProtoMessage() {} +func (*GcRule) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isGcRule_Rule interface { + isGcRule_Rule() +} + +type GcRule_MaxNumVersions struct { + MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions,json=maxNumVersions,oneof"` +} +type GcRule_MaxAge struct { + MaxAge *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,oneof"` +} +type GcRule_Intersection_ struct { + Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection,oneof"` +} +type GcRule_Union_ struct { + Union *GcRule_Union `protobuf:"bytes,4,opt,name=union,oneof"` +} + +func (*GcRule_MaxNumVersions) isGcRule_Rule() {} +func (*GcRule_MaxAge) isGcRule_Rule() {} +func (*GcRule_Intersection_) isGcRule_Rule() {} +func (*GcRule_Union_) isGcRule_Rule() {} + +func (m *GcRule) GetRule() isGcRule_Rule { + if m != nil { + return m.Rule + } 
+ return nil +} + +func (m *GcRule) GetMaxNumVersions() int32 { + if x, ok := m.GetRule().(*GcRule_MaxNumVersions); ok { + return x.MaxNumVersions + } + return 0 +} + +func (m *GcRule) GetMaxAge() *google_protobuf3.Duration { + if x, ok := m.GetRule().(*GcRule_MaxAge); ok { + return x.MaxAge + } + return nil +} + +func (m *GcRule) GetIntersection() *GcRule_Intersection { + if x, ok := m.GetRule().(*GcRule_Intersection_); ok { + return x.Intersection + } + return nil +} + +func (m *GcRule) GetUnion() *GcRule_Union { + if x, ok := m.GetRule().(*GcRule_Union_); ok { + return x.Union + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GcRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GcRule_OneofMarshaler, _GcRule_OneofUnmarshaler, _GcRule_OneofSizer, []interface{}{ + (*GcRule_MaxNumVersions)(nil), + (*GcRule_MaxAge)(nil), + (*GcRule_Intersection_)(nil), + (*GcRule_Union_)(nil), + } +} + +func _GcRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GcRule) + // rule + switch x := m.Rule.(type) { + case *GcRule_MaxNumVersions: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.MaxNumVersions)) + case *GcRule_MaxAge: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MaxAge); err != nil { + return err + } + case *GcRule_Intersection_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Intersection); err != nil { + return err + } + case *GcRule_Union_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Union); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GcRule.Rule has unexpected type %T", x) + } + return nil +} + +func _GcRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GcRule) + switch tag { + 
case 1: // rule.max_num_versions + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Rule = &GcRule_MaxNumVersions{int32(x)} + return true, err + case 2: // rule.max_age + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Duration) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_MaxAge{msg} + return true, err + case 3: // rule.intersection + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GcRule_Intersection) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_Intersection_{msg} + return true, err + case 4: // rule.union + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GcRule_Union) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_Union_{msg} + return true, err + default: + return false, nil + } +} + +func _GcRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GcRule) + // rule + switch x := m.Rule.(type) { + case *GcRule_MaxNumVersions: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.MaxNumVersions)) + case *GcRule_MaxAge: + s := proto.Size(x.MaxAge) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GcRule_Intersection_: + s := proto.Size(x.Intersection) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GcRule_Union_: + s := proto.Size(x.Union) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A GcRule which deletes cells matching all of the given rules. +type GcRule_Intersection struct { + // Only delete cells which would be deleted by every element of `rules`. 
+ Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} } +func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) } +func (*GcRule_Intersection) ProtoMessage() {} +func (*GcRule_Intersection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *GcRule_Intersection) GetRules() []*GcRule { + if m != nil { + return m.Rules + } + return nil +} + +// A GcRule which deletes cells matching any of the given rules. +type GcRule_Union struct { + // Delete cells which would be deleted by any element of `rules`. + Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *GcRule_Union) Reset() { *m = GcRule_Union{} } +func (m *GcRule_Union) String() string { return proto.CompactTextString(m) } +func (*GcRule_Union) ProtoMessage() {} +func (*GcRule_Union) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +func (m *GcRule_Union) GetRules() []*GcRule { + if m != nil { + return m.Rules + } + return nil +} + +func init() { + proto.RegisterType((*Table)(nil), "google.bigtable.admin.table.v1.Table") + proto.RegisterType((*ColumnFamily)(nil), "google.bigtable.admin.table.v1.ColumnFamily") + proto.RegisterType((*GcRule)(nil), "google.bigtable.admin.table.v1.GcRule") + proto.RegisterType((*GcRule_Intersection)(nil), "google.bigtable.admin.table.v1.GcRule.Intersection") + proto.RegisterType((*GcRule_Union)(nil), "google.bigtable.admin.table.v1.GcRule.Union") + proto.RegisterEnum("google.bigtable.admin.table.v1.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value) +} + +func init() { + proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_data.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 579 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0x61, 0x6b, 
0xd3, 0x40, + 0x18, 0xc7, 0x9b, 0xa5, 0xed, 0xd8, 0xb3, 0x3a, 0xeb, 0x29, 0x52, 0x0b, 0x4a, 0xc9, 0x40, 0x8a, + 0xc8, 0x85, 0x6d, 0xbe, 0x98, 0x53, 0x10, 0xbb, 0xcd, 0x6d, 0x32, 0x75, 0xc4, 0x29, 0x28, 0x42, + 0xb8, 0x66, 0xb7, 0x23, 0x98, 0xbb, 0x2b, 0x97, 0x5c, 0x69, 0x5f, 0xfb, 0xc6, 0x8f, 0xe2, 0xa7, + 0xf0, 0xb3, 0x49, 0xee, 0x2e, 0x35, 0x83, 0xe9, 0x26, 0xbe, 0x49, 0x9e, 0x3c, 0xf7, 0xfc, 0x7f, + 0xf7, 0xcf, 0xf3, 0x5c, 0x02, 0xdb, 0x4c, 0x4a, 0x96, 0xd1, 0x70, 0x9c, 0xb2, 0x82, 0x8c, 0x33, + 0x1a, 0x92, 0x33, 0x9e, 0x8a, 0xd0, 0xc6, 0xd3, 0x8d, 0x45, 0x3e, 0xb6, 0xd7, 0x33, 0x52, 0x10, + 0x3c, 0x51, 0xb2, 0x90, 0xe8, 0x81, 0x55, 0xe2, 0xaa, 0x02, 0x1b, 0x25, 0xb6, 0xf1, 0x74, 0xa3, + 0xbf, 0xee, 0xc8, 0x99, 0x14, 0x4c, 0x69, 0x21, 0x52, 0xc1, 0x42, 0x39, 0xa1, 0x8a, 0x14, 0xa9, + 0x14, 0xb9, 0x85, 0xf4, 0x1d, 0x24, 0x34, 0x4f, 0x63, 0x7d, 0x1e, 0x9e, 0x69, 0x5b, 0x60, 0xd7, + 0x83, 0x9f, 0x3e, 0xb4, 0x4e, 0x4b, 0x22, 0x42, 0xd0, 0x14, 0x84, 0xd3, 0x9e, 0x37, 0xf0, 0x86, + 0x2b, 0x91, 0x89, 0xd1, 0x6b, 0xb8, 0x95, 0x68, 0xa5, 0xa8, 0x28, 0xe2, 0x05, 0xb9, 0xb7, 0x34, + 0xf0, 0x86, 0xab, 0x9b, 0xf7, 0xb1, 0xb3, 0x57, 0xdb, 0x1e, 0xbf, 0xab, 0x8a, 0xa2, 0xae, 0xd3, + 0x2d, 0x32, 0x68, 0x0c, 0x37, 0x13, 0x99, 0x69, 0x2e, 0xe2, 0x73, 0xc2, 0xd3, 0x2c, 0xa5, 0x79, + 0xcf, 0x1f, 0xf8, 0xc3, 0xd5, 0xcd, 0xa7, 0xf8, 0xef, 0x2f, 0x8a, 0x8d, 0x3f, 0xbc, 0x6b, 0xc4, + 0xaf, 0x9c, 0x76, 0x5f, 0x14, 0x6a, 0x1e, 0xad, 0x25, 0x17, 0x92, 0xe8, 0x0b, 0xac, 0x32, 0x45, + 0x84, 0xce, 0x88, 0x4a, 0x8b, 0x79, 0xaf, 0x39, 0xf0, 0x86, 0x6b, 0x9b, 0x3b, 0xd7, 0xe3, 0x9f, + 0xa6, 0x9c, 0xe6, 0x05, 0xe1, 0x93, 0x83, 0xdf, 0x84, 0xa8, 0x8e, 0xeb, 0x4b, 0xb8, 0x7d, 0x89, + 0x09, 0xd4, 0x05, 0xff, 0x2b, 0x9d, 0xbb, 0xbe, 0x95, 0x21, 0x1a, 0x41, 0x6b, 0x4a, 0x32, 0x4d, + 0x5d, 0xab, 0x1e, 0x5f, 0x65, 0xa0, 0x46, 0x9d, 0x47, 0x56, 0xba, 0xb3, 0xb4, 0xed, 0x05, 0x01, + 0xdc, 0xb9, 0xcc, 0x15, 0x02, 0x68, 0xbf, 0x39, 0x3a, 0x3e, 0x3e, 0x7a, 0xdf, 0x6d, 0x04, 0xdf, + 0x3d, 0xe8, 0xd4, 0xf5, 
0x97, 0xce, 0x71, 0x1d, 0x6e, 0xb0, 0x24, 0xa6, 0xb3, 0x89, 0xa2, 0x79, + 0x5e, 0xcd, 0x70, 0x25, 0xea, 0xb0, 0x64, 0x7f, 0x91, 0x43, 0x2f, 0x60, 0x99, 0x25, 0xb1, 0xd2, + 0x19, 0xed, 0xf9, 0xc6, 0xf7, 0xc3, 0xab, 0x7c, 0x1f, 0x24, 0x91, 0xce, 0x68, 0xd4, 0x66, 0xe6, + 0x1e, 0xfc, 0xf0, 0xa1, 0x6d, 0x53, 0xe8, 0x11, 0x74, 0x39, 0x99, 0xc5, 0x42, 0xf3, 0x78, 0x4a, + 0x55, 0x89, 0xcf, 0x8d, 0xa1, 0xd6, 0x61, 0x23, 0x5a, 0xe3, 0x64, 0xf6, 0x56, 0xf3, 0x8f, 0x2e, + 0x8f, 0x9e, 0xc0, 0x72, 0x59, 0x4b, 0x58, 0xd5, 0xaf, 0x7b, 0xd5, 0xbe, 0xd5, 0xa1, 0xc5, 0x7b, + 0xee, 0xd0, 0x1e, 0x36, 0xa2, 0x36, 0x27, 0xb3, 0x97, 0x8c, 0xa2, 0x4f, 0xd0, 0x49, 0x45, 0x41, + 0x55, 0x4e, 0x13, 0x73, 0x2a, 0xad, 0xe5, 0xad, 0xeb, 0x59, 0xc6, 0x47, 0x35, 0xe9, 0x61, 0x23, + 0xba, 0x80, 0x42, 0x7b, 0xd0, 0xd2, 0xa2, 0x64, 0x36, 0xaf, 0x37, 0x3e, 0xc7, 0xfc, 0x20, 0x2c, + 0xcc, 0x8a, 0xfb, 0xc7, 0xd0, 0xa9, 0xef, 0x82, 0x9e, 0x43, 0xab, 0xec, 0x6d, 0xd9, 0x07, 0xff, + 0x1f, 0x9a, 0x6b, 0x45, 0xfd, 0x7d, 0x68, 0x19, 0xfe, 0xff, 0x61, 0x46, 0x6d, 0x68, 0x96, 0xc1, + 0xe8, 0x9b, 0x07, 0x41, 0x22, 0xf9, 0x15, 0xe2, 0xd1, 0xdd, 0x91, 0x5b, 0x30, 0x9f, 0xc8, 0x1e, + 0x29, 0xc8, 0x49, 0x39, 0x92, 0x13, 0xef, 0xf3, 0xae, 0x53, 0x32, 0x99, 0x11, 0xc1, 0xb0, 0x54, + 0x2c, 0x64, 0x54, 0x98, 0x81, 0x85, 0x76, 0x89, 0x4c, 0xd2, 0xfc, 0x4f, 0x7f, 0xbd, 0x67, 0x26, + 0x18, 0xb7, 0x4d, 0xfd, 0xd6, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x80, 0x76, 0xdc, 0x24, + 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_service.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_service.pb.go new file mode 100644 index 000000000..3d5049ab3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_service.pb.go @@ -0,0 +1,423 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/bigtable/admin/table/v1/bigtable_table_service.proto + +package table + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for BigtableTableService service + +type BigtableTableServiceClient interface { + // Creates a new table, to be served from a specified cluster. + // The table can be created with a full set of initial column families, + // specified in the request. + CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*Table, error) + // Lists the names of all tables served from a specified cluster. + ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) + // Gets the schema of the specified table, including its column families. + GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) + // Permanently deletes a specified table and all of its data. + DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Changes the name of a specified table. + // Cannot be used to move tables between clusters, zones, or projects. 
+ RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Creates a new column family within a specified table. + CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*ColumnFamily, error) + // Changes the configuration of a specified column family. + UpdateColumnFamily(ctx context.Context, in *ColumnFamily, opts ...grpc.CallOption) (*ColumnFamily, error) + // Permanently deletes a specified column family and all of its data. + DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Delete all rows in a table corresponding to a particular prefix + BulkDeleteRows(ctx context.Context, in *BulkDeleteRowsRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) +} + +type bigtableTableServiceClient struct { + cc *grpc.ClientConn +} + +func NewBigtableTableServiceClient(cc *grpc.ClientConn) BigtableTableServiceClient { + return &bigtableTableServiceClient{cc} +} + +func (c *bigtableTableServiceClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*Table, error) { + out := new(Table) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) { + out := new(ListTablesResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/ListTables", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) { + out := new(Table) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/GetTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/RenameTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*ColumnFamily, error) { + out := new(ColumnFamily) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateColumnFamily", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) UpdateColumnFamily(ctx context.Context, in *ColumnFamily, opts ...grpc.CallOption) (*ColumnFamily, error) { + out := new(ColumnFamily) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/UpdateColumnFamily", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteColumnFamily", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableServiceClient) BulkDeleteRows(ctx context.Context, in *BulkDeleteRowsRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/BulkDeleteRows", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BigtableTableService service + +type BigtableTableServiceServer interface { + // Creates a new table, to be served from a specified cluster. + // The table can be created with a full set of initial column families, + // specified in the request. + CreateTable(context.Context, *CreateTableRequest) (*Table, error) + // Lists the names of all tables served from a specified cluster. + ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) + // Gets the schema of the specified table, including its column families. + GetTable(context.Context, *GetTableRequest) (*Table, error) + // Permanently deletes a specified table and all of its data. + DeleteTable(context.Context, *DeleteTableRequest) (*google_protobuf2.Empty, error) + // Changes the name of a specified table. + // Cannot be used to move tables between clusters, zones, or projects. + RenameTable(context.Context, *RenameTableRequest) (*google_protobuf2.Empty, error) + // Creates a new column family within a specified table. 
+ CreateColumnFamily(context.Context, *CreateColumnFamilyRequest) (*ColumnFamily, error) + // Changes the configuration of a specified column family. + UpdateColumnFamily(context.Context, *ColumnFamily) (*ColumnFamily, error) + // Permanently deletes a specified column family and all of its data. + DeleteColumnFamily(context.Context, *DeleteColumnFamilyRequest) (*google_protobuf2.Empty, error) + // Delete all rows in a table corresponding to a particular prefix + BulkDeleteRows(context.Context, *BulkDeleteRowsRequest) (*google_protobuf2.Empty, error) +} + +func RegisterBigtableTableServiceServer(s *grpc.Server, srv BigtableTableServiceServer) { + s.RegisterService(&_BigtableTableService_serviceDesc, srv) +} + +func _BigtableTableService_CreateTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).CreateTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.table.v1.BigtableTableService/CreateTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableServiceServer).CreateTable(ctx, req.(*CreateTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableService_ListTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTablesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).ListTables(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.table.v1.BigtableTableService/ListTables", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ + return srv.(BigtableTableServiceServer).ListTables(ctx, req.(*ListTablesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableService_GetTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).GetTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.table.v1.BigtableTableService/GetTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableServiceServer).GetTable(ctx, req.(*GetTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableService_DeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).DeleteTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.table.v1.BigtableTableService/DeleteTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableServiceServer).DeleteTable(ctx, req.(*DeleteTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableService_RenameTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RenameTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).RenameTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.bigtable.admin.table.v1.BigtableTableService/RenameTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableServiceServer).RenameTable(ctx, req.(*RenameTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableService_CreateColumnFamily_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateColumnFamilyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).CreateColumnFamily(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.table.v1.BigtableTableService/CreateColumnFamily", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableServiceServer).CreateColumnFamily(ctx, req.(*CreateColumnFamilyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableService_UpdateColumnFamily_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ColumnFamily) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).UpdateColumnFamily(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.table.v1.BigtableTableService/UpdateColumnFamily", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableServiceServer).UpdateColumnFamily(ctx, req.(*ColumnFamily)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableService_DeleteColumnFamily_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteColumnFamilyRequest) 
+ if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).DeleteColumnFamily(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.table.v1.BigtableTableService/DeleteColumnFamily", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableServiceServer).DeleteColumnFamily(ctx, req.(*DeleteColumnFamilyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableService_BulkDeleteRows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkDeleteRowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableServiceServer).BulkDeleteRows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.table.v1.BigtableTableService/BulkDeleteRows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableServiceServer).BulkDeleteRows(ctx, req.(*BulkDeleteRowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BigtableTableService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.admin.table.v1.BigtableTableService", + HandlerType: (*BigtableTableServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateTable", + Handler: _BigtableTableService_CreateTable_Handler, + }, + { + MethodName: "ListTables", + Handler: _BigtableTableService_ListTables_Handler, + }, + { + MethodName: "GetTable", + Handler: _BigtableTableService_GetTable_Handler, + }, + { + MethodName: "DeleteTable", + Handler: _BigtableTableService_DeleteTable_Handler, + }, + { + MethodName: "RenameTable", + Handler: _BigtableTableService_RenameTable_Handler, + }, + { + MethodName: "CreateColumnFamily", + Handler: 
_BigtableTableService_CreateColumnFamily_Handler, + }, + { + MethodName: "UpdateColumnFamily", + Handler: _BigtableTableService_UpdateColumnFamily_Handler, + }, + { + MethodName: "DeleteColumnFamily", + Handler: _BigtableTableService_DeleteColumnFamily_Handler, + }, + { + MethodName: "BulkDeleteRows", + Handler: _BigtableTableService_BulkDeleteRows_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/bigtable/admin/table/v1/bigtable_table_service.proto", +} + +func init() { + proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_service.proto", fileDescriptor1) +} + +var fileDescriptor1 = []byte{ + // 560 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0xbf, 0x6f, 0xd4, 0x30, + 0x14, 0xc7, 0x65, 0x06, 0x84, 0x7c, 0x88, 0xc1, 0x42, 0x0c, 0x07, 0x62, 0x88, 0xc4, 0x12, 0xa1, + 0x58, 0xb9, 0x82, 0x68, 0xaf, 0xaa, 0x84, 0x72, 0xd7, 0x56, 0xa2, 0x20, 0x95, 0x03, 0x16, 0x96, + 0xca, 0xc9, 0x3d, 0xa2, 0x40, 0x62, 0x87, 0xd8, 0x39, 0x54, 0x50, 0x17, 0x16, 0xfe, 0x00, 0x58, + 0x61, 0x62, 0x64, 0x82, 0x1d, 0x76, 0x56, 0xfe, 0x05, 0xfe, 0x10, 0x14, 0x3b, 0xa1, 0x69, 0xf9, + 0xe1, 0x73, 0xd5, 0x25, 0xe7, 0xb3, 0xbf, 0xdf, 0xf7, 0x3e, 0xcf, 0x7e, 0x96, 0xf1, 0x7a, 0x2a, + 0x44, 0x9a, 0x03, 0x8d, 0xb3, 0x54, 0xb1, 0x38, 0x07, 0xca, 0xe6, 0x45, 0xc6, 0xa9, 0x19, 0x2f, + 0xc2, 0xdf, 0xf3, 0x7b, 0xe6, 0x2b, 0xa1, 0x5a, 0x64, 0x09, 0x04, 0x65, 0x25, 0x94, 0x20, 0x57, + 0x8d, 0x39, 0xe8, 0x44, 0x81, 0x36, 0x07, 0x66, 0xbc, 0x08, 0x87, 0x57, 0xda, 0xe0, 0xac, 0xcc, + 0x28, 0xe3, 0x5c, 0x28, 0xa6, 0x32, 0xc1, 0xa5, 0x71, 0x0f, 0x57, 0xdd, 0x52, 0xcf, 0x99, 0x62, + 0xad, 0x73, 0x7a, 0x22, 0xe8, 0xbd, 0x02, 0xa4, 0x64, 0x29, 0x74, 0xf9, 0x2f, 0xb7, 0x51, 0xf4, + 0xbf, 0xb8, 0x7e, 0x42, 0xa1, 0x28, 0xd5, 0xbe, 0x59, 0x1c, 0x7d, 0x3d, 0x8f, 0x2f, 0x46, 0x6d, + 0x98, 0x87, 0xcd, 0xe7, 0x81, 0x09, 0x42, 0x3e, 0x22, 0x3c, 0x98, 0x54, 0xc0, 0x94, 0x99, 0x26, + 0xa3, 0xe0, 0xff, 0x9b, 0x10, 0xf4, 0xc4, 
0x33, 0x78, 0x5e, 0x83, 0x54, 0xc3, 0x6b, 0x36, 0x8f, + 0x56, 0x7b, 0xe3, 0xd7, 0x3f, 0x7e, 0xbe, 0x3d, 0x73, 0xc3, 0xa3, 0x4d, 0x4d, 0xaf, 0x38, 0x2b, + 0x60, 0xa3, 0xac, 0xc4, 0x53, 0x48, 0x94, 0xa4, 0x3e, 0x7d, 0x29, 0x38, 0x34, 0xbf, 0x49, 0x5e, + 0x4b, 0x05, 0x95, 0xa4, 0xfe, 0x81, 0xd9, 0x01, 0x39, 0x46, 0x3e, 0xf9, 0x84, 0x30, 0xbe, 0x9b, + 0x49, 0xa5, 0x23, 0x49, 0x12, 0xda, 0x32, 0x1e, 0x6a, 0x3b, 0xc8, 0x91, 0x8b, 0x45, 0x96, 0x82, + 0x4b, 0xf0, 0x6e, 0x69, 0xe2, 0x90, 0xb8, 0x12, 0x93, 0xf7, 0x08, 0x9f, 0xdb, 0x06, 0x13, 0x8e, + 0x50, 0x5b, 0xe6, 0x4e, 0xe9, 0xb8, 0x9f, 0x6b, 0x9a, 0x6e, 0x85, 0x84, 0x4b, 0xd2, 0xb5, 0x70, + 0xd4, 0x3f, 0x20, 0xef, 0x10, 0x1e, 0x4c, 0x21, 0x87, 0xa5, 0x4f, 0xbd, 0x27, 0xee, 0x28, 0x2f, + 0x75, 0x9e, 0xae, 0xe1, 0x82, 0xcd, 0xa6, 0xe1, 0x3a, 0x2c, 0xff, 0x04, 0x58, 0x1f, 0x10, 0x1e, + 0xcc, 0xa0, 0xb1, 0x2c, 0x89, 0xd5, 0x13, 0xdb, 0xb0, 0x26, 0x1a, 0x6b, 0xc3, 0x5b, 0x75, 0xc6, + 0x1a, 0x57, 0x3a, 0x4b, 0xd3, 0x86, 0xdf, 0x11, 0x26, 0xe6, 0x02, 0x4c, 0x44, 0x5e, 0x17, 0x7c, + 0x8b, 0x15, 0x59, 0xbe, 0x4f, 0xd6, 0x96, 0xbb, 0x34, 0x7d, 0x4f, 0x87, 0x7b, 0xdd, 0x6a, 0xed, + 0x99, 0xbc, 0x1d, 0x5d, 0xc4, 0xa6, 0x77, 0xdb, 0xb9, 0x08, 0x9a, 0x1c, 0xc6, 0xc9, 0xcc, 0x9d, + 0xfa, 0x86, 0x30, 0x79, 0x54, 0xce, 0x8f, 0x17, 0xe3, 0x44, 0xe4, 0xc8, 0x7f, 0x4f, 0xf3, 0x6f, + 0x0f, 0x23, 0x57, 0xfe, 0x63, 0xf8, 0xcd, 0xa9, 0x20, 0x9f, 0x7c, 0x41, 0x98, 0x98, 0xce, 0x74, + 0x3b, 0x8e, 0x3f, 0x3d, 0xb6, 0xee, 0xb9, 0xa3, 0xc1, 0xa7, 0xfe, 0x29, 0x80, 0x93, 0xcf, 0x08, + 0x5f, 0x88, 0xea, 0xfc, 0x99, 0xa1, 0x98, 0x89, 0x17, 0x92, 0xdc, 0xb4, 0x11, 0x1f, 0xd5, 0xdb, + 0x68, 0xef, 0x6b, 0xda, 0x1d, 0x6f, 0x4b, 0xd3, 0x9a, 0x57, 0xc3, 0xa9, 0xe3, 0xe3, 0x23, 0xe9, + 0xc6, 0xc8, 0x8f, 0xde, 0x20, 0xec, 0x25, 0xa2, 0xb0, 0x70, 0x46, 0xc3, 0xbf, 0x3d, 0x32, 0x72, + 0xb7, 0xc1, 0xdb, 0x45, 0x8f, 0x27, 0xad, 0x3b, 0x15, 0x39, 0xe3, 0x69, 0x20, 0xaa, 0x94, 0xa6, + 0xc0, 0x35, 0x3c, 0x35, 0x4b, 0xac, 0xcc, 0xe4, 0xbf, 0xde, 0xc1, 0x75, 0x3d, 
0x88, 0xcf, 0x6a, + 0xfd, 0xca, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x65, 0xb4, 0xe0, 0xeb, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_service_messages.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_service_messages.pb.go new file mode 100644 index 000000000..9d9ad71ed --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/table/v1/bigtable_table_service_messages.pb.go @@ -0,0 +1,400 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/table/v1/bigtable_table_service_messages.proto + +package table + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type CreateTableRequest struct { + // The unique name of the cluster in which to create the new table. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name by which the new table should be referred to within the cluster, + // e.g. "foobar" rather than "/tables/foobar". + TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId" json:"table_id,omitempty"` + // The Table to create. The `name` field of the Table and all of its + // ColumnFamilies must be left blank, and will be populated in the response. + Table *Table `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` + // The optional list of row keys that will be used to initially split the + // table into several tablets (Tablets are similar to HBase regions). + // Given two split keys, "s1" and "s2", three tablets will be created, + // spanning the key ranges: [, s1), [s1, s2), [s2, ). 
+ // + // Example: + // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", + // "other", "zz"] + // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] + // * Key assignment: + // - Tablet 1 [, apple) => {"a"}. + // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. + // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. + // - Tablet 4 [customer_2, other) => {"customer_2"}. + // - Tablet 5 [other, ) => {"other", "zz"}. + InitialSplitKeys []string `protobuf:"bytes,4,rep,name=initial_split_keys,json=initialSplitKeys" json:"initial_split_keys,omitempty"` +} + +func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} } +func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTableRequest) ProtoMessage() {} +func (*CreateTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *CreateTableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateTableRequest) GetTableId() string { + if m != nil { + return m.TableId + } + return "" +} + +func (m *CreateTableRequest) GetTable() *Table { + if m != nil { + return m.Table + } + return nil +} + +func (m *CreateTableRequest) GetInitialSplitKeys() []string { + if m != nil { + return m.InitialSplitKeys + } + return nil +} + +type ListTablesRequest struct { + // The unique name of the cluster for which tables should be listed. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ListTablesRequest) Reset() { *m = ListTablesRequest{} } +func (m *ListTablesRequest) String() string { return proto.CompactTextString(m) } +func (*ListTablesRequest) ProtoMessage() {} +func (*ListTablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ListTablesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type ListTablesResponse struct { + // The tables present in the requested cluster. 
+ // At present, only the names of the tables are populated. + Tables []*Table `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` +} + +func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} } +func (m *ListTablesResponse) String() string { return proto.CompactTextString(m) } +func (*ListTablesResponse) ProtoMessage() {} +func (*ListTablesResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *ListTablesResponse) GetTables() []*Table { + if m != nil { + return m.Tables + } + return nil +} + +type GetTableRequest struct { + // The unique name of the requested table. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetTableRequest) Reset() { *m = GetTableRequest{} } +func (m *GetTableRequest) String() string { return proto.CompactTextString(m) } +func (*GetTableRequest) ProtoMessage() {} +func (*GetTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *GetTableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type DeleteTableRequest struct { + // The unique name of the table to be deleted. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} } +func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTableRequest) ProtoMessage() {} +func (*DeleteTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *DeleteTableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type RenameTableRequest struct { + // The current unique name of the table. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The new name by which the table should be referred to within its containing + // cluster, e.g. "foobar" rather than "/tables/foobar". 
+ NewId string `protobuf:"bytes,2,opt,name=new_id,json=newId" json:"new_id,omitempty"` +} + +func (m *RenameTableRequest) Reset() { *m = RenameTableRequest{} } +func (m *RenameTableRequest) String() string { return proto.CompactTextString(m) } +func (*RenameTableRequest) ProtoMessage() {} +func (*RenameTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *RenameTableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RenameTableRequest) GetNewId() string { + if m != nil { + return m.NewId + } + return "" +} + +type CreateColumnFamilyRequest struct { + // The unique name of the table in which to create the new column family. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name by which the new column family should be referred to within the + // table, e.g. "foobar" rather than "/columnFamilies/foobar". + ColumnFamilyId string `protobuf:"bytes,2,opt,name=column_family_id,json=columnFamilyId" json:"column_family_id,omitempty"` + // The column family to create. The `name` field must be left blank. 
+ ColumnFamily *ColumnFamily `protobuf:"bytes,3,opt,name=column_family,json=columnFamily" json:"column_family,omitempty"` +} + +func (m *CreateColumnFamilyRequest) Reset() { *m = CreateColumnFamilyRequest{} } +func (m *CreateColumnFamilyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateColumnFamilyRequest) ProtoMessage() {} +func (*CreateColumnFamilyRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *CreateColumnFamilyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateColumnFamilyRequest) GetColumnFamilyId() string { + if m != nil { + return m.ColumnFamilyId + } + return "" +} + +func (m *CreateColumnFamilyRequest) GetColumnFamily() *ColumnFamily { + if m != nil { + return m.ColumnFamily + } + return nil +} + +type DeleteColumnFamilyRequest struct { + // The unique name of the column family to be deleted. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteColumnFamilyRequest) Reset() { *m = DeleteColumnFamilyRequest{} } +func (m *DeleteColumnFamilyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteColumnFamilyRequest) ProtoMessage() {} +func (*DeleteColumnFamilyRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *DeleteColumnFamilyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type BulkDeleteRowsRequest struct { + // The unique name of the table on which to perform the bulk delete + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // Types that are valid to be assigned to Target: + // *BulkDeleteRowsRequest_RowKeyPrefix + // *BulkDeleteRowsRequest_DeleteAllDataFromTable + Target isBulkDeleteRowsRequest_Target `protobuf_oneof:"target"` +} + +func (m *BulkDeleteRowsRequest) Reset() { *m = BulkDeleteRowsRequest{} } +func (m *BulkDeleteRowsRequest) String() string { return 
proto.CompactTextString(m) } +func (*BulkDeleteRowsRequest) ProtoMessage() {} +func (*BulkDeleteRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +type isBulkDeleteRowsRequest_Target interface { + isBulkDeleteRowsRequest_Target() +} + +type BulkDeleteRowsRequest_RowKeyPrefix struct { + RowKeyPrefix []byte `protobuf:"bytes,2,opt,name=row_key_prefix,json=rowKeyPrefix,proto3,oneof"` +} +type BulkDeleteRowsRequest_DeleteAllDataFromTable struct { + DeleteAllDataFromTable bool `protobuf:"varint,3,opt,name=delete_all_data_from_table,json=deleteAllDataFromTable,oneof"` +} + +func (*BulkDeleteRowsRequest_RowKeyPrefix) isBulkDeleteRowsRequest_Target() {} +func (*BulkDeleteRowsRequest_DeleteAllDataFromTable) isBulkDeleteRowsRequest_Target() {} + +func (m *BulkDeleteRowsRequest) GetTarget() isBulkDeleteRowsRequest_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *BulkDeleteRowsRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *BulkDeleteRowsRequest) GetRowKeyPrefix() []byte { + if x, ok := m.GetTarget().(*BulkDeleteRowsRequest_RowKeyPrefix); ok { + return x.RowKeyPrefix + } + return nil +} + +func (m *BulkDeleteRowsRequest) GetDeleteAllDataFromTable() bool { + if x, ok := m.GetTarget().(*BulkDeleteRowsRequest_DeleteAllDataFromTable); ok { + return x.DeleteAllDataFromTable + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*BulkDeleteRowsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BulkDeleteRowsRequest_OneofMarshaler, _BulkDeleteRowsRequest_OneofUnmarshaler, _BulkDeleteRowsRequest_OneofSizer, []interface{}{ + (*BulkDeleteRowsRequest_RowKeyPrefix)(nil), + (*BulkDeleteRowsRequest_DeleteAllDataFromTable)(nil), + } +} + +func _BulkDeleteRowsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*BulkDeleteRowsRequest) + // target + switch x := m.Target.(type) { + case *BulkDeleteRowsRequest_RowKeyPrefix: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.RowKeyPrefix) + case *BulkDeleteRowsRequest_DeleteAllDataFromTable: + t := uint64(0) + if x.DeleteAllDataFromTable { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("BulkDeleteRowsRequest.Target has unexpected type %T", x) + } + return nil +} + +func _BulkDeleteRowsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*BulkDeleteRowsRequest) + switch tag { + case 2: // target.row_key_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Target = &BulkDeleteRowsRequest_RowKeyPrefix{x} + return true, err + case 3: // target.delete_all_data_from_table + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Target = &BulkDeleteRowsRequest_DeleteAllDataFromTable{x != 0} + return true, err + default: + return false, nil + } +} + +func _BulkDeleteRowsRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BulkDeleteRowsRequest) + // target + switch x := m.Target.(type) { + case *BulkDeleteRowsRequest_RowKeyPrefix: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.RowKeyPrefix))) + n += len(x.RowKeyPrefix) + case *BulkDeleteRowsRequest_DeleteAllDataFromTable: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*CreateTableRequest)(nil), "google.bigtable.admin.table.v1.CreateTableRequest") + proto.RegisterType((*ListTablesRequest)(nil), "google.bigtable.admin.table.v1.ListTablesRequest") + proto.RegisterType((*ListTablesResponse)(nil), "google.bigtable.admin.table.v1.ListTablesResponse") + proto.RegisterType((*GetTableRequest)(nil), "google.bigtable.admin.table.v1.GetTableRequest") + proto.RegisterType((*DeleteTableRequest)(nil), "google.bigtable.admin.table.v1.DeleteTableRequest") + proto.RegisterType((*RenameTableRequest)(nil), "google.bigtable.admin.table.v1.RenameTableRequest") + proto.RegisterType((*CreateColumnFamilyRequest)(nil), "google.bigtable.admin.table.v1.CreateColumnFamilyRequest") + proto.RegisterType((*DeleteColumnFamilyRequest)(nil), "google.bigtable.admin.table.v1.DeleteColumnFamilyRequest") + proto.RegisterType((*BulkDeleteRowsRequest)(nil), "google.bigtable.admin.table.v1.BulkDeleteRowsRequest") +} + +func init() { + proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_service_messages.proto", fileDescriptor2) +} + +var fileDescriptor2 = []byte{ + // 514 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xad, 0x49, 0x1b, 0x92, 0x21, 0x94, 0xb2, 0x52, 0x51, 0x52, 0x09, 0x14, 0x56, 0x2a, 0xe4, + 0x50, 0xd9, 0x2a, 0x5c, 0x90, 0x0a, 0x42, 0x24, 0x51, 0x69, 0x54, 0x40, 0xc1, 0xe1, 0xc4, 0xc5, + 0xda, 0xc4, 0x13, 0x6b, 0xd5, 0xb5, 0x37, 0xec, 0x6e, 0x12, 0xf2, 0x13, 0x7c, 0x06, 0x27, 0xc4, + 0x37, 0x22, 0xef, 0x9a, 0x26, 0x3d, 0x10, 0x97, 0x8b, 0x35, 0x9e, 0x79, 0xf3, 0x66, 0xf6, 0xcd, + 0x0c, 0xf4, 0x13, 0x29, 0x13, 0x81, 0xc1, 
0x98, 0x27, 0x86, 0x8d, 0x05, 0x06, 0x2c, 0x4e, 0x79, + 0x16, 0x38, 0x7b, 0x71, 0x7a, 0xed, 0x8f, 0xdc, 0x57, 0xa3, 0x5a, 0xf0, 0x09, 0x46, 0x29, 0x6a, + 0xcd, 0x12, 0xd4, 0xfe, 0x4c, 0x49, 0x23, 0xc9, 0x13, 0xc7, 0xe2, 0xff, 0x45, 0xfb, 0x96, 0xc5, + 0x77, 0xf6, 0xe2, 0xf4, 0xe8, 0xd5, 0xff, 0x55, 0x89, 0x99, 0x61, 0x8e, 0x99, 0xfe, 0xf6, 0x80, + 0xf4, 0x14, 0x32, 0x83, 0x5f, 0xf2, 0x50, 0x88, 0xdf, 0xe6, 0xa8, 0x0d, 0x21, 0xb0, 0x9b, 0xb1, + 0x14, 0x9b, 0x5e, 0xdb, 0xeb, 0xd4, 0x43, 0x6b, 0x93, 0x16, 0xd4, 0x5c, 0x3a, 0x8f, 0x9b, 0x77, + 0xac, 0xff, 0xae, 0xfd, 0x1f, 0xc4, 0xe4, 0x0c, 0xf6, 0xac, 0xd9, 0xac, 0xb4, 0xbd, 0xce, 0xbd, + 0x17, 0xc7, 0xfe, 0xf6, 0x7e, 0x7d, 0x57, 0xcb, 0xe5, 0x90, 0x13, 0x20, 0x3c, 0xe3, 0x86, 0x33, + 0x11, 0xe9, 0x99, 0xe0, 0x26, 0xba, 0xc2, 0x95, 0x6e, 0xee, 0xb6, 0x2b, 0x9d, 0x7a, 0x78, 0x50, + 0x44, 0x46, 0x79, 0xe0, 0x12, 0x57, 0x9a, 0x3e, 0x87, 0x87, 0x1f, 0xb8, 0x36, 0x96, 0x41, 0x6f, + 0x69, 0x97, 0x8e, 0x80, 0x6c, 0x02, 0xf5, 0x4c, 0x66, 0x1a, 0xc9, 0x1b, 0xa8, 0xda, 0xaa, 0xba, + 0xe9, 0xb5, 0x2b, 0xb7, 0x6f, 0xb5, 0x48, 0xa2, 0xc7, 0xf0, 0xe0, 0x3d, 0x9a, 0x32, 0xa9, 0x68, + 0x07, 0x48, 0x1f, 0x05, 0x96, 0x8b, 0x4a, 0xdf, 0x02, 0x09, 0x31, 0xb7, 0x4a, 0xe5, 0x3f, 0x84, + 0x6a, 0x86, 0xcb, 0xb5, 0xf8, 0x7b, 0x19, 0x2e, 0x07, 0x31, 0xfd, 0xe5, 0x41, 0xcb, 0x0d, 0xb0, + 0x27, 0xc5, 0x3c, 0xcd, 0xce, 0x59, 0xca, 0xc5, 0x6a, 0x1b, 0x51, 0x07, 0x0e, 0x26, 0x16, 0x1a, + 0x4d, 0x2d, 0x76, 0x4d, 0xb9, 0x3f, 0xd9, 0xa0, 0x18, 0xc4, 0xe4, 0x33, 0xdc, 0xbf, 0x81, 0x2c, + 0xc6, 0x7b, 0x52, 0xa6, 0xd9, 0x8d, 0x4e, 0x1a, 0x9b, 0xa4, 0x34, 0x80, 0x96, 0x53, 0xe6, 0x96, + 0xdd, 0xd2, 0x9f, 0x1e, 0x1c, 0x76, 0xe7, 0xe2, 0xca, 0x65, 0x85, 0x72, 0x79, 0x3d, 0xf4, 0xc7, + 0x00, 0x6e, 0x1f, 0x37, 0x72, 0xea, 0xd6, 0xf3, 0x29, 0x7f, 0xe6, 0x33, 0xd8, 0x57, 0x72, 0x99, + 0x2f, 0x53, 0x34, 0x53, 0x38, 0xe5, 0xdf, 0xed, 0x23, 0x1b, 0x17, 0x3b, 0x61, 0x43, 0xc9, 0xe5, + 0x25, 0xae, 0x86, 0xd6, 0x4b, 0x5e, 0xc3, 0x51, 0x6c, 0xb9, 0x23, 0x26, 0x84, 
0x3d, 0x8d, 0x68, + 0xaa, 0x64, 0x1a, 0xad, 0x17, 0xba, 0x76, 0xb1, 0x13, 0x3e, 0x72, 0x98, 0x77, 0x42, 0xf4, 0x99, + 0x61, 0xe7, 0x4a, 0xa6, 0x76, 0x60, 0xdd, 0x5a, 0xbe, 0x4f, 0x2a, 0x41, 0xd3, 0xfd, 0xe1, 0x01, + 0x9d, 0xc8, 0xb4, 0x44, 0x9b, 0xee, 0xd3, 0x6e, 0x11, 0xb0, 0xf9, 0x23, 0x77, 0xef, 0x1f, 0x8b, + 0x73, 0x1f, 0xe6, 0x37, 0x39, 0xf4, 0xbe, 0xf6, 0x0a, 0x92, 0x44, 0x0a, 0x96, 0x25, 0xbe, 0x54, + 0x49, 0x90, 0x60, 0x66, 0x2f, 0x36, 0x70, 0x21, 0x36, 0xe3, 0xfa, 0x5f, 0xe7, 0x7e, 0x66, 0x8d, + 0x71, 0xd5, 0xe2, 0x5f, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x08, 0x29, 0x16, 0x83, 0x04, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go new file mode 100644 index 000000000..62f859169 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go @@ -0,0 +1,962 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/v2/bigtable_instance_admin.proto + +/* +Package admin is a generated protocol buffer package. 
+ +It is generated from these files: + google/bigtable/admin/v2/bigtable_instance_admin.proto + google/bigtable/admin/v2/bigtable_table_admin.proto + google/bigtable/admin/v2/common.proto + google/bigtable/admin/v2/instance.proto + google/bigtable/admin/v2/table.proto + +It has these top-level messages: + CreateInstanceRequest + GetInstanceRequest + ListInstancesRequest + ListInstancesResponse + DeleteInstanceRequest + CreateClusterRequest + GetClusterRequest + ListClustersRequest + ListClustersResponse + DeleteClusterRequest + CreateInstanceMetadata + CreateClusterMetadata + UpdateClusterMetadata + CreateTableRequest + DropRowRangeRequest + ListTablesRequest + ListTablesResponse + GetTableRequest + DeleteTableRequest + ModifyColumnFamiliesRequest + Instance + Cluster + Table + ColumnFamily + GcRule +*/ +package admin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request message for BigtableInstanceAdmin.CreateInstance. +type CreateInstanceRequest struct { + // The unique name of the project in which to create the new instance. + // Values are of the form `projects/`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The ID to be used when referring to the new instance within its project, + // e.g., just `myinstance` rather than + // `projects/myproject/instances/myinstance`. + InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId" json:"instance_id,omitempty"` + // The instance to create. + // Fields marked `OutputOnly` must be left blank. + Instance *Instance `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + // The clusters to be created within the instance, mapped by desired + // cluster ID, e.g., just `mycluster` rather than + // `projects/myproject/instances/myinstance/clusters/mycluster`. + // Fields marked `OutputOnly` must be left blank. + // Currently exactly one cluster must be specified. + Clusters map[string]*Cluster `protobuf:"bytes,4,rep,name=clusters" json:"clusters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *CreateInstanceRequest) Reset() { *m = CreateInstanceRequest{} } +func (m *CreateInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateInstanceRequest) ProtoMessage() {} +func (*CreateInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *CreateInstanceRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateInstanceRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *CreateInstanceRequest) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +func (m *CreateInstanceRequest) GetClusters() map[string]*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +// Request message for BigtableInstanceAdmin.GetInstance. +type GetInstanceRequest struct { + // The unique name of the requested instance. Values are of the form + // `projects//instances/`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetInstanceRequest) Reset() { *m = GetInstanceRequest{} } +func (m *GetInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*GetInstanceRequest) ProtoMessage() {} +func (*GetInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GetInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for BigtableInstanceAdmin.ListInstances. +type ListInstancesRequest struct { + // The unique name of the project for which a list of instances is requested. + // Values are of the form `projects/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The value of `next_page_token` returned by a previous call. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListInstancesRequest) Reset() { *m = ListInstancesRequest{} } +func (m *ListInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*ListInstancesRequest) ProtoMessage() {} +func (*ListInstancesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListInstancesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListInstancesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for BigtableInstanceAdmin.ListInstances. +type ListInstancesResponse struct { + // The list of requested instances. + Instances []*Instance `protobuf:"bytes,1,rep,name=instances" json:"instances,omitempty"` + // Locations from which Instance information could not be retrieved, + // due to an outage or some other transient condition. 
+ // Instances whose Clusters are all in one of the failed locations + // may be missing from `instances`, and Instances with at least one + // Cluster in a failed location may only have partial information returned. + FailedLocations []string `protobuf:"bytes,2,rep,name=failed_locations,json=failedLocations" json:"failed_locations,omitempty"` + // Set if not all instances could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListInstancesResponse) Reset() { *m = ListInstancesResponse{} } +func (m *ListInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*ListInstancesResponse) ProtoMessage() {} +func (*ListInstancesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListInstancesResponse) GetInstances() []*Instance { + if m != nil { + return m.Instances + } + return nil +} + +func (m *ListInstancesResponse) GetFailedLocations() []string { + if m != nil { + return m.FailedLocations + } + return nil +} + +func (m *ListInstancesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for BigtableInstanceAdmin.DeleteInstance. +type DeleteInstanceRequest struct { + // The unique name of the instance to be deleted. + // Values are of the form `projects//instances/`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteInstanceRequest) Reset() { *m = DeleteInstanceRequest{} } +func (m *DeleteInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteInstanceRequest) ProtoMessage() {} +func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *DeleteInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for BigtableInstanceAdmin.CreateCluster. +type CreateClusterRequest struct { + // The unique name of the instance in which to create the new cluster. + // Values are of the form + // `projects//instances/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The ID to be used when referring to the new cluster within its instance, + // e.g., just `mycluster` rather than + // `projects/myproject/instances/myinstance/clusters/mycluster`. + ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The cluster to be created. + // Fields marked `OutputOnly` must be left blank. + Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateClusterRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateClusterRequest) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +// Request message for BigtableInstanceAdmin.GetCluster. 
+type GetClusterRequest struct { + // The unique name of the requested cluster. Values are of the form + // `projects//instances//clusters/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *GetClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for BigtableInstanceAdmin.ListClusters. +type ListClustersRequest struct { + // The unique name of the instance for which a list of clusters is requested. + // Values are of the form `projects//instances/`. + // Use ` = '-'` to list Clusters for all Instances in a project, + // e.g., `projects/myproject/instances/-`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The value of `next_page_token` returned by a previous call. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListClustersRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListClustersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for BigtableInstanceAdmin.ListClusters. +type ListClustersResponse struct { + // The list of requested clusters. 
+ Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` + // Locations from which Cluster information could not be retrieved, + // due to an outage or some other transient condition. + // Clusters from these locations may be missing from `clusters`, + // or may only have partial information returned. + FailedLocations []string `protobuf:"bytes,2,rep,name=failed_locations,json=failedLocations" json:"failed_locations,omitempty"` + // Set if not all clusters could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetFailedLocations() []string { + if m != nil { + return m.FailedLocations + } + return nil +} + +func (m *ListClustersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for BigtableInstanceAdmin.DeleteCluster. +type DeleteClusterRequest struct { + // The unique name of the cluster to be deleted. Values are of the form + // `projects//instances//clusters/`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *DeleteClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The metadata for the Operation returned by CreateInstance. +type CreateInstanceMetadata struct { + // The request that prompted the initiation of this CreateInstance operation. + OriginalRequest *CreateInstanceRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"` + // The time at which the original request was received. + RequestTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"` + // The time at which the operation failed or was completed successfully. 
+ FinishTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"` +} + +func (m *CreateInstanceMetadata) Reset() { *m = CreateInstanceMetadata{} } +func (m *CreateInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateInstanceMetadata) ProtoMessage() {} +func (*CreateInstanceMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *CreateInstanceMetadata) GetOriginalRequest() *CreateInstanceRequest { + if m != nil { + return m.OriginalRequest + } + return nil +} + +func (m *CreateInstanceMetadata) GetRequestTime() *google_protobuf1.Timestamp { + if m != nil { + return m.RequestTime + } + return nil +} + +func (m *CreateInstanceMetadata) GetFinishTime() *google_protobuf1.Timestamp { + if m != nil { + return m.FinishTime + } + return nil +} + +// The metadata for the Operation returned by CreateCluster. +type CreateClusterMetadata struct { + // The request that prompted the initiation of this CreateCluster operation. + OriginalRequest *CreateClusterRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"` + // The time at which the original request was received. + RequestTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"` + // The time at which the operation failed or was completed successfully. 
+ FinishTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"` +} + +func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} } +func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateClusterMetadata) ProtoMessage() {} +func (*CreateClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest { + if m != nil { + return m.OriginalRequest + } + return nil +} + +func (m *CreateClusterMetadata) GetRequestTime() *google_protobuf1.Timestamp { + if m != nil { + return m.RequestTime + } + return nil +} + +func (m *CreateClusterMetadata) GetFinishTime() *google_protobuf1.Timestamp { + if m != nil { + return m.FinishTime + } + return nil +} + +// The metadata for the Operation returned by UpdateCluster. +type UpdateClusterMetadata struct { + // The request that prompted the initiation of this UpdateCluster operation. + OriginalRequest *Cluster `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"` + // The time at which the original request was received. + RequestTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"` + // The time at which the operation failed or was completed successfully. 
+ FinishTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"` +} + +func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} } +func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterMetadata) ProtoMessage() {} +func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *UpdateClusterMetadata) GetOriginalRequest() *Cluster { + if m != nil { + return m.OriginalRequest + } + return nil +} + +func (m *UpdateClusterMetadata) GetRequestTime() *google_protobuf1.Timestamp { + if m != nil { + return m.RequestTime + } + return nil +} + +func (m *UpdateClusterMetadata) GetFinishTime() *google_protobuf1.Timestamp { + if m != nil { + return m.FinishTime + } + return nil +} + +func init() { + proto.RegisterType((*CreateInstanceRequest)(nil), "google.bigtable.admin.v2.CreateInstanceRequest") + proto.RegisterType((*GetInstanceRequest)(nil), "google.bigtable.admin.v2.GetInstanceRequest") + proto.RegisterType((*ListInstancesRequest)(nil), "google.bigtable.admin.v2.ListInstancesRequest") + proto.RegisterType((*ListInstancesResponse)(nil), "google.bigtable.admin.v2.ListInstancesResponse") + proto.RegisterType((*DeleteInstanceRequest)(nil), "google.bigtable.admin.v2.DeleteInstanceRequest") + proto.RegisterType((*CreateClusterRequest)(nil), "google.bigtable.admin.v2.CreateClusterRequest") + proto.RegisterType((*GetClusterRequest)(nil), "google.bigtable.admin.v2.GetClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "google.bigtable.admin.v2.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "google.bigtable.admin.v2.ListClustersResponse") + proto.RegisterType((*DeleteClusterRequest)(nil), "google.bigtable.admin.v2.DeleteClusterRequest") + proto.RegisterType((*CreateInstanceMetadata)(nil), "google.bigtable.admin.v2.CreateInstanceMetadata") + 
proto.RegisterType((*CreateClusterMetadata)(nil), "google.bigtable.admin.v2.CreateClusterMetadata") + proto.RegisterType((*UpdateClusterMetadata)(nil), "google.bigtable.admin.v2.UpdateClusterMetadata") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for BigtableInstanceAdmin service + +type BigtableInstanceAdminClient interface { + // Create an instance within a project. + CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets information about an instance. + GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) + // Lists information about instances in a project. + ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) + // Updates an instance within a project. + UpdateInstance(ctx context.Context, in *Instance, opts ...grpc.CallOption) (*Instance, error) + // Delete an instance from a project. + DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Creates a cluster within an instance. + CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets information about a cluster. + GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Lists information about clusters in an instance. + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Updates a cluster within an instance. 
+ UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes a cluster from an instance. + DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) +} + +type bigtableInstanceAdminClient struct { + cc *grpc.ClientConn +} + +func NewBigtableInstanceAdminClient(cc *grpc.ClientConn) BigtableInstanceAdminClient { + return &bigtableInstanceAdminClient{cc} +} + +func (c *bigtableInstanceAdminClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) { + out := new(Instance) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) { + out := new(ListInstancesResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) UpdateInstance(ctx context.Context, in *Instance, opts ...grpc.CallOption) (*Instance, error) { + out := new(Instance) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableInstanceAdminClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BigtableInstanceAdmin service + +type BigtableInstanceAdminServer interface { + // Create an instance within a project. + CreateInstance(context.Context, *CreateInstanceRequest) (*google_longrunning.Operation, error) + // Gets information about an instance. + GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) + // Lists information about instances in a project. + ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error) + // Updates an instance within a project. + UpdateInstance(context.Context, *Instance) (*Instance, error) + // Delete an instance from a project. + DeleteInstance(context.Context, *DeleteInstanceRequest) (*google_protobuf3.Empty, error) + // Creates a cluster within an instance. + CreateCluster(context.Context, *CreateClusterRequest) (*google_longrunning.Operation, error) + // Gets information about a cluster. + GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) + // Lists information about clusters in an instance. + ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Updates a cluster within an instance. + UpdateCluster(context.Context, *Cluster) (*google_longrunning.Operation, error) + // Deletes a cluster from an instance. 
+ DeleteCluster(context.Context, *DeleteClusterRequest) (*google_protobuf3.Empty, error) +} + +func RegisterBigtableInstanceAdminServer(s *grpc.Server, srv BigtableInstanceAdminServer) { + s.RegisterService(&_BigtableInstanceAdmin_serviceDesc, srv) +} + +func _BigtableInstanceAdmin_CreateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).CreateInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).CreateInstance(ctx, req.(*CreateInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableInstanceAdmin_GetInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).GetInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).GetInstance(ctx, req.(*GetInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableInstanceAdmin_ListInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInstancesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + 
return srv.(BigtableInstanceAdminServer).ListInstances(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).ListInstances(ctx, req.(*ListInstancesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableInstanceAdmin_UpdateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Instance) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).UpdateInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).UpdateInstance(ctx, req.(*Instance)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableInstanceAdmin_DeleteInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).DeleteInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).DeleteInstance(ctx, req.(*DeleteInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableInstanceAdmin_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).CreateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).CreateCluster(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableInstanceAdmin_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableInstanceAdmin_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, 
info, handler) +} + +func _BigtableInstanceAdmin_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Cluster) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).UpdateCluster(ctx, req.(*Cluster)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableInstanceAdmin_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableInstanceAdminServer).DeleteCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableInstanceAdminServer).DeleteCluster(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BigtableInstanceAdmin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.admin.v2.BigtableInstanceAdmin", + HandlerType: (*BigtableInstanceAdminServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateInstance", + Handler: _BigtableInstanceAdmin_CreateInstance_Handler, + }, + { + MethodName: "GetInstance", + Handler: _BigtableInstanceAdmin_GetInstance_Handler, + }, + { + MethodName: "ListInstances", + Handler: _BigtableInstanceAdmin_ListInstances_Handler, + }, + { + MethodName: "UpdateInstance", + Handler: 
_BigtableInstanceAdmin_UpdateInstance_Handler, + }, + { + MethodName: "DeleteInstance", + Handler: _BigtableInstanceAdmin_DeleteInstance_Handler, + }, + { + MethodName: "CreateCluster", + Handler: _BigtableInstanceAdmin_CreateCluster_Handler, + }, + { + MethodName: "GetCluster", + Handler: _BigtableInstanceAdmin_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _BigtableInstanceAdmin_ListClusters_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _BigtableInstanceAdmin_UpdateCluster_Handler, + }, + { + MethodName: "DeleteCluster", + Handler: _BigtableInstanceAdmin_DeleteCluster_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/bigtable/admin/v2/bigtable_instance_admin.proto", +} + +func init() { + proto.RegisterFile("google/bigtable/admin/v2/bigtable_instance_admin.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 1026 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xc1, 0x6f, 0x1b, 0xc5, + 0x17, 0xd6, 0xd8, 0xfd, 0xf5, 0x57, 0xbf, 0xad, 0x93, 0x30, 0xc4, 0x91, 0xb5, 0xb4, 0x34, 0xdd, + 0x4a, 0xad, 0xeb, 0x86, 0x5d, 0x61, 0x10, 0x41, 0x89, 0x8c, 0x20, 0xa1, 0x8a, 0x22, 0xa5, 0x22, + 0xb2, 0x0a, 0x52, 0x7b, 0xc0, 0x9a, 0xd8, 0x93, 0x65, 0xe9, 0x7a, 0x76, 0xd9, 0x1d, 0x47, 0x44, + 0xa8, 0x17, 0x84, 0x38, 0x54, 0x82, 0x03, 0x1c, 0x81, 0x13, 0x17, 0x84, 0xf8, 0x4f, 0x38, 0x72, + 0xe4, 0x84, 0xc4, 0x19, 0xf1, 0x27, 0xa0, 0xd9, 0x99, 0x59, 0x7b, 0x9d, 0xb5, 0x77, 0x2d, 0x84, + 0xd4, 0x9b, 0x77, 0xe6, 0xbd, 0x37, 0xdf, 0xfb, 0xde, 0x37, 0xef, 0x8d, 0xe1, 0x0d, 0x37, 0x08, + 0x5c, 0x9f, 0x3a, 0x27, 0x9e, 0xcb, 0xc9, 0x89, 0x4f, 0x1d, 0x32, 0x1c, 0x79, 0xcc, 0x39, 0xeb, + 0xa4, 0x2b, 0x7d, 0x8f, 0xc5, 0x9c, 0xb0, 0x01, 0xed, 0x27, 0x5b, 0x76, 0x18, 0x05, 0x3c, 0xc0, + 0x4d, 0xe9, 0x67, 0x6b, 0x2b, 0x5b, 0x6e, 0x9e, 0x75, 0xcc, 0x6b, 0x2a, 0x22, 0x09, 0x3d, 0x87, + 0x30, 0x16, 0x70, 0xc2, 0xbd, 0x80, 0xc5, 0xd2, 0xcf, 0xbc, 0x33, 0xf7, 0x3c, 
0x7d, 0x8c, 0x32, + 0xbc, 0xa5, 0x0c, 0xfd, 0x80, 0xb9, 0xd1, 0x98, 0x31, 0x8f, 0xb9, 0x4e, 0x10, 0xd2, 0x28, 0x13, + 0xed, 0x25, 0x65, 0x94, 0x7c, 0x9d, 0x8c, 0x4f, 0x1d, 0x3a, 0x0a, 0xf9, 0xb9, 0xda, 0xbc, 0x31, + 0xbb, 0xc9, 0xbd, 0x11, 0x8d, 0x39, 0x19, 0x85, 0xd2, 0xc0, 0xfa, 0xb5, 0x02, 0x8d, 0xfd, 0x88, + 0x12, 0x4e, 0x0f, 0xd5, 0xd9, 0x3d, 0xfa, 0xc9, 0x98, 0xc6, 0x1c, 0x6f, 0xc0, 0xe5, 0x90, 0x44, + 0x94, 0xf1, 0x26, 0xda, 0x44, 0xad, 0x5a, 0x4f, 0x7d, 0xe1, 0x1b, 0x60, 0xa4, 0x6c, 0x78, 0xc3, + 0x66, 0x25, 0xd9, 0x04, 0xbd, 0x74, 0x38, 0xc4, 0x6f, 0xc1, 0x15, 0xfd, 0xd5, 0xac, 0x6e, 0xa2, + 0x96, 0xd1, 0xb1, 0xec, 0x79, 0x4c, 0xd9, 0xe9, 0xa9, 0xa9, 0x0f, 0x7e, 0x04, 0x57, 0x06, 0xfe, + 0x38, 0xe6, 0x34, 0x8a, 0x9b, 0x97, 0x36, 0xab, 0x2d, 0xa3, 0xd3, 0x9d, 0xef, 0x9f, 0x8b, 0xdd, + 0xde, 0x57, 0xfe, 0xf7, 0x19, 0x8f, 0xce, 0x7b, 0x69, 0x38, 0xf3, 0x43, 0xa8, 0x67, 0xb6, 0xf0, + 0x1a, 0x54, 0x9f, 0xd0, 0x73, 0x95, 0xa1, 0xf8, 0x89, 0xb7, 0xe1, 0x7f, 0x67, 0xc4, 0x1f, 0xd3, + 0x24, 0x31, 0xa3, 0x73, 0x73, 0xc1, 0xd1, 0x32, 0x52, 0x4f, 0xda, 0xef, 0x54, 0xde, 0x44, 0x56, + 0x0b, 0xf0, 0x01, 0xe5, 0xb3, 0x4c, 0x62, 0xb8, 0xc4, 0xc8, 0x88, 0xaa, 0x53, 0x92, 0xdf, 0xd6, + 0x03, 0x58, 0x3f, 0xf2, 0xe2, 0xd4, 0x34, 0x2e, 0x62, 0xfd, 0x3a, 0x40, 0x48, 0x5c, 0xda, 0xe7, + 0xc1, 0x13, 0xca, 0x14, 0xe9, 0x35, 0xb1, 0xf2, 0x50, 0x2c, 0x58, 0xbf, 0x20, 0x68, 0xcc, 0xc4, + 0x8b, 0xc3, 0x80, 0xc5, 0x14, 0xbf, 0x0d, 0x35, 0xcd, 0x6c, 0xdc, 0x44, 0x09, 0x9d, 0x65, 0xca, + 0x31, 0x71, 0xc2, 0x77, 0x61, 0xed, 0x94, 0x78, 0x3e, 0x1d, 0xf6, 0xfd, 0x60, 0x20, 0xa5, 0xd7, + 0xac, 0x6c, 0x56, 0x5b, 0xb5, 0xde, 0xaa, 0x5c, 0x3f, 0xd2, 0xcb, 0xf8, 0x36, 0xac, 0x32, 0xfa, + 0x29, 0xef, 0x4f, 0x41, 0xad, 0x26, 0x50, 0xeb, 0x62, 0xf9, 0x38, 0x85, 0x7b, 0x0f, 0x1a, 0xef, + 0x52, 0x9f, 0x5e, 0x14, 0x5d, 0x1e, 0x55, 0xcf, 0x10, 0xac, 0xcb, 0x32, 0x6b, 0xc6, 0x8b, 0xb9, + 0x52, 0x15, 0x9f, 0x08, 0xb4, 0xa6, 0x56, 0x0e, 0x87, 0x78, 0x17, 0xfe, 0xaf, 0x3e, 0x94, 0x3c, + 0x4b, 0xd4, 0x58, 
0x7b, 0x58, 0x77, 0xe0, 0x85, 0x03, 0xca, 0x67, 0x80, 0xe4, 0xa1, 0x3e, 0x82, + 0x17, 0x45, 0x41, 0xb4, 0xdc, 0xfe, 0x65, 0x7d, 0x7f, 0x42, 0x52, 0x2f, 0x93, 0x70, 0xaa, 0xbc, + 0xdd, 0xa9, 0xcb, 0x22, 0xab, 0x5b, 0x22, 0x9b, 0xd4, 0xe5, 0xbf, 0xa8, 0x6d, 0x1b, 0xd6, 0x65, + 0x6d, 0x4b, 0x90, 0xf4, 0x37, 0x82, 0x8d, 0xec, 0x0d, 0x7e, 0x40, 0x39, 0x19, 0x12, 0x4e, 0xf0, + 0x63, 0x58, 0x0b, 0x22, 0xcf, 0xf5, 0x18, 0xf1, 0xfb, 0x91, 0x0c, 0x91, 0xb8, 0x1a, 0x1d, 0x67, + 0xc9, 0x6e, 0xd0, 0x5b, 0xd5, 0x81, 0x34, 0x94, 0x2e, 0x5c, 0x55, 0x21, 0xfb, 0xa2, 0x1f, 0xaa, + 0xab, 0x6e, 0xea, 0xb8, 0xba, 0x59, 0xda, 0x0f, 0x75, 0xb3, 0xec, 0x19, 0xca, 0x5e, 0xac, 0xe0, + 0x5d, 0x30, 0x4e, 0x3d, 0xe6, 0xc5, 0x1f, 0x49, 0xef, 0x6a, 0xa1, 0x37, 0x48, 0x73, 0xb1, 0x60, + 0xfd, 0x85, 0x74, 0xc3, 0x55, 0xfc, 0xa4, 0x19, 0x3f, 0x9a, 0x9b, 0xb1, 0x5d, 0x94, 0x71, 0x96, + 0xea, 0xe7, 0x2b, 0xe1, 0x3f, 0x10, 0x34, 0xde, 0x0f, 0x87, 0x39, 0x09, 0x1f, 0xcd, 0x4d, 0xb8, + 0x84, 0x86, 0x9f, 0xa7, 0x1c, 0x3b, 0xbf, 0x1b, 0xd0, 0xd8, 0x53, 0x50, 0xb5, 0xfa, 0xde, 0x11, + 0x88, 0xf1, 0xd7, 0x08, 0x56, 0xb2, 0xaa, 0xc4, 0xcb, 0xea, 0xd7, 0xbc, 0xae, 0x1d, 0xa6, 0xde, + 0x01, 0xf6, 0x7b, 0xfa, 0x1d, 0x60, 0x6d, 0x7d, 0xfe, 0xdb, 0x9f, 0xdf, 0x56, 0x6e, 0x5b, 0x37, + 0xc5, 0x0b, 0xe2, 0x33, 0xd9, 0x4f, 0xba, 0x61, 0x14, 0x7c, 0x4c, 0x07, 0x3c, 0x76, 0xda, 0x4f, + 0xd3, 0x57, 0x45, 0xbc, 0x83, 0xda, 0xf8, 0x19, 0x02, 0x63, 0x6a, 0x46, 0xe1, 0xad, 0xf9, 0x68, + 0x2e, 0x8e, 0x32, 0xb3, 0xc4, 0xe8, 0xb0, 0xee, 0x26, 0x78, 0x6e, 0x61, 0x89, 0x47, 0xdc, 0xf3, + 0x29, 0x34, 0x13, 0x30, 0x4e, 0xfb, 0x29, 0xfe, 0x0e, 0x41, 0x3d, 0x33, 0xb6, 0xf0, 0x02, 0xa9, + 0xe7, 0xcd, 0x4b, 0xd3, 0x29, 0x6d, 0x2f, 0x1b, 0xe6, 0x0c, 0xba, 0x45, 0x6c, 0xe1, 0x2f, 0x11, + 0xac, 0x48, 0xe5, 0xa6, 0x6c, 0x95, 0xc8, 0xbf, 0x14, 0x47, 0xaa, 0x66, 0x66, 0x31, 0x47, 0xa2, + 0x66, 0x5f, 0x20, 0x58, 0xc9, 0xce, 0xcb, 0x45, 0x22, 0xca, 0x9d, 0xac, 0xe6, 0xc6, 0x05, 0x29, + 0xdf, 0x17, 0xef, 0x44, 0xcd, 0x47, 0xbb, 0x44, 0xb5, 
0x7e, 0x40, 0x50, 0xcf, 0xf4, 0x1b, 0xbc, + 0x64, 0x63, 0x2a, 0x52, 0x72, 0x37, 0xc1, 0xb2, 0x6d, 0x6d, 0xe5, 0xd7, 0x26, 0x83, 0xc6, 0xd1, + 0x33, 0x6c, 0x47, 0xcf, 0x66, 0xfc, 0x0d, 0x02, 0x98, 0x0c, 0x67, 0x7c, 0x6f, 0xa1, 0xb2, 0x67, + 0x90, 0x15, 0x77, 0x1c, 0xeb, 0xf5, 0x04, 0x9d, 0x8d, 0xb7, 0x8a, 0x98, 0x4a, 0xa1, 0x09, 0xd2, + 0x7e, 0x44, 0x70, 0x75, 0x7a, 0x72, 0xe3, 0x57, 0x16, 0x2b, 0x76, 0xe6, 0xc1, 0x60, 0xda, 0x65, + 0xcd, 0x95, 0xbe, 0xb3, 0x28, 0x4b, 0x72, 0x28, 0xba, 0x42, 0x3d, 0xd3, 0xa4, 0x71, 0x31, 0x21, + 0x45, 0xd5, 0xdc, 0x4e, 0x90, 0xbc, 0x6a, 0x2e, 0xc5, 0x97, 0x90, 0xfb, 0x57, 0x08, 0xea, 0x99, + 0x27, 0xc4, 0x22, 0x9d, 0xe5, 0xbd, 0x35, 0xe6, 0x8a, 0x5d, 0x91, 0xd3, 0x5e, 0x0a, 0xd2, 0xde, + 0xf7, 0x08, 0xae, 0x0d, 0x82, 0xd1, 0x5c, 0x0c, 0x7b, 0x66, 0x6e, 0xef, 0x3f, 0x16, 0x67, 0x1f, + 0xa3, 0xc7, 0x5d, 0xe5, 0xe7, 0x06, 0x3e, 0x61, 0xae, 0x1d, 0x44, 0xae, 0xe3, 0x52, 0x96, 0x20, + 0x73, 0xe4, 0x16, 0x09, 0xbd, 0xf8, 0xe2, 0xbf, 0xc1, 0xdd, 0xe4, 0xc7, 0xcf, 0x95, 0x97, 0x0f, + 0xa4, 0xff, 0xbe, 0x1f, 0x8c, 0x87, 0xb6, 0x3e, 0xca, 0x4e, 0xce, 0xb0, 0x3f, 0xe8, 0x9c, 0x5c, + 0x4e, 0x42, 0xbd, 0xf6, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x18, 0xc5, 0x74, 0xc7, 0x0e, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go new file mode 100644 index 000000000..981dd5986 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go @@ -0,0 +1,900 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/bigtable/admin/v2/bigtable_table_admin.proto + +package admin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/duration" +import google_protobuf3 "github.com/golang/protobuf/ptypes/empty" +import _ "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] +type CreateTableRequest struct { + // The unique name of the instance in which to create the table. + // Values are of the form `projects//instances/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `/tables/foobar`. + TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId" json:"table_id,omitempty"` + // The Table to create. + Table *Table `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` + // The optional list of row keys that will be used to initially split the + // table into several tablets (tablets are similar to HBase regions). + // Given two split keys, `s1` and `s2`, three tablets will be created, + // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. 
+ // + // Example: + // + // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` + // `"other", "zz"]` + // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` + // * Key assignment: + // - Tablet 1 `[, apple) => {"a"}.` + // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` + // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` + // - Tablet 4 `[customer_2, other) => {"customer_2"}.` + // - Tablet 5 `[other, ) => {"other", "zz"}.` + InitialSplits []*CreateTableRequest_Split `protobuf:"bytes,4,rep,name=initial_splits,json=initialSplits" json:"initial_splits,omitempty"` +} + +func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} } +func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTableRequest) ProtoMessage() {} +func (*CreateTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *CreateTableRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateTableRequest) GetTableId() string { + if m != nil { + return m.TableId + } + return "" +} + +func (m *CreateTableRequest) GetTable() *Table { + if m != nil { + return m.Table + } + return nil +} + +func (m *CreateTableRequest) GetInitialSplits() []*CreateTableRequest_Split { + if m != nil { + return m.InitialSplits + } + return nil +} + +// An initial split point for a newly created table. +type CreateTableRequest_Split struct { + // Row key to use as an initial tablet boundary. 
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *CreateTableRequest_Split) Reset() { *m = CreateTableRequest_Split{} } +func (m *CreateTableRequest_Split) String() string { return proto.CompactTextString(m) } +func (*CreateTableRequest_Split) ProtoMessage() {} +func (*CreateTableRequest_Split) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +func (m *CreateTableRequest_Split) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] +type DropRowRangeRequest struct { + // The unique name of the table on which to drop a range of rows. + // Values are of the form + // `projects//instances//tables/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Delete all rows or by prefix. + // + // Types that are valid to be assigned to Target: + // *DropRowRangeRequest_RowKeyPrefix + // *DropRowRangeRequest_DeleteAllDataFromTable + Target isDropRowRangeRequest_Target `protobuf_oneof:"target"` +} + +func (m *DropRowRangeRequest) Reset() { *m = DropRowRangeRequest{} } +func (m *DropRowRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DropRowRangeRequest) ProtoMessage() {} +func (*DropRowRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type isDropRowRangeRequest_Target interface { + isDropRowRangeRequest_Target() +} + +type DropRowRangeRequest_RowKeyPrefix struct { + RowKeyPrefix []byte `protobuf:"bytes,2,opt,name=row_key_prefix,json=rowKeyPrefix,proto3,oneof"` +} +type DropRowRangeRequest_DeleteAllDataFromTable struct { + DeleteAllDataFromTable bool `protobuf:"varint,3,opt,name=delete_all_data_from_table,json=deleteAllDataFromTable,oneof"` +} + +func (*DropRowRangeRequest_RowKeyPrefix) isDropRowRangeRequest_Target() {} +func (*DropRowRangeRequest_DeleteAllDataFromTable) 
isDropRowRangeRequest_Target() {} + +func (m *DropRowRangeRequest) GetTarget() isDropRowRangeRequest_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *DropRowRangeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DropRowRangeRequest) GetRowKeyPrefix() []byte { + if x, ok := m.GetTarget().(*DropRowRangeRequest_RowKeyPrefix); ok { + return x.RowKeyPrefix + } + return nil +} + +func (m *DropRowRangeRequest) GetDeleteAllDataFromTable() bool { + if x, ok := m.GetTarget().(*DropRowRangeRequest_DeleteAllDataFromTable); ok { + return x.DeleteAllDataFromTable + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*DropRowRangeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DropRowRangeRequest_OneofMarshaler, _DropRowRangeRequest_OneofUnmarshaler, _DropRowRangeRequest_OneofSizer, []interface{}{ + (*DropRowRangeRequest_RowKeyPrefix)(nil), + (*DropRowRangeRequest_DeleteAllDataFromTable)(nil), + } +} + +func _DropRowRangeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DropRowRangeRequest) + // target + switch x := m.Target.(type) { + case *DropRowRangeRequest_RowKeyPrefix: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.RowKeyPrefix) + case *DropRowRangeRequest_DeleteAllDataFromTable: + t := uint64(0) + if x.DeleteAllDataFromTable { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("DropRowRangeRequest.Target has unexpected type %T", x) + } + return nil +} + +func _DropRowRangeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DropRowRangeRequest) + switch tag { + case 2: // target.row_key_prefix + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Target = &DropRowRangeRequest_RowKeyPrefix{x} + return true, err + case 3: // target.delete_all_data_from_table + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Target = &DropRowRangeRequest_DeleteAllDataFromTable{x != 0} + return true, err + default: + return false, nil + } +} + +func _DropRowRangeRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DropRowRangeRequest) + // target + switch x := m.Target.(type) { + case *DropRowRangeRequest_RowKeyPrefix: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RowKeyPrefix))) + n += len(x.RowKeyPrefix) + case *DropRowRangeRequest_DeleteAllDataFromTable: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +type ListTablesRequest struct { + // The unique name of the instance for which tables should be listed. + // Values are of the form `projects//instances/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The view to be applied to the returned tables' fields. + // Defaults to `NAME_ONLY` if unspecified; no others are currently supported. + View Table_View `protobuf:"varint,2,opt,name=view,enum=google.bigtable.admin.v2.Table_View" json:"view,omitempty"` + // The value of `next_page_token` returned by a previous call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListTablesRequest) Reset() { *m = ListTablesRequest{} } +func (m *ListTablesRequest) String() string { return proto.CompactTextString(m) } +func (*ListTablesRequest) ProtoMessage() {} +func (*ListTablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ListTablesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListTablesRequest) GetView() Table_View { + if m != nil { + return m.View + } + return Table_VIEW_UNSPECIFIED +} + +func (m *ListTablesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +type ListTablesResponse struct { + // The tables present in the requested instance. + Tables []*Table `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + // Set if not all tables could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} } +func (m *ListTablesResponse) String() string { return proto.CompactTextString(m) } +func (*ListTablesResponse) ProtoMessage() {} +func (*ListTablesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *ListTablesResponse) GetTables() []*Table { + if m != nil { + return m.Tables + } + return nil +} + +func (m *ListTablesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] +type GetTableRequest struct { + // The unique name of the requested table. + // Values are of the form + // `projects//instances//tables/
`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The view to be applied to the returned table's fields. + // Defaults to `SCHEMA_VIEW` if unspecified. + View Table_View `protobuf:"varint,2,opt,name=view,enum=google.bigtable.admin.v2.Table_View" json:"view,omitempty"` +} + +func (m *GetTableRequest) Reset() { *m = GetTableRequest{} } +func (m *GetTableRequest) String() string { return proto.CompactTextString(m) } +func (*GetTableRequest) ProtoMessage() {} +func (*GetTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *GetTableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetTableRequest) GetView() Table_View { + if m != nil { + return m.View + } + return Table_VIEW_UNSPECIFIED +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] +type DeleteTableRequest struct { + // The unique name of the table to be deleted. + // Values are of the form + // `projects//instances//tables/
`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} } +func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTableRequest) ProtoMessage() {} +func (*DeleteTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *DeleteTableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] +type ModifyColumnFamiliesRequest struct { + // The unique name of the table whose families should be modified. + // Values are of the form + // `projects//instances//tables/
`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). + Modifications []*ModifyColumnFamiliesRequest_Modification `protobuf:"bytes,2,rep,name=modifications" json:"modifications,omitempty"` +} + +func (m *ModifyColumnFamiliesRequest) Reset() { *m = ModifyColumnFamiliesRequest{} } +func (m *ModifyColumnFamiliesRequest) String() string { return proto.CompactTextString(m) } +func (*ModifyColumnFamiliesRequest) ProtoMessage() {} +func (*ModifyColumnFamiliesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *ModifyColumnFamiliesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ModifyColumnFamiliesRequest) GetModifications() []*ModifyColumnFamiliesRequest_Modification { + if m != nil { + return m.Modifications + } + return nil +} + +// A create, update, or delete of a particular column family. +type ModifyColumnFamiliesRequest_Modification struct { + // The ID of the column family to be modified. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // Column familiy modifications. 
+ // + // Types that are valid to be assigned to Mod: + // *ModifyColumnFamiliesRequest_Modification_Create + // *ModifyColumnFamiliesRequest_Modification_Update + // *ModifyColumnFamiliesRequest_Modification_Drop + Mod isModifyColumnFamiliesRequest_Modification_Mod `protobuf_oneof:"mod"` +} + +func (m *ModifyColumnFamiliesRequest_Modification) Reset() { + *m = ModifyColumnFamiliesRequest_Modification{} +} +func (m *ModifyColumnFamiliesRequest_Modification) String() string { return proto.CompactTextString(m) } +func (*ModifyColumnFamiliesRequest_Modification) ProtoMessage() {} +func (*ModifyColumnFamiliesRequest_Modification) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{6, 0} +} + +type isModifyColumnFamiliesRequest_Modification_Mod interface { + isModifyColumnFamiliesRequest_Modification_Mod() +} + +type ModifyColumnFamiliesRequest_Modification_Create struct { + Create *ColumnFamily `protobuf:"bytes,2,opt,name=create,oneof"` +} +type ModifyColumnFamiliesRequest_Modification_Update struct { + Update *ColumnFamily `protobuf:"bytes,3,opt,name=update,oneof"` +} +type ModifyColumnFamiliesRequest_Modification_Drop struct { + Drop bool `protobuf:"varint,4,opt,name=drop,oneof"` +} + +func (*ModifyColumnFamiliesRequest_Modification_Create) isModifyColumnFamiliesRequest_Modification_Mod() { +} +func (*ModifyColumnFamiliesRequest_Modification_Update) isModifyColumnFamiliesRequest_Modification_Mod() { +} +func (*ModifyColumnFamiliesRequest_Modification_Drop) isModifyColumnFamiliesRequest_Modification_Mod() { +} + +func (m *ModifyColumnFamiliesRequest_Modification) GetMod() isModifyColumnFamiliesRequest_Modification_Mod { + if m != nil { + return m.Mod + } + return nil +} + +func (m *ModifyColumnFamiliesRequest_Modification) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ModifyColumnFamiliesRequest_Modification) GetCreate() *ColumnFamily { + if x, ok := m.GetMod().(*ModifyColumnFamiliesRequest_Modification_Create); ok { + return 
x.Create + } + return nil +} + +func (m *ModifyColumnFamiliesRequest_Modification) GetUpdate() *ColumnFamily { + if x, ok := m.GetMod().(*ModifyColumnFamiliesRequest_Modification_Update); ok { + return x.Update + } + return nil +} + +func (m *ModifyColumnFamiliesRequest_Modification) GetDrop() bool { + if x, ok := m.GetMod().(*ModifyColumnFamiliesRequest_Modification_Drop); ok { + return x.Drop + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ModifyColumnFamiliesRequest_Modification) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ModifyColumnFamiliesRequest_Modification_OneofMarshaler, _ModifyColumnFamiliesRequest_Modification_OneofUnmarshaler, _ModifyColumnFamiliesRequest_Modification_OneofSizer, []interface{}{ + (*ModifyColumnFamiliesRequest_Modification_Create)(nil), + (*ModifyColumnFamiliesRequest_Modification_Update)(nil), + (*ModifyColumnFamiliesRequest_Modification_Drop)(nil), + } +} + +func _ModifyColumnFamiliesRequest_Modification_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ModifyColumnFamiliesRequest_Modification) + // mod + switch x := m.Mod.(type) { + case *ModifyColumnFamiliesRequest_Modification_Create: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Create); err != nil { + return err + } + case *ModifyColumnFamiliesRequest_Modification_Update: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Update); err != nil { + return err + } + case *ModifyColumnFamiliesRequest_Modification_Drop: + t := uint64(0) + if x.Drop { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("ModifyColumnFamiliesRequest_Modification.Mod has unexpected type %T", x) + } + return nil +} + +func 
_ModifyColumnFamiliesRequest_Modification_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ModifyColumnFamiliesRequest_Modification) + switch tag { + case 2: // mod.create + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ColumnFamily) + err := b.DecodeMessage(msg) + m.Mod = &ModifyColumnFamiliesRequest_Modification_Create{msg} + return true, err + case 3: // mod.update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ColumnFamily) + err := b.DecodeMessage(msg) + m.Mod = &ModifyColumnFamiliesRequest_Modification_Update{msg} + return true, err + case 4: // mod.drop + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Mod = &ModifyColumnFamiliesRequest_Modification_Drop{x != 0} + return true, err + default: + return false, nil + } +} + +func _ModifyColumnFamiliesRequest_Modification_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ModifyColumnFamiliesRequest_Modification) + // mod + switch x := m.Mod.(type) { + case *ModifyColumnFamiliesRequest_Modification_Create: + s := proto.Size(x.Create) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ModifyColumnFamiliesRequest_Modification_Update: + s := proto.Size(x.Update) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ModifyColumnFamiliesRequest_Modification_Drop: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*CreateTableRequest)(nil), "google.bigtable.admin.v2.CreateTableRequest") + proto.RegisterType((*CreateTableRequest_Split)(nil), "google.bigtable.admin.v2.CreateTableRequest.Split") + proto.RegisterType((*DropRowRangeRequest)(nil), 
"google.bigtable.admin.v2.DropRowRangeRequest") + proto.RegisterType((*ListTablesRequest)(nil), "google.bigtable.admin.v2.ListTablesRequest") + proto.RegisterType((*ListTablesResponse)(nil), "google.bigtable.admin.v2.ListTablesResponse") + proto.RegisterType((*GetTableRequest)(nil), "google.bigtable.admin.v2.GetTableRequest") + proto.RegisterType((*DeleteTableRequest)(nil), "google.bigtable.admin.v2.DeleteTableRequest") + proto.RegisterType((*ModifyColumnFamiliesRequest)(nil), "google.bigtable.admin.v2.ModifyColumnFamiliesRequest") + proto.RegisterType((*ModifyColumnFamiliesRequest_Modification)(nil), "google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for BigtableTableAdmin service + +type BigtableTableAdminClient interface { + // Creates a new table in the specified instance. + // The table can be created with a full set of initial column families, + // specified in the request. + CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*Table, error) + // Lists all tables served from a specified instance. + ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) + // Gets metadata information about the specified table. + GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) + // Permanently deletes a specified table and all of its data. + DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Performs a series of column family modifications on the specified table. 
+ // Either all or none of the modifications will occur before this method + // returns, but data requests received prior to that point may see a table + // where only some modifications have taken effect. + ModifyColumnFamilies(ctx context.Context, in *ModifyColumnFamiliesRequest, opts ...grpc.CallOption) (*Table, error) + // Permanently drop/delete a row range from a specified table. The request can + // specify whether to delete all rows in a table, or only those that match a + // particular prefix. + DropRowRange(ctx context.Context, in *DropRowRangeRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) +} + +type bigtableTableAdminClient struct { + cc *grpc.ClientConn +} + +func NewBigtableTableAdminClient(cc *grpc.ClientConn) BigtableTableAdminClient { + return &bigtableTableAdminClient{cc} +} + +func (c *bigtableTableAdminClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*Table, error) { + out := new(Table) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableAdminClient) ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) { + out := new(ListTablesResponse) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableAdminClient) GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) { + out := new(Table) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableAdminClient) ModifyColumnFamilies(ctx context.Context, in *ModifyColumnFamiliesRequest, opts ...grpc.CallOption) (*Table, error) { + out := new(Table) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableTableAdminClient) DropRowRange(ctx context.Context, in *DropRowRangeRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BigtableTableAdmin service + +type BigtableTableAdminServer interface { + // Creates a new table in the specified instance. + // The table can be created with a full set of initial column families, + // specified in the request. + CreateTable(context.Context, *CreateTableRequest) (*Table, error) + // Lists all tables served from a specified instance. + ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) + // Gets metadata information about the specified table. + GetTable(context.Context, *GetTableRequest) (*Table, error) + // Permanently deletes a specified table and all of its data. + DeleteTable(context.Context, *DeleteTableRequest) (*google_protobuf3.Empty, error) + // Performs a series of column family modifications on the specified table. 
+ // Either all or none of the modifications will occur before this method + // returns, but data requests received prior to that point may see a table + // where only some modifications have taken effect. + ModifyColumnFamilies(context.Context, *ModifyColumnFamiliesRequest) (*Table, error) + // Permanently drop/delete a row range from a specified table. The request can + // specify whether to delete all rows in a table, or only those that match a + // particular prefix. + DropRowRange(context.Context, *DropRowRangeRequest) (*google_protobuf3.Empty, error) +} + +func RegisterBigtableTableAdminServer(s *grpc.Server, srv BigtableTableAdminServer) { + s.RegisterService(&_BigtableTableAdmin_serviceDesc, srv) +} + +func _BigtableTableAdmin_CreateTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).CreateTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).CreateTable(ctx, req.(*CreateTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableAdmin_ListTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTablesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).ListTables(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(BigtableTableAdminServer).ListTables(ctx, req.(*ListTablesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableAdmin_GetTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).GetTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).GetTable(ctx, req.(*GetTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableAdmin_DeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).DeleteTable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).DeleteTable(ctx, req.(*DeleteTableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableAdmin_ModifyColumnFamilies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ModifyColumnFamiliesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).ModifyColumnFamilies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).ModifyColumnFamilies(ctx, req.(*ModifyColumnFamiliesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableTableAdmin_DropRowRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DropRowRangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableTableAdminServer).DropRowRange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableTableAdminServer).DropRowRange(ctx, req.(*DropRowRangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BigtableTableAdmin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.admin.v2.BigtableTableAdmin", + HandlerType: (*BigtableTableAdminServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateTable", + Handler: _BigtableTableAdmin_CreateTable_Handler, + }, + { + MethodName: "ListTables", + Handler: _BigtableTableAdmin_ListTables_Handler, + }, + { + MethodName: "GetTable", + Handler: _BigtableTableAdmin_GetTable_Handler, + }, + { + MethodName: "DeleteTable", + Handler: _BigtableTableAdmin_DeleteTable_Handler, + }, + { + MethodName: "ModifyColumnFamilies", + Handler: _BigtableTableAdmin_ModifyColumnFamilies_Handler, + }, + { + MethodName: "DropRowRange", + Handler: _BigtableTableAdmin_DropRowRange_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/bigtable/admin/v2/bigtable_table_admin.proto", +} + +func init() { + proto.RegisterFile("google/bigtable/admin/v2/bigtable_table_admin.proto", fileDescriptor1) +} + +var 
fileDescriptor1 = []byte{ + // 922 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xce, 0xd8, 0x4e, 0x9a, 0x1c, 0x3b, 0x29, 0x0c, 0x55, 0x70, 0xdd, 0xd2, 0x46, 0x4b, 0x15, + 0x05, 0x13, 0x76, 0xa5, 0xad, 0xa2, 0xa2, 0xd0, 0x0a, 0xea, 0x84, 0x36, 0xfc, 0x49, 0xd1, 0x52, + 0x55, 0x82, 0x9b, 0xd5, 0xc4, 0x3b, 0x59, 0x86, 0xec, 0xce, 0x2c, 0xbb, 0xe3, 0xb8, 0x16, 0xea, + 0x0d, 0x42, 0x42, 0xe2, 0xb6, 0x57, 0x11, 0x6f, 0xc0, 0x13, 0x20, 0x6e, 0x78, 0x08, 0xae, 0xb9, + 0xe3, 0x11, 0x78, 0x00, 0x34, 0x3f, 0x4e, 0x9c, 0x38, 0x6b, 0xd7, 0xb9, 0xb1, 0x66, 0xce, 0xcf, + 0x77, 0xbe, 0x39, 0x67, 0xbe, 0x1d, 0xc3, 0xfd, 0x58, 0x88, 0x38, 0xa1, 0xde, 0x01, 0x8b, 0x25, + 0x39, 0x48, 0xa8, 0x47, 0xa2, 0x94, 0x71, 0xef, 0xd8, 0x3f, 0xb5, 0x84, 0xe6, 0x57, 0xdb, 0xdd, + 0x2c, 0x17, 0x52, 0xe0, 0xa6, 0x49, 0x72, 0x87, 0x21, 0xae, 0x71, 0x1e, 0xfb, 0xad, 0xdb, 0x16, + 0x8e, 0x64, 0xcc, 0x23, 0x9c, 0x0b, 0x49, 0x24, 0x13, 0xbc, 0x30, 0x79, 0xad, 0x7b, 0xa5, 0xc5, + 0x0c, 0x8c, 0x89, 0x7a, 0xd7, 0x46, 0x25, 0x82, 0xc7, 0x79, 0x8f, 0x73, 0xc6, 0x63, 0x4f, 0x64, + 0x34, 0x3f, 0x07, 0x75, 0xc7, 0x06, 0xe9, 0xdd, 0x41, 0xef, 0xd0, 0x8b, 0x7a, 0x26, 0xc0, 0xfa, + 0x6f, 0x5d, 0xf4, 0xd3, 0x34, 0x93, 0x03, 0xeb, 0xbc, 0x7b, 0xd1, 0x29, 0x59, 0x4a, 0x0b, 0x49, + 0xd2, 0xcc, 0x04, 0x38, 0xff, 0x21, 0xc0, 0x3b, 0x39, 0x25, 0x92, 0x3e, 0x53, 0xc4, 0x02, 0xfa, + 0x43, 0x8f, 0x16, 0x12, 0xaf, 0xc2, 0x42, 0x46, 0x72, 0xca, 0x65, 0x13, 0xad, 0xa1, 0x8d, 0xa5, + 0xc0, 0xee, 0xf0, 0x4d, 0x58, 0x34, 0x4d, 0x62, 0x51, 0xb3, 0xa2, 0x3d, 0xd7, 0xf4, 0xfe, 0xb3, + 0x08, 0x6f, 0xc1, 0xbc, 0x5e, 0x36, 0xab, 0x6b, 0x68, 0xa3, 0xee, 0xdf, 0x75, 0xcb, 0x5a, 0xe7, + 0x9a, 0x4a, 0x26, 0x1a, 0x7f, 0x03, 0x2b, 0x8c, 0x33, 0xc9, 0x48, 0x12, 0x16, 0x59, 0xc2, 0x64, + 0xd1, 0xac, 0xad, 0x55, 0x37, 0xea, 0xbe, 0x5f, 0x9e, 0x3f, 0xce, 0xd7, 0xfd, 0x5a, 0xa5, 0x06, + 0xcb, 0x16, 0x49, 0xef, 0x8a, 0xd6, 0x4d, 0x98, 0xd7, 0x2b, 
0xfc, 0x06, 0x54, 0x8f, 0xe8, 0x40, + 0x1f, 0xa5, 0x11, 0xa8, 0xa5, 0x73, 0x82, 0xe0, 0xad, 0xdd, 0x5c, 0x64, 0x81, 0xe8, 0x07, 0x84, + 0xc7, 0xa7, 0xe7, 0xc6, 0x50, 0xe3, 0x24, 0xa5, 0xf6, 0xd4, 0x7a, 0x8d, 0xd7, 0x61, 0x25, 0x17, + 0xfd, 0xf0, 0x88, 0x0e, 0xc2, 0x2c, 0xa7, 0x87, 0xec, 0x85, 0x3e, 0x79, 0x63, 0x6f, 0x2e, 0x68, + 0xe4, 0xa2, 0xff, 0x05, 0x1d, 0xec, 0x6b, 0x2b, 0x7e, 0x08, 0xad, 0x88, 0x26, 0x54, 0xd2, 0x90, + 0x24, 0x49, 0x18, 0x11, 0x49, 0xc2, 0xc3, 0x5c, 0xa4, 0xe1, 0x59, 0x57, 0x16, 0xf7, 0xe6, 0x82, + 0x55, 0x13, 0xf3, 0x38, 0x49, 0x76, 0x89, 0x24, 0x4f, 0x72, 0x91, 0xea, 0x83, 0x74, 0x16, 0x61, + 0x41, 0x92, 0x3c, 0xa6, 0xd2, 0xf9, 0x19, 0xc1, 0x9b, 0x5f, 0xb2, 0x42, 0x6a, 0x7b, 0x31, 0x6d, + 0x22, 0x1f, 0x42, 0xed, 0x98, 0xd1, 0xbe, 0xe6, 0xb4, 0xe2, 0xdf, 0x9b, 0xd2, 0x75, 0xf7, 0x39, + 0xa3, 0xfd, 0x40, 0x67, 0xe0, 0x77, 0x00, 0x32, 0x12, 0xd3, 0x50, 0x8a, 0x23, 0xca, 0x35, 0xbf, + 0xa5, 0x60, 0x49, 0x59, 0x9e, 0x29, 0x83, 0xd3, 0x03, 0x3c, 0xca, 0xa2, 0xc8, 0x04, 0x2f, 0x28, + 0x7e, 0xa0, 0x68, 0x2a, 0x4b, 0x13, 0xe9, 0x31, 0x4d, 0x1d, 0xb3, 0x0d, 0xc7, 0xeb, 0x70, 0x9d, + 0xd3, 0x17, 0x32, 0x1c, 0x29, 0x69, 0x2e, 0xd0, 0xb2, 0x32, 0xef, 0x9f, 0x96, 0x0d, 0xe1, 0xfa, + 0x53, 0x2a, 0xcf, 0x5d, 0xc6, 0xcb, 0x86, 0x72, 0xe5, 0x63, 0x3b, 0x1b, 0x80, 0x77, 0xf5, 0x08, + 0xa6, 0xd5, 0x70, 0xfe, 0xa9, 0xc0, 0xad, 0xaf, 0x44, 0xc4, 0x0e, 0x07, 0x3b, 0x22, 0xe9, 0xa5, + 0xfc, 0x09, 0x49, 0x59, 0xc2, 0xce, 0x46, 0x72, 0x19, 0xaf, 0xef, 0x60, 0x39, 0x55, 0x29, 0xac, + 0x6b, 0x44, 0xdc, 0xac, 0xe8, 0x36, 0x75, 0xca, 0x09, 0x4e, 0xa8, 0x60, 0x7c, 0x16, 0x2a, 0x38, + 0x0f, 0xdc, 0xfa, 0x0b, 0x41, 0x63, 0xd4, 0x8f, 0x57, 0xa0, 0xc2, 0x22, 0x4b, 0xa6, 0xc2, 0x22, + 0xfc, 0x09, 0x2c, 0x74, 0xb5, 0x52, 0x74, 0x93, 0xea, 0xfe, 0xfa, 0x04, 0x45, 0x9d, 0x55, 0x1f, + 0xec, 0xcd, 0x05, 0x36, 0x4f, 0x21, 0xf4, 0xb2, 0x48, 0x21, 0x54, 0x67, 0x45, 0x30, 0x79, 0xf8, + 0x06, 0xd4, 0xa2, 0x5c, 0x64, 0xcd, 0x9a, 0xbd, 0xfd, 0x7a, 0xd7, 0x99, 0x87, 0x6a, 0x2a, 0x22, + 
0xff, 0x8f, 0x6b, 0x80, 0x3b, 0x16, 0x49, 0x0f, 0xe3, 0xb1, 0x42, 0xc3, 0xaf, 0x10, 0xd4, 0x47, + 0x24, 0x8e, 0x37, 0x67, 0xf9, 0x12, 0xb4, 0xa6, 0x5d, 0x48, 0x67, 0xeb, 0xa7, 0xbf, 0xff, 0x7d, + 0x55, 0xf1, 0x9c, 0xb6, 0xfa, 0x1a, 0xff, 0x68, 0x54, 0xf4, 0x28, 0xcb, 0xc5, 0xf7, 0xb4, 0x2b, + 0x0b, 0xaf, 0xed, 0x31, 0x5e, 0x48, 0xc2, 0xbb, 0xb4, 0xf0, 0xda, 0x2f, 0xcd, 0xd7, 0xba, 0xd8, + 0x46, 0x6d, 0xfc, 0x1b, 0x02, 0x38, 0xd3, 0x03, 0x7e, 0xbf, 0xbc, 0xcc, 0x98, 0x76, 0x5b, 0x9b, + 0xaf, 0x17, 0x6c, 0x24, 0xe6, 0xf8, 0x9a, 0xe0, 0x26, 0x9e, 0x81, 0x20, 0xfe, 0x15, 0xc1, 0xe2, + 0x50, 0x36, 0xf8, 0xbd, 0xf2, 0x72, 0x17, 0xa4, 0x35, 0xbd, 0x5b, 0xe7, 0xc9, 0xa8, 0x2b, 0x5e, + 0x42, 0xc5, 0x32, 0xf1, 0xda, 0x2f, 0xf1, 0x2f, 0x08, 0xea, 0x23, 0x12, 0x9b, 0x34, 0xc0, 0x71, + 0x25, 0xb6, 0x56, 0x87, 0xd1, 0xc3, 0x37, 0xcb, 0xfd, 0x54, 0x3d, 0x68, 0x43, 0x26, 0xed, 0x59, + 0x98, 0xfc, 0x89, 0xe0, 0xc6, 0x65, 0xfa, 0xc2, 0x5b, 0x57, 0xd2, 0xe3, 0xf4, 0x76, 0x7d, 0xae, + 0x49, 0xee, 0x3a, 0x1f, 0xbf, 0x3e, 0xc9, 0xed, 0xf4, 0x92, 0x82, 0xea, 0xc6, 0x9d, 0x20, 0x68, + 0x8c, 0xbe, 0x51, 0xf8, 0x83, 0x09, 0x7d, 0x1c, 0x7f, 0xcb, 0x4a, 0x1b, 0xd9, 0xd1, 0x1c, 0x1f, + 0x3a, 0x0f, 0x66, 0xe0, 0x18, 0x8d, 0xe0, 0x6f, 0xa3, 0x76, 0xe7, 0x04, 0xc1, 0xed, 0xae, 0x48, + 0x4b, 0x09, 0x75, 0xde, 0x1e, 0x17, 0xf6, 0xbe, 0xa2, 0xb1, 0x8f, 0xbe, 0x7d, 0x64, 0x93, 0x62, + 0x91, 0x10, 0x1e, 0xbb, 0x22, 0x8f, 0xbd, 0x98, 0x72, 0x4d, 0xd2, 0x33, 0x2e, 0x92, 0xb1, 0x62, + 0xfc, 0xaf, 0xd3, 0x47, 0x7a, 0xf1, 0x7b, 0xe5, 0xce, 0x53, 0x93, 0xbf, 0x93, 0x88, 0x5e, 0xe4, + 0x0e, 0xeb, 0xb8, 0xba, 0x86, 0xfb, 0xdc, 0x3f, 0x58, 0xd0, 0x50, 0xf7, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x47, 0xff, 0x6e, 0x8f, 0xf1, 0x09, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go new file mode 100644 index 000000000..5519217ec --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go @@ -0,0 +1,69 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/v2/common.proto + +package admin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Storage media types for persisting Bigtable data. +type StorageType int32 + +const ( + // The user did not specify a storage type. + StorageType_STORAGE_TYPE_UNSPECIFIED StorageType = 0 + // Flash (SSD) storage should be used. + StorageType_SSD StorageType = 1 + // Magnetic drive (HDD) storage should be used. + StorageType_HDD StorageType = 2 +) + +var StorageType_name = map[int32]string{ + 0: "STORAGE_TYPE_UNSPECIFIED", + 1: "SSD", + 2: "HDD", +} +var StorageType_value = map[string]int32{ + "STORAGE_TYPE_UNSPECIFIED": 0, + "SSD": 1, + "HDD": 2, +} + +func (x StorageType) String() string { + return proto.EnumName(StorageType_name, int32(x)) +} +func (StorageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func init() { + proto.RegisterEnum("google.bigtable.admin.v2.StorageType", StorageType_name, StorageType_value) +} + +func init() { proto.RegisterFile("google/bigtable/admin/v2/common.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xcf, 0x4b, 0xc3, 0x30, + 0x14, 0xc7, 0xed, 0x04, 0x85, 0xec, 0x52, 0x7a, 0x1a, 0xa3, 0xe8, 0xc9, 0x8b, 0x87, 0x04, 0xea, + 0x51, 0x76, 0x58, 0x7f, 0x38, 0x77, 0xd1, 0x62, 0xaa, 0xa0, 0x97, 0xf1, 0xba, 0xc5, 0x10, 0x68, + 0xf2, 0x42, 0x9b, 0x0d, 0xfc, 0x23, 0xfc, 0x47, 0xfc, 0x2b, 0x65, 0x49, 0x77, 0x12, 0x6f, 0x2f, + 
0xbc, 0xcf, 0xf7, 0x47, 0x1e, 0xb9, 0x91, 0x88, 0xb2, 0x13, 0xac, 0x55, 0xd2, 0x41, 0xdb, 0x09, + 0x06, 0x3b, 0xad, 0x0c, 0x3b, 0x64, 0x6c, 0x8b, 0x5a, 0xa3, 0xa1, 0xb6, 0x47, 0x87, 0xc9, 0x2c, + 0x60, 0xf4, 0x84, 0x51, 0x8f, 0xd1, 0x43, 0x36, 0x4f, 0x47, 0x03, 0xb0, 0x8a, 0x81, 0x31, 0xe8, + 0xc0, 0x29, 0x34, 0x43, 0xd0, 0xcd, 0xaf, 0xc7, 0xad, 0x7f, 0xb5, 0xfb, 0x4f, 0xe6, 0x94, 0x16, + 0x83, 0x03, 0x6d, 0x03, 0x70, 0xbb, 0x20, 0x53, 0xee, 0xb0, 0x07, 0x29, 0x9a, 0x2f, 0x2b, 0x92, + 0x94, 0xcc, 0x78, 0xf3, 0xfc, 0xb2, 0x5c, 0x55, 0x9b, 0xe6, 0xbd, 0xae, 0x36, 0xaf, 0x4f, 0xbc, + 0xae, 0x8a, 0xf5, 0xc3, 0xba, 0x2a, 0xe3, 0xb3, 0xe4, 0x92, 0x9c, 0x73, 0x5e, 0xc6, 0xd1, 0x71, + 0x78, 0x2c, 0xcb, 0x78, 0x92, 0x7f, 0x47, 0x24, 0xdd, 0xa2, 0xa6, 0xff, 0xd5, 0xcb, 0xa7, 0x85, + 0xff, 0x46, 0x7d, 0x0c, 0xab, 0xa3, 0x8f, 0xc5, 0x08, 0x4a, 0xec, 0xc0, 0x48, 0x8a, 0xbd, 0x64, + 0x52, 0x18, 0x5f, 0x85, 0x85, 0x15, 0x58, 0x35, 0xfc, 0xbd, 0xc6, 0xbd, 0x1f, 0x7e, 0x26, 0x57, + 0xab, 0xa0, 0x2f, 0x3a, 0xdc, 0xef, 0x68, 0x7e, 0x8a, 0x5b, 0xfa, 0xb8, 0xb7, 0xac, 0xbd, 0xf0, + 0x56, 0x77, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xcd, 0xe8, 0x84, 0xa9, 0x57, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go new file mode 100644 index 000000000..a57ede086 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go @@ -0,0 +1,289 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/v2/instance.proto + +package admin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Possible states of an instance. 
+type Instance_State int32 + +const ( + // The state of the instance could not be determined. + Instance_STATE_NOT_KNOWN Instance_State = 0 + // The instance has been successfully created and can serve requests + // to its tables. + Instance_READY Instance_State = 1 + // The instance is currently being created, and may be destroyed + // if the creation process encounters an error. + Instance_CREATING Instance_State = 2 +) + +var Instance_State_name = map[int32]string{ + 0: "STATE_NOT_KNOWN", + 1: "READY", + 2: "CREATING", +} +var Instance_State_value = map[string]int32{ + "STATE_NOT_KNOWN": 0, + "READY": 1, + "CREATING": 2, +} + +func (x Instance_State) String() string { + return proto.EnumName(Instance_State_name, int32(x)) +} +func (Instance_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } + +// The type of the instance. +type Instance_Type int32 + +const ( + // The type of the instance is unspecified. If set when creating an + // instance, a `PRODUCTION` instance will be created. If set when updating + // an instance, the type will be left unchanged. + Instance_TYPE_UNSPECIFIED Instance_Type = 0 + // An instance meant for production use. `serve_nodes` must be set + // on the cluster. + Instance_PRODUCTION Instance_Type = 1 + // The instance is meant for development and testing purposes only; it has + // no performance or uptime guarantees and is not covered by SLA. + // After a development instance is created, it can be upgraded by + // updating the instance to type `PRODUCTION`. An instance created + // as a production instance cannot be changed to a development instance. + // When creating a development instance, `serve_nodes` on the cluster must + // not be set. 
+ Instance_DEVELOPMENT Instance_Type = 2 +) + +var Instance_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "PRODUCTION", + 2: "DEVELOPMENT", +} +var Instance_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "PRODUCTION": 1, + "DEVELOPMENT": 2, +} + +func (x Instance_Type) String() string { + return proto.EnumName(Instance_Type_name, int32(x)) +} +func (Instance_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 1} } + +// Possible states of a cluster. +type Cluster_State int32 + +const ( + // The state of the cluster could not be determined. + Cluster_STATE_NOT_KNOWN Cluster_State = 0 + // The cluster has been successfully created and is ready to serve requests. + Cluster_READY Cluster_State = 1 + // The cluster is currently being created, and may be destroyed + // if the creation process encounters an error. + // A cluster may not be able to serve requests while being created. + Cluster_CREATING Cluster_State = 2 + // The cluster is currently being resized, and may revert to its previous + // node count if the process encounters an error. + // A cluster is still capable of serving requests while being resized, + // but may exhibit performance as if its number of allocated nodes is + // between the starting and requested states. + Cluster_RESIZING Cluster_State = 3 + // The cluster has no backing nodes. The data (tables) still + // exist, but no operations can be performed on the cluster. 
+ Cluster_DISABLED Cluster_State = 4 +) + +var Cluster_State_name = map[int32]string{ + 0: "STATE_NOT_KNOWN", + 1: "READY", + 2: "CREATING", + 3: "RESIZING", + 4: "DISABLED", +} +var Cluster_State_value = map[string]int32{ + "STATE_NOT_KNOWN": 0, + "READY": 1, + "CREATING": 2, + "RESIZING": 3, + "DISABLED": 4, +} + +func (x Cluster_State) String() string { + return proto.EnumName(Cluster_State_name, int32(x)) +} +func (Cluster_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{1, 0} } + +// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and +// the resources that serve them. +// All tables in an instance are served from a single +// [Cluster][google.bigtable.admin.v2.Cluster]. +type Instance struct { + // (`OutputOnly`) + // The unique name of the instance. Values are of the form + // `projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The descriptive name for this instance as it appears in UIs. + // Can be changed at any time, but should be kept globally unique + // to avoid confusion. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // (`OutputOnly`) + // The current state of the instance. + State Instance_State `protobuf:"varint,3,opt,name=state,enum=google.bigtable.admin.v2.Instance_State" json:"state,omitempty"` + // The type of the instance. Defaults to `PRODUCTION`. 
+ Type Instance_Type `protobuf:"varint,4,opt,name=type,enum=google.bigtable.admin.v2.Instance_Type" json:"type,omitempty"` +} + +func (m *Instance) Reset() { *m = Instance{} } +func (m *Instance) String() string { return proto.CompactTextString(m) } +func (*Instance) ProtoMessage() {} +func (*Instance) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *Instance) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Instance) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Instance) GetState() Instance_State { + if m != nil { + return m.State + } + return Instance_STATE_NOT_KNOWN +} + +func (m *Instance) GetType() Instance_Type { + if m != nil { + return m.Type + } + return Instance_TYPE_UNSPECIFIED +} + +// A resizable group of nodes in a particular cloud location, capable +// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent +// [Instance][google.bigtable.admin.v2.Instance]. +type Cluster struct { + // (`OutputOnly`) + // The unique name of the cluster. Values are of the form + // `projects//instances//clusters/[a-z][-a-z0-9]*`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // (`CreationOnly`) + // The location where this cluster's nodes and storage reside. For best + // performance, clients should be located as close as possible to this cluster. + // Currently only zones are supported, so values should be of the form + // `projects//locations/`. + Location string `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` + // (`OutputOnly`) + // The current state of the cluster. + State Cluster_State `protobuf:"varint,3,opt,name=state,enum=google.bigtable.admin.v2.Cluster_State" json:"state,omitempty"` + // The number of nodes allocated to this cluster. More nodes enable higher + // throughput and more consistent performance. 
+ ServeNodes int32 `protobuf:"varint,4,opt,name=serve_nodes,json=serveNodes" json:"serve_nodes,omitempty"` + // (`CreationOnly`) + // The type of storage used by this cluster to serve its + // parent instance's tables, unless explicitly overridden. + DefaultStorageType StorageType `protobuf:"varint,5,opt,name=default_storage_type,json=defaultStorageType,enum=google.bigtable.admin.v2.StorageType" json:"default_storage_type,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *Cluster) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cluster) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *Cluster) GetState() Cluster_State { + if m != nil { + return m.State + } + return Cluster_STATE_NOT_KNOWN +} + +func (m *Cluster) GetServeNodes() int32 { + if m != nil { + return m.ServeNodes + } + return 0 +} + +func (m *Cluster) GetDefaultStorageType() StorageType { + if m != nil { + return m.DefaultStorageType + } + return StorageType_STORAGE_TYPE_UNSPECIFIED +} + +func init() { + proto.RegisterType((*Instance)(nil), "google.bigtable.admin.v2.Instance") + proto.RegisterType((*Cluster)(nil), "google.bigtable.admin.v2.Cluster") + proto.RegisterEnum("google.bigtable.admin.v2.Instance_State", Instance_State_name, Instance_State_value) + proto.RegisterEnum("google.bigtable.admin.v2.Instance_Type", Instance_Type_name, Instance_Type_value) + proto.RegisterEnum("google.bigtable.admin.v2.Cluster_State", Cluster_State_name, Cluster_State_value) +} + +func init() { proto.RegisterFile("google/bigtable/admin/v2/instance.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xdd, 0x6a, 
0xdb, 0x30, + 0x14, 0xae, 0xdd, 0x64, 0x4b, 0x4f, 0xba, 0xd6, 0x68, 0xbd, 0x08, 0xa1, 0x6c, 0x5d, 0xa0, 0x34, + 0x57, 0x36, 0x64, 0xec, 0xaa, 0x64, 0x90, 0xd8, 0x5a, 0x31, 0xeb, 0x6c, 0xcf, 0x76, 0x5b, 0xda, + 0x1b, 0xa3, 0xc4, 0x9a, 0x31, 0xd8, 0x92, 0xb1, 0x95, 0x40, 0x9e, 0x62, 0xef, 0xb1, 0xeb, 0xbd, + 0xc2, 0xde, 0x6b, 0x58, 0x76, 0xc6, 0xca, 0x96, 0x31, 0x7a, 0xa7, 0x73, 0xce, 0xf7, 0x23, 0x7d, + 0x3a, 0x70, 0x91, 0x70, 0x9e, 0x64, 0xd4, 0x58, 0xa4, 0x89, 0x20, 0x8b, 0x8c, 0x1a, 0x24, 0xce, + 0x53, 0x66, 0xac, 0x27, 0x46, 0xca, 0x2a, 0x41, 0xd8, 0x92, 0xea, 0x45, 0xc9, 0x05, 0x47, 0x83, + 0x06, 0xa8, 0x6f, 0x81, 0xba, 0x04, 0xea, 0xeb, 0xc9, 0xf0, 0xb4, 0x95, 0x20, 0x45, 0x6a, 0x10, + 0xc6, 0xb8, 0x20, 0x22, 0xe5, 0xac, 0x6a, 0x78, 0xc3, 0xf3, 0x9d, 0x06, 0x4b, 0x9e, 0xe7, 0x9c, + 0x35, 0xb0, 0xd1, 0x77, 0x15, 0x7a, 0x76, 0xeb, 0x88, 0x10, 0x74, 0x18, 0xc9, 0xe9, 0x40, 0x39, + 0x53, 0xc6, 0x07, 0xbe, 0x3c, 0xa3, 0x37, 0x70, 0x18, 0xa7, 0x55, 0x91, 0x91, 0x4d, 0x24, 0x67, + 0xaa, 0x9c, 0xf5, 0xdb, 0x9e, 0x53, 0x43, 0xde, 0x43, 0xb7, 0x12, 0x44, 0xd0, 0xc1, 0xfe, 0x99, + 0x32, 0x3e, 0x9a, 0x8c, 0xf5, 0x5d, 0x57, 0xd6, 0xb7, 0x4e, 0x7a, 0x50, 0xe3, 0xfd, 0x86, 0x86, + 0x2e, 0xa1, 0x23, 0x36, 0x05, 0x1d, 0x74, 0x24, 0xfd, 0xe2, 0x3f, 0xe8, 0xe1, 0xa6, 0xa0, 0xbe, + 0x24, 0x8d, 0xde, 0x41, 0x57, 0x8a, 0xa1, 0x97, 0x70, 0x1c, 0x84, 0xb3, 0x10, 0x47, 0x8e, 0x1b, + 0x46, 0x1f, 0x1d, 0xf7, 0xce, 0xd1, 0xf6, 0xd0, 0x01, 0x74, 0x7d, 0x3c, 0xb3, 0xee, 0x35, 0x05, + 0x1d, 0x42, 0xcf, 0xf4, 0xf1, 0x2c, 0xb4, 0x9d, 0x2b, 0x4d, 0x1d, 0x4d, 0xa1, 0x53, 0x8b, 0xa0, + 0x13, 0xd0, 0xc2, 0x7b, 0x0f, 0x47, 0x37, 0x4e, 0xe0, 0x61, 0xd3, 0xfe, 0x60, 0x63, 0x4b, 0xdb, + 0x43, 0x47, 0x00, 0x9e, 0xef, 0x5a, 0x37, 0x66, 0x68, 0xbb, 0x8e, 0xa6, 0xa0, 0x63, 0xe8, 0x5b, + 0xf8, 0x16, 0x5f, 0xbb, 0xde, 0x27, 0xec, 0x84, 0x9a, 0x3a, 0xfa, 0xa1, 0xc2, 0x73, 0x33, 0x5b, + 0x55, 0x82, 0x96, 0x7f, 0x4d, 0x6d, 0x08, 0xbd, 0x8c, 0x2f, 0xe5, 0x87, 0xb4, 0x89, 0xfd, 0xaa, + 0xd1, 0xf4, 0x71, 0x5c, 
0xff, 0x78, 0x6f, 0xeb, 0xf0, 0x38, 0xad, 0xd7, 0xd0, 0xaf, 0x68, 0xb9, + 0xa6, 0x11, 0xe3, 0x31, 0xad, 0x64, 0x68, 0x5d, 0x1f, 0x64, 0xcb, 0xa9, 0x3b, 0xe8, 0x0e, 0x4e, + 0x62, 0xfa, 0x85, 0xac, 0x32, 0x11, 0x55, 0x82, 0x97, 0x24, 0xa1, 0x91, 0x8c, 0xb7, 0x2b, 0xed, + 0xce, 0x77, 0xdb, 0x05, 0x0d, 0x5a, 0x86, 0x8b, 0x5a, 0x89, 0xdf, 0x7a, 0xa3, 0xcf, 0x4f, 0x8a, + 0xba, 0xae, 0x7c, 0x1c, 0xd8, 0x0f, 0x75, 0xb5, 0x5f, 0x57, 0x96, 0x1d, 0xcc, 0xe6, 0xd7, 0xd8, + 0xd2, 0x3a, 0xf3, 0xaf, 0x0a, 0x9c, 0x2e, 0x79, 0xbe, 0xf3, 0x4e, 0xf3, 0x17, 0xdb, 0x3f, 0xf7, + 0xea, 0x75, 0xf5, 0x94, 0x87, 0x69, 0x0b, 0x4d, 0x78, 0x46, 0x58, 0xa2, 0xf3, 0x32, 0x31, 0x12, + 0xca, 0xe4, 0x32, 0x1b, 0xcd, 0x88, 0x14, 0x69, 0xf5, 0xe7, 0xda, 0x5f, 0xca, 0xc3, 0x37, 0xf5, + 0xd5, 0x55, 0xc3, 0x37, 0x33, 0xbe, 0x8a, 0xf5, 0xf9, 0xd6, 0x70, 0x26, 0x0d, 0x6f, 0x27, 0x8b, + 0x67, 0x52, 0xea, 0xed, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8d, 0xab, 0xe0, 0x6d, 0xa1, 0x03, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go new file mode 100644 index 000000000..223c5af01 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go @@ -0,0 +1,417 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/admin/v2/table.proto + +package admin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf4 "github.com/golang/protobuf/ptypes/duration" +import _ "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Possible timestamp granularities to use when keeping multiple versions +// of data in a table. 
+type Table_TimestampGranularity int32 + +const ( + // The user did not specify a granularity. Should not be returned. + // When specified during table creation, MILLIS will be used. + Table_TIMESTAMP_GRANULARITY_UNSPECIFIED Table_TimestampGranularity = 0 + // The table keeps data versioned at a granularity of 1ms. + Table_MILLIS Table_TimestampGranularity = 1 +) + +var Table_TimestampGranularity_name = map[int32]string{ + 0: "TIMESTAMP_GRANULARITY_UNSPECIFIED", + 1: "MILLIS", +} +var Table_TimestampGranularity_value = map[string]int32{ + "TIMESTAMP_GRANULARITY_UNSPECIFIED": 0, + "MILLIS": 1, +} + +func (x Table_TimestampGranularity) String() string { + return proto.EnumName(Table_TimestampGranularity_name, int32(x)) +} +func (Table_TimestampGranularity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor4, []int{0, 0} +} + +// Defines a view over a table's fields. +type Table_View int32 + +const ( + // Uses the default view for each method as documented in its request. + Table_VIEW_UNSPECIFIED Table_View = 0 + // Only populates `name`. + Table_NAME_ONLY Table_View = 1 + // Only populates `name` and fields related to the table's schema. + Table_SCHEMA_VIEW Table_View = 2 + // Populates all fields. + Table_FULL Table_View = 4 +) + +var Table_View_name = map[int32]string{ + 0: "VIEW_UNSPECIFIED", + 1: "NAME_ONLY", + 2: "SCHEMA_VIEW", + 4: "FULL", +} +var Table_View_value = map[string]int32{ + "VIEW_UNSPECIFIED": 0, + "NAME_ONLY": 1, + "SCHEMA_VIEW": 2, + "FULL": 4, +} + +func (x Table_View) String() string { + return proto.EnumName(Table_View_name, int32(x)) +} +func (Table_View) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0, 1} } + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. +type Table struct { + // (`OutputOnly`) + // The unique name of the table. Values are of the form + // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. 
+ // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `FULL` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // (`CreationOnly`) + // The column families configured for this table, mapped by column family ID. + // Views: `SCHEMA_VIEW`, `FULL` + ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // (`CreationOnly`) + // The granularity (e.g. `MILLIS`, `MICROS`) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // If unspecified at creation time, the value will be set to `MILLIS`. + // Views: `SCHEMA_VIEW`, `FULL` + Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,enum=google.bigtable.admin.v2.Table_TimestampGranularity" json:"granularity,omitempty"` +} + +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} +func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *Table) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Table) GetColumnFamilies() map[string]*ColumnFamily { + if m != nil { + return m.ColumnFamilies + } + return nil +} + +func (m *Table) GetGranularity() Table_TimestampGranularity { + if m != nil { + return m.Granularity + } + return Table_TIMESTAMP_GRANULARITY_UNSPECIFIED +} + +// A set of columns within a table which share a common configuration. +type ColumnFamily struct { + // Garbage collection rule specified as a protobuf. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. 
+ GcRule *GcRule `protobuf:"bytes,1,opt,name=gc_rule,json=gcRule" json:"gc_rule,omitempty"` +} + +func (m *ColumnFamily) Reset() { *m = ColumnFamily{} } +func (m *ColumnFamily) String() string { return proto.CompactTextString(m) } +func (*ColumnFamily) ProtoMessage() {} +func (*ColumnFamily) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *ColumnFamily) GetGcRule() *GcRule { + if m != nil { + return m.GcRule + } + return nil +} + +// Rule for determining which cells to delete during garbage collection. +type GcRule struct { + // Garbage collection rules. + // + // Types that are valid to be assigned to Rule: + // *GcRule_MaxNumVersions + // *GcRule_MaxAge + // *GcRule_Intersection_ + // *GcRule_Union_ + Rule isGcRule_Rule `protobuf_oneof:"rule"` +} + +func (m *GcRule) Reset() { *m = GcRule{} } +func (m *GcRule) String() string { return proto.CompactTextString(m) } +func (*GcRule) ProtoMessage() {} +func (*GcRule) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } + +type isGcRule_Rule interface { + isGcRule_Rule() +} + +type GcRule_MaxNumVersions struct { + MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions,json=maxNumVersions,oneof"` +} +type GcRule_MaxAge struct { + MaxAge *google_protobuf4.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,oneof"` +} +type GcRule_Intersection_ struct { + Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection,oneof"` +} +type GcRule_Union_ struct { + Union *GcRule_Union `protobuf:"bytes,4,opt,name=union,oneof"` +} + +func (*GcRule_MaxNumVersions) isGcRule_Rule() {} +func (*GcRule_MaxAge) isGcRule_Rule() {} +func (*GcRule_Intersection_) isGcRule_Rule() {} +func (*GcRule_Union_) isGcRule_Rule() {} + +func (m *GcRule) GetRule() isGcRule_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (m *GcRule) GetMaxNumVersions() int32 { + if x, ok := m.GetRule().(*GcRule_MaxNumVersions); ok { + return x.MaxNumVersions + } + return 0 +} + +func 
(m *GcRule) GetMaxAge() *google_protobuf4.Duration { + if x, ok := m.GetRule().(*GcRule_MaxAge); ok { + return x.MaxAge + } + return nil +} + +func (m *GcRule) GetIntersection() *GcRule_Intersection { + if x, ok := m.GetRule().(*GcRule_Intersection_); ok { + return x.Intersection + } + return nil +} + +func (m *GcRule) GetUnion() *GcRule_Union { + if x, ok := m.GetRule().(*GcRule_Union_); ok { + return x.Union + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GcRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GcRule_OneofMarshaler, _GcRule_OneofUnmarshaler, _GcRule_OneofSizer, []interface{}{ + (*GcRule_MaxNumVersions)(nil), + (*GcRule_MaxAge)(nil), + (*GcRule_Intersection_)(nil), + (*GcRule_Union_)(nil), + } +} + +func _GcRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GcRule) + // rule + switch x := m.Rule.(type) { + case *GcRule_MaxNumVersions: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.MaxNumVersions)) + case *GcRule_MaxAge: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MaxAge); err != nil { + return err + } + case *GcRule_Intersection_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Intersection); err != nil { + return err + } + case *GcRule_Union_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Union); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GcRule.Rule has unexpected type %T", x) + } + return nil +} + +func _GcRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GcRule) + switch tag { + case 1: // rule.max_num_versions + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Rule = 
&GcRule_MaxNumVersions{int32(x)} + return true, err + case 2: // rule.max_age + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf4.Duration) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_MaxAge{msg} + return true, err + case 3: // rule.intersection + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GcRule_Intersection) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_Intersection_{msg} + return true, err + case 4: // rule.union + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GcRule_Union) + err := b.DecodeMessage(msg) + m.Rule = &GcRule_Union_{msg} + return true, err + default: + return false, nil + } +} + +func _GcRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GcRule) + // rule + switch x := m.Rule.(type) { + case *GcRule_MaxNumVersions: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.MaxNumVersions)) + case *GcRule_MaxAge: + s := proto.Size(x.MaxAge) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GcRule_Intersection_: + s := proto.Size(x.Intersection) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GcRule_Union_: + s := proto.Size(x.Union) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A GcRule which deletes cells matching all of the given rules. +type GcRule_Intersection struct { + // Only delete cells which would be deleted by every element of `rules`. 
+ Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} } +func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) } +func (*GcRule_Intersection) ProtoMessage() {} +func (*GcRule_Intersection) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2, 0} } + +func (m *GcRule_Intersection) GetRules() []*GcRule { + if m != nil { + return m.Rules + } + return nil +} + +// A GcRule which deletes cells matching any of the given rules. +type GcRule_Union struct { + // Delete cells which would be deleted by any element of `rules`. + Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"` +} + +func (m *GcRule_Union) Reset() { *m = GcRule_Union{} } +func (m *GcRule_Union) String() string { return proto.CompactTextString(m) } +func (*GcRule_Union) ProtoMessage() {} +func (*GcRule_Union) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2, 1} } + +func (m *GcRule_Union) GetRules() []*GcRule { + if m != nil { + return m.Rules + } + return nil +} + +func init() { + proto.RegisterType((*Table)(nil), "google.bigtable.admin.v2.Table") + proto.RegisterType((*ColumnFamily)(nil), "google.bigtable.admin.v2.ColumnFamily") + proto.RegisterType((*GcRule)(nil), "google.bigtable.admin.v2.GcRule") + proto.RegisterType((*GcRule_Intersection)(nil), "google.bigtable.admin.v2.GcRule.Intersection") + proto.RegisterType((*GcRule_Union)(nil), "google.bigtable.admin.v2.GcRule.Union") + proto.RegisterEnum("google.bigtable.admin.v2.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value) + proto.RegisterEnum("google.bigtable.admin.v2.Table_View", Table_View_name, Table_View_value) +} + +func init() { proto.RegisterFile("google/bigtable/admin/v2/table.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 621 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x02, 0xff, 0x9c, 0x94, 0xdd, 0x6e, 0xda, 0x3c, + 0x18, 0xc7, 0x09, 0x09, 0xf4, 0xed, 0x43, 0xdf, 0x36, 0xf2, 0x7a, 0xc0, 0x50, 0xd5, 0x31, 0xb4, + 0x4d, 0x68, 0xd2, 0x12, 0x29, 0xad, 0xa6, 0x7d, 0x4f, 0x81, 0x06, 0x88, 0x04, 0x0c, 0x85, 0x8f, + 0xa9, 0xd3, 0xa4, 0xc8, 0x50, 0x37, 0xb2, 0x16, 0x3b, 0x28, 0x24, 0xac, 0xdc, 0xc3, 0x0e, 0x76, + 0x1d, 0xbb, 0x99, 0xdd, 0xd2, 0x14, 0x27, 0x68, 0xb4, 0x2b, 0xea, 0xb4, 0x23, 0x6c, 0x3f, 0xff, + 0xff, 0xef, 0xf9, 0xb0, 0x09, 0x3c, 0xf2, 0x82, 0xc0, 0xf3, 0x89, 0x3e, 0xa5, 0x5e, 0x84, 0xa7, + 0x3e, 0xd1, 0xf1, 0x05, 0xa3, 0x5c, 0x5f, 0x1a, 0xba, 0xd8, 0x6a, 0xf3, 0x30, 0x88, 0x02, 0x54, + 0x4e, 0x55, 0xda, 0x5a, 0xa5, 0x09, 0x95, 0xb6, 0x34, 0x2a, 0x47, 0x99, 0x1f, 0xcf, 0xa9, 0x8e, + 0x39, 0x0f, 0x22, 0x1c, 0xd1, 0x80, 0x2f, 0x52, 0x5f, 0xe5, 0x38, 0x8b, 0x8a, 0xdd, 0x34, 0xbe, + 0xd4, 0x2f, 0xe2, 0x50, 0x08, 0xb2, 0xf8, 0x83, 0x9b, 0xf1, 0x88, 0x32, 0xb2, 0x88, 0x30, 0x9b, + 0xa7, 0x82, 0xda, 0x4f, 0x19, 0x0a, 0xa3, 0x24, 0x23, 0x42, 0xa0, 0x70, 0xcc, 0x48, 0x59, 0xaa, + 0x4a, 0xf5, 0x5d, 0x47, 0xac, 0xd1, 0x67, 0x38, 0x98, 0x05, 0x7e, 0xcc, 0xb8, 0x7b, 0x89, 0x19, + 0xf5, 0x29, 0x59, 0x94, 0xe5, 0xaa, 0x5c, 0x2f, 0x19, 0x27, 0xda, 0xb6, 0x82, 0x35, 0x41, 0xd3, + 0x9a, 0xc2, 0xd6, 0xca, 0x5c, 0x16, 0x8f, 0xc2, 0x95, 0xb3, 0x3f, 0xbb, 0x76, 0x88, 0x26, 0x50, + 0xf2, 0x42, 0xcc, 0x63, 0x1f, 0x87, 0x34, 0x5a, 0x95, 0x95, 0xaa, 0x54, 0xdf, 0x37, 0x4e, 0xef, + 0x22, 0x8f, 0xd6, 0x1d, 0xb4, 0x7f, 0x7b, 0x9d, 0x4d, 0x50, 0x85, 0xc2, 0xbd, 0x5b, 0xd2, 0x23, + 0x15, 0xe4, 0x2f, 0x64, 0x95, 0xf5, 0x97, 0x2c, 0xd1, 0x1b, 0x28, 0x2c, 0xb1, 0x1f, 0x93, 0x72, + 0xbe, 0x2a, 0xd5, 0x4b, 0xc6, 0x93, 0xed, 0xa9, 0x37, 0x78, 0x2b, 0x27, 0x35, 0xbd, 0xca, 0xbf, + 0x90, 0x6a, 0x36, 0x1c, 0xde, 0x56, 0x0f, 0x7a, 0x0c, 0x0f, 0x47, 0x76, 0xcf, 0x1a, 0x8e, 0xcc, + 0xde, 0xc0, 0x6d, 0x3b, 0x66, 0x7f, 0xdc, 0x35, 0x1d, 0x7b, 0x74, 0xee, 0x8e, 0xfb, 0xc3, 0x81, + 0xd5, 0xb4, 0x5b, 0xb6, 0x75, 0xa6, 0xe6, 0x10, 0x40, 0xb1, 0x67, 0x77, 0xbb, 0xf6, 
0x50, 0x95, + 0x6a, 0x2d, 0x50, 0x26, 0x94, 0x7c, 0x45, 0x87, 0xa0, 0x4e, 0x6c, 0xeb, 0xe3, 0x0d, 0xe5, 0xff, + 0xb0, 0xdb, 0x37, 0x7b, 0x96, 0xfb, 0xa1, 0xdf, 0x3d, 0x57, 0x25, 0x74, 0x00, 0xa5, 0x61, 0xb3, + 0x63, 0xf5, 0x4c, 0x37, 0xd1, 0xaa, 0x79, 0xf4, 0x1f, 0x28, 0xad, 0x71, 0xb7, 0xab, 0x2a, 0x35, + 0x1b, 0xf6, 0x36, 0xab, 0x45, 0x2f, 0x61, 0xc7, 0x9b, 0xb9, 0x61, 0xec, 0xa7, 0x57, 0x5b, 0x32, + 0xaa, 0xdb, 0xdb, 0x6c, 0xcf, 0x9c, 0xd8, 0x27, 0x4e, 0xd1, 0x13, 0xbf, 0xb5, 0xef, 0x32, 0x14, + 0xd3, 0x23, 0xf4, 0x14, 0x54, 0x86, 0xaf, 0x5c, 0x1e, 0x33, 0x77, 0x49, 0xc2, 0x45, 0xf2, 0x04, + 0x05, 0xae, 0xd0, 0xc9, 0x39, 0xfb, 0x0c, 0x5f, 0xf5, 0x63, 0x36, 0xc9, 0xce, 0xd1, 0x29, 0xec, + 0x24, 0x5a, 0xec, 0xad, 0x07, 0x7b, 0x7f, 0x9d, 0x71, 0xfd, 0x0c, 0xb5, 0xb3, 0xec, 0x99, 0x76, + 0x72, 0x4e, 0x91, 0xe1, 0x2b, 0xd3, 0x23, 0x68, 0x08, 0x7b, 0x94, 0x47, 0x24, 0x5c, 0x90, 0x59, + 0x12, 0x29, 0xcb, 0xc2, 0xfa, 0xec, 0xae, 0x62, 0x35, 0x7b, 0xc3, 0xd4, 0xc9, 0x39, 0xd7, 0x20, + 0xe8, 0x1d, 0x14, 0x62, 0x9e, 0xd0, 0x94, 0xbb, 0x6e, 0x38, 0xa3, 0x8d, 0x79, 0x8a, 0x49, 0x6d, + 0x95, 0x16, 0xec, 0x6d, 0xf2, 0xd1, 0x73, 0x28, 0x24, 0x93, 0x4c, 0x7a, 0x97, 0xff, 0x6a, 0x94, + 0xa9, 0xbc, 0xf2, 0x1e, 0x0a, 0x82, 0xfc, 0xaf, 0x80, 0x46, 0x11, 0x94, 0x64, 0xd1, 0xf8, 0x26, + 0xc1, 0xd1, 0x2c, 0x60, 0x5b, 0x6d, 0x0d, 0x10, 0xff, 0x92, 0x41, 0x32, 0xe8, 0x81, 0xf4, 0xe9, + 0x6d, 0xa6, 0xf3, 0x02, 0x1f, 0x73, 0x4f, 0x0b, 0x42, 0x4f, 0xf7, 0x08, 0x17, 0xd7, 0xa0, 0xa7, + 0x21, 0x3c, 0xa7, 0x8b, 0x3f, 0x3f, 0x4e, 0xaf, 0xc5, 0xe2, 0x47, 0xfe, 0xb8, 0x9d, 0xfa, 0x9b, + 0x7e, 0x10, 0x5f, 0x68, 0x8d, 0x75, 0x36, 0x53, 0x64, 0x9b, 0x18, 0xd3, 0xa2, 0x40, 0x9d, 0xfc, + 0x0a, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xf3, 0x89, 0x9d, 0xe6, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_data.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_data.pb.go new file mode 100644 index 000000000..51ece9f93 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_data.pb.go @@ -0,0 +1,1980 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/v1/bigtable_data.proto + +/* +Package bigtable is a generated protocol buffer package. + +It is generated from these files: + google/bigtable/v1/bigtable_data.proto + google/bigtable/v1/bigtable_service.proto + google/bigtable/v1/bigtable_service_messages.proto + +It has these top-level messages: + Row + Family + Column + Cell + RowRange + RowSet + ColumnRange + TimestampRange + ValueRange + RowFilter + Mutation + ReadModifyWriteRule + ReadRowsRequest + ReadRowsResponse + SampleRowKeysRequest + SampleRowKeysResponse + MutateRowRequest + MutateRowsRequest + MutateRowsResponse + CheckAndMutateRowRequest + CheckAndMutateRowResponse + ReadModifyWriteRowRequest +*/ +package bigtable + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Specifies the complete (requested) contents of a single row of a table. +// Rows which exceed 256MiB in size cannot be read in full. +type Row struct { + // The unique key which identifies this row within its table. This is the same + // key that's used to identify the row in, for example, a MutateRowRequest. + // May contain any non-empty byte string up to 4KiB in length. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // May be empty, but only if the entire row is empty. + // The mutual ordering of column families is not specified. 
+ Families []*Family `protobuf:"bytes,2,rep,name=families" json:"families,omitempty"` +} + +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} +func (*Row) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Row) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Row) GetFamilies() []*Family { + if m != nil { + return m.Families + } + return nil +} + +// Specifies (some of) the contents of a single row/column family of a table. +type Family struct { + // The unique key which identifies this family within its row. This is the + // same key that's used to identify the family in, for example, a RowFilter + // which sets its "family_name_regex_filter" field. + // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may + // produce cells in a sentinel family with an empty name. + // Must be no greater than 64 characters in length. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Must not be empty. Sorted in order of increasing "qualifier". + Columns []*Column `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"` +} + +func (m *Family) Reset() { *m = Family{} } +func (m *Family) String() string { return proto.CompactTextString(m) } +func (*Family) ProtoMessage() {} +func (*Family) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Family) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Family) GetColumns() []*Column { + if m != nil { + return m.Columns + } + return nil +} + +// Specifies (some of) the contents of a single row/column of a table. +type Column struct { + // The unique key which identifies this column within its family. This is the + // same key that's used to identify the column in, for example, a RowFilter + // which sets its "column_qualifier_regex_filter" field. 
+ // May contain any byte string, including the empty string, up to 16kiB in + // length. + Qualifier []byte `protobuf:"bytes,1,opt,name=qualifier,proto3" json:"qualifier,omitempty"` + // Must not be empty. Sorted in order of decreasing "timestamp_micros". + Cells []*Cell `protobuf:"bytes,2,rep,name=cells" json:"cells,omitempty"` +} + +func (m *Column) Reset() { *m = Column{} } +func (m *Column) String() string { return proto.CompactTextString(m) } +func (*Column) ProtoMessage() {} +func (*Column) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Column) GetQualifier() []byte { + if m != nil { + return m.Qualifier + } + return nil +} + +func (m *Column) GetCells() []*Cell { + if m != nil { + return m.Cells + } + return nil +} + +// Specifies (some of) the contents of a single row/column/timestamp of a table. +type Cell struct { + // The cell's stored timestamp, which also uniquely identifies it within + // its column. + // Values are always expressed in microseconds, but individual tables may set + // a coarser "granularity" to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will only allow + // values of "timestamp_micros" which are multiples of 1000. + TimestampMicros int64 `protobuf:"varint,1,opt,name=timestamp_micros,json=timestampMicros" json:"timestamp_micros,omitempty"` + // The value stored in the cell. + // May contain any byte string, including the empty string, up to 100MiB in + // length. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. 
+ Labels []string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty"` +} + +func (m *Cell) Reset() { *m = Cell{} } +func (m *Cell) String() string { return proto.CompactTextString(m) } +func (*Cell) ProtoMessage() {} +func (*Cell) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Cell) GetTimestampMicros() int64 { + if m != nil { + return m.TimestampMicros + } + return 0 +} + +func (m *Cell) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *Cell) GetLabels() []string { + if m != nil { + return m.Labels + } + return nil +} + +// Specifies a contiguous range of rows. +type RowRange struct { + // Inclusive lower bound. If left empty, interpreted as the empty string. + StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + // Exclusive upper bound. If left empty, interpreted as infinity. + EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` +} + +func (m *RowRange) Reset() { *m = RowRange{} } +func (m *RowRange) String() string { return proto.CompactTextString(m) } +func (*RowRange) ProtoMessage() {} +func (*RowRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *RowRange) GetStartKey() []byte { + if m != nil { + return m.StartKey + } + return nil +} + +func (m *RowRange) GetEndKey() []byte { + if m != nil { + return m.EndKey + } + return nil +} + +// Specifies a non-contiguous set of rows. +type RowSet struct { + // Single rows included in the set. + RowKeys [][]byte `protobuf:"bytes,1,rep,name=row_keys,json=rowKeys,proto3" json:"row_keys,omitempty"` + // Contiguous row ranges included in the set. 
+ RowRanges []*RowRange `protobuf:"bytes,2,rep,name=row_ranges,json=rowRanges" json:"row_ranges,omitempty"` +} + +func (m *RowSet) Reset() { *m = RowSet{} } +func (m *RowSet) String() string { return proto.CompactTextString(m) } +func (*RowSet) ProtoMessage() {} +func (*RowSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *RowSet) GetRowKeys() [][]byte { + if m != nil { + return m.RowKeys + } + return nil +} + +func (m *RowSet) GetRowRanges() []*RowRange { + if m != nil { + return m.RowRanges + } + return nil +} + +// Specifies a contiguous range of columns within a single column family. +// The range spans from : to +// :, where both bounds can be either inclusive or +// exclusive. +type ColumnRange struct { + // The name of the column family within which this range falls. + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The column qualifier at which to start the range (within 'column_family'). + // If neither field is set, interpreted as the empty string, inclusive. + // + // Types that are valid to be assigned to StartQualifier: + // *ColumnRange_StartQualifierInclusive + // *ColumnRange_StartQualifierExclusive + StartQualifier isColumnRange_StartQualifier `protobuf_oneof:"start_qualifier"` + // The column qualifier at which to end the range (within 'column_family'). + // If neither field is set, interpreted as the infinite string, exclusive. 
+ // + // Types that are valid to be assigned to EndQualifier: + // *ColumnRange_EndQualifierInclusive + // *ColumnRange_EndQualifierExclusive + EndQualifier isColumnRange_EndQualifier `protobuf_oneof:"end_qualifier"` +} + +func (m *ColumnRange) Reset() { *m = ColumnRange{} } +func (m *ColumnRange) String() string { return proto.CompactTextString(m) } +func (*ColumnRange) ProtoMessage() {} +func (*ColumnRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type isColumnRange_StartQualifier interface { + isColumnRange_StartQualifier() +} +type isColumnRange_EndQualifier interface { + isColumnRange_EndQualifier() +} + +type ColumnRange_StartQualifierInclusive struct { + StartQualifierInclusive []byte `protobuf:"bytes,2,opt,name=start_qualifier_inclusive,json=startQualifierInclusive,proto3,oneof"` +} +type ColumnRange_StartQualifierExclusive struct { + StartQualifierExclusive []byte `protobuf:"bytes,3,opt,name=start_qualifier_exclusive,json=startQualifierExclusive,proto3,oneof"` +} +type ColumnRange_EndQualifierInclusive struct { + EndQualifierInclusive []byte `protobuf:"bytes,4,opt,name=end_qualifier_inclusive,json=endQualifierInclusive,proto3,oneof"` +} +type ColumnRange_EndQualifierExclusive struct { + EndQualifierExclusive []byte `protobuf:"bytes,5,opt,name=end_qualifier_exclusive,json=endQualifierExclusive,proto3,oneof"` +} + +func (*ColumnRange_StartQualifierInclusive) isColumnRange_StartQualifier() {} +func (*ColumnRange_StartQualifierExclusive) isColumnRange_StartQualifier() {} +func (*ColumnRange_EndQualifierInclusive) isColumnRange_EndQualifier() {} +func (*ColumnRange_EndQualifierExclusive) isColumnRange_EndQualifier() {} + +func (m *ColumnRange) GetStartQualifier() isColumnRange_StartQualifier { + if m != nil { + return m.StartQualifier + } + return nil +} +func (m *ColumnRange) GetEndQualifier() isColumnRange_EndQualifier { + if m != nil { + return m.EndQualifier + } + return nil +} + +func (m *ColumnRange) GetFamilyName() string { + 
if m != nil { + return m.FamilyName + } + return "" +} + +func (m *ColumnRange) GetStartQualifierInclusive() []byte { + if x, ok := m.GetStartQualifier().(*ColumnRange_StartQualifierInclusive); ok { + return x.StartQualifierInclusive + } + return nil +} + +func (m *ColumnRange) GetStartQualifierExclusive() []byte { + if x, ok := m.GetStartQualifier().(*ColumnRange_StartQualifierExclusive); ok { + return x.StartQualifierExclusive + } + return nil +} + +func (m *ColumnRange) GetEndQualifierInclusive() []byte { + if x, ok := m.GetEndQualifier().(*ColumnRange_EndQualifierInclusive); ok { + return x.EndQualifierInclusive + } + return nil +} + +func (m *ColumnRange) GetEndQualifierExclusive() []byte { + if x, ok := m.GetEndQualifier().(*ColumnRange_EndQualifierExclusive); ok { + return x.EndQualifierExclusive + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ColumnRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ColumnRange_OneofMarshaler, _ColumnRange_OneofUnmarshaler, _ColumnRange_OneofSizer, []interface{}{ + (*ColumnRange_StartQualifierInclusive)(nil), + (*ColumnRange_StartQualifierExclusive)(nil), + (*ColumnRange_EndQualifierInclusive)(nil), + (*ColumnRange_EndQualifierExclusive)(nil), + } +} + +func _ColumnRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ColumnRange) + // start_qualifier + switch x := m.StartQualifier.(type) { + case *ColumnRange_StartQualifierInclusive: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartQualifierInclusive) + case *ColumnRange_StartQualifierExclusive: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartQualifierExclusive) + case nil: + default: + return fmt.Errorf("ColumnRange.StartQualifier has unexpected type %T", x) + } + // end_qualifier + switch x := m.EndQualifier.(type) 
{ + case *ColumnRange_EndQualifierInclusive: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndQualifierInclusive) + case *ColumnRange_EndQualifierExclusive: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndQualifierExclusive) + case nil: + default: + return fmt.Errorf("ColumnRange.EndQualifier has unexpected type %T", x) + } + return nil +} + +func _ColumnRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ColumnRange) + switch tag { + case 2: // start_qualifier.start_qualifier_inclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartQualifier = &ColumnRange_StartQualifierInclusive{x} + return true, err + case 3: // start_qualifier.start_qualifier_exclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartQualifier = &ColumnRange_StartQualifierExclusive{x} + return true, err + case 4: // end_qualifier.end_qualifier_inclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndQualifier = &ColumnRange_EndQualifierInclusive{x} + return true, err + case 5: // end_qualifier.end_qualifier_exclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndQualifier = &ColumnRange_EndQualifierExclusive{x} + return true, err + default: + return false, nil + } +} + +func _ColumnRange_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ColumnRange) + // start_qualifier + switch x := m.StartQualifier.(type) { + case *ColumnRange_StartQualifierInclusive: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartQualifierInclusive))) + n += len(x.StartQualifierInclusive) + case *ColumnRange_StartQualifierExclusive: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.StartQualifierExclusive))) + n += len(x.StartQualifierExclusive) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // end_qualifier + switch x := m.EndQualifier.(type) { + case *ColumnRange_EndQualifierInclusive: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndQualifierInclusive))) + n += len(x.EndQualifierInclusive) + case *ColumnRange_EndQualifierExclusive: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndQualifierExclusive))) + n += len(x.EndQualifierExclusive) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specified a contiguous range of microsecond timestamps. +type TimestampRange struct { + // Inclusive lower bound. If left empty, interpreted as 0. + StartTimestampMicros int64 `protobuf:"varint,1,opt,name=start_timestamp_micros,json=startTimestampMicros" json:"start_timestamp_micros,omitempty"` + // Exclusive upper bound. If left empty, interpreted as infinity. + EndTimestampMicros int64 `protobuf:"varint,2,opt,name=end_timestamp_micros,json=endTimestampMicros" json:"end_timestamp_micros,omitempty"` +} + +func (m *TimestampRange) Reset() { *m = TimestampRange{} } +func (m *TimestampRange) String() string { return proto.CompactTextString(m) } +func (*TimestampRange) ProtoMessage() {} +func (*TimestampRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *TimestampRange) GetStartTimestampMicros() int64 { + if m != nil { + return m.StartTimestampMicros + } + return 0 +} + +func (m *TimestampRange) GetEndTimestampMicros() int64 { + if m != nil { + return m.EndTimestampMicros + } + return 0 +} + +// Specifies a contiguous range of raw byte values. +type ValueRange struct { + // The value at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. 
+ // + // Types that are valid to be assigned to StartValue: + // *ValueRange_StartValueInclusive + // *ValueRange_StartValueExclusive + StartValue isValueRange_StartValue `protobuf_oneof:"start_value"` + // The value at which to end the range. + // If neither field is set, interpreted as the infinite string, exclusive. + // + // Types that are valid to be assigned to EndValue: + // *ValueRange_EndValueInclusive + // *ValueRange_EndValueExclusive + EndValue isValueRange_EndValue `protobuf_oneof:"end_value"` +} + +func (m *ValueRange) Reset() { *m = ValueRange{} } +func (m *ValueRange) String() string { return proto.CompactTextString(m) } +func (*ValueRange) ProtoMessage() {} +func (*ValueRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +type isValueRange_StartValue interface { + isValueRange_StartValue() +} +type isValueRange_EndValue interface { + isValueRange_EndValue() +} + +type ValueRange_StartValueInclusive struct { + StartValueInclusive []byte `protobuf:"bytes,1,opt,name=start_value_inclusive,json=startValueInclusive,proto3,oneof"` +} +type ValueRange_StartValueExclusive struct { + StartValueExclusive []byte `protobuf:"bytes,2,opt,name=start_value_exclusive,json=startValueExclusive,proto3,oneof"` +} +type ValueRange_EndValueInclusive struct { + EndValueInclusive []byte `protobuf:"bytes,3,opt,name=end_value_inclusive,json=endValueInclusive,proto3,oneof"` +} +type ValueRange_EndValueExclusive struct { + EndValueExclusive []byte `protobuf:"bytes,4,opt,name=end_value_exclusive,json=endValueExclusive,proto3,oneof"` +} + +func (*ValueRange_StartValueInclusive) isValueRange_StartValue() {} +func (*ValueRange_StartValueExclusive) isValueRange_StartValue() {} +func (*ValueRange_EndValueInclusive) isValueRange_EndValue() {} +func (*ValueRange_EndValueExclusive) isValueRange_EndValue() {} + +func (m *ValueRange) GetStartValue() isValueRange_StartValue { + if m != nil { + return m.StartValue + } + return nil +} +func (m *ValueRange) 
GetEndValue() isValueRange_EndValue { + if m != nil { + return m.EndValue + } + return nil +} + +func (m *ValueRange) GetStartValueInclusive() []byte { + if x, ok := m.GetStartValue().(*ValueRange_StartValueInclusive); ok { + return x.StartValueInclusive + } + return nil +} + +func (m *ValueRange) GetStartValueExclusive() []byte { + if x, ok := m.GetStartValue().(*ValueRange_StartValueExclusive); ok { + return x.StartValueExclusive + } + return nil +} + +func (m *ValueRange) GetEndValueInclusive() []byte { + if x, ok := m.GetEndValue().(*ValueRange_EndValueInclusive); ok { + return x.EndValueInclusive + } + return nil +} + +func (m *ValueRange) GetEndValueExclusive() []byte { + if x, ok := m.GetEndValue().(*ValueRange_EndValueExclusive); ok { + return x.EndValueExclusive + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ValueRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ValueRange_OneofMarshaler, _ValueRange_OneofUnmarshaler, _ValueRange_OneofSizer, []interface{}{ + (*ValueRange_StartValueInclusive)(nil), + (*ValueRange_StartValueExclusive)(nil), + (*ValueRange_EndValueInclusive)(nil), + (*ValueRange_EndValueExclusive)(nil), + } +} + +func _ValueRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ValueRange) + // start_value + switch x := m.StartValue.(type) { + case *ValueRange_StartValueInclusive: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartValueInclusive) + case *ValueRange_StartValueExclusive: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartValueExclusive) + case nil: + default: + return fmt.Errorf("ValueRange.StartValue has unexpected type %T", x) + } + // end_value + switch x := m.EndValue.(type) { + case *ValueRange_EndValueInclusive: + b.EncodeVarint(3<<3 | proto.WireBytes) + 
b.EncodeRawBytes(x.EndValueInclusive) + case *ValueRange_EndValueExclusive: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndValueExclusive) + case nil: + default: + return fmt.Errorf("ValueRange.EndValue has unexpected type %T", x) + } + return nil +} + +func _ValueRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ValueRange) + switch tag { + case 1: // start_value.start_value_inclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartValue = &ValueRange_StartValueInclusive{x} + return true, err + case 2: // start_value.start_value_exclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartValue = &ValueRange_StartValueExclusive{x} + return true, err + case 3: // end_value.end_value_inclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndValue = &ValueRange_EndValueInclusive{x} + return true, err + case 4: // end_value.end_value_exclusive + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndValue = &ValueRange_EndValueExclusive{x} + return true, err + default: + return false, nil + } +} + +func _ValueRange_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ValueRange) + // start_value + switch x := m.StartValue.(type) { + case *ValueRange_StartValueInclusive: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartValueInclusive))) + n += len(x.StartValueInclusive) + case *ValueRange_StartValueExclusive: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartValueExclusive))) + n += len(x.StartValueExclusive) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // end_value + switch x := 
m.EndValue.(type) { + case *ValueRange_EndValueInclusive: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndValueInclusive))) + n += len(x.EndValueInclusive) + case *ValueRange_EndValueExclusive: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndValueExclusive))) + n += len(x.EndValueExclusive) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Takes a row as input and produces an alternate view of the row based on +// specified rules. For example, a RowFilter might trim down a row to include +// just the cells from columns matching a given regular expression, or might +// return all the cells of a row but not their values. More complicated filters +// can be composed out of these components to express requests such as, "within +// every column of a particular family, give just the two most recent cells +// which are older than timestamp X." +// +// There are two broad categories of RowFilters (true filters and transformers), +// as well as two ways to compose simple filters into more complex ones +// (chains and interleaves). They work as follows: +// +// * True filters alter the input row by excluding some of its cells wholesale +// from the output row. An example of a true filter is the "value_regex_filter", +// which excludes cells whose values don't match the specified pattern. All +// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) +// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +// important point to keep in mind is that RE2(.) is equivalent by default to +// RE2([^\n]), meaning that it does not match newlines. When attempting to match +// an arbitrary byte, you should therefore use the escape sequence '\C', which +// may need to be further escaped as '\\C' in your client language. 
+// +// * Transformers alter the input row by changing the values of some of its +// cells in the output, without excluding them completely. Currently, the only +// supported transformer is the "strip_value_transformer", which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. +// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +type RowFilter struct { + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + // + // Types that are valid to be assigned to Filter: + // *RowFilter_Chain_ + // *RowFilter_Interleave_ + // *RowFilter_Condition_ + // *RowFilter_Sink + // *RowFilter_PassAllFilter + // *RowFilter_BlockAllFilter + // *RowFilter_RowKeyRegexFilter + // *RowFilter_RowSampleFilter + // *RowFilter_FamilyNameRegexFilter + // *RowFilter_ColumnQualifierRegexFilter + // *RowFilter_ColumnRangeFilter + // *RowFilter_TimestampRangeFilter + // *RowFilter_ValueRegexFilter + // *RowFilter_ValueRangeFilter + // *RowFilter_CellsPerRowOffsetFilter + // *RowFilter_CellsPerRowLimitFilter + // *RowFilter_CellsPerColumnLimitFilter + // *RowFilter_StripValueTransformer + // *RowFilter_ApplyLabelTransformer + Filter isRowFilter_Filter `protobuf_oneof:"filter"` +} + +func (m *RowFilter) Reset() { *m = RowFilter{} } +func (m *RowFilter) String() string { return proto.CompactTextString(m) } +func (*RowFilter) ProtoMessage() {} +func (*RowFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type isRowFilter_Filter interface { + isRowFilter_Filter() +} + +type RowFilter_Chain_ struct { + Chain *RowFilter_Chain `protobuf:"bytes,1,opt,name=chain,oneof"` +} +type RowFilter_Interleave_ struct { + Interleave 
*RowFilter_Interleave `protobuf:"bytes,2,opt,name=interleave,oneof"` +} +type RowFilter_Condition_ struct { + Condition *RowFilter_Condition `protobuf:"bytes,3,opt,name=condition,oneof"` +} +type RowFilter_Sink struct { + Sink bool `protobuf:"varint,16,opt,name=sink,oneof"` +} +type RowFilter_PassAllFilter struct { + PassAllFilter bool `protobuf:"varint,17,opt,name=pass_all_filter,json=passAllFilter,oneof"` +} +type RowFilter_BlockAllFilter struct { + BlockAllFilter bool `protobuf:"varint,18,opt,name=block_all_filter,json=blockAllFilter,oneof"` +} +type RowFilter_RowKeyRegexFilter struct { + RowKeyRegexFilter []byte `protobuf:"bytes,4,opt,name=row_key_regex_filter,json=rowKeyRegexFilter,proto3,oneof"` +} +type RowFilter_RowSampleFilter struct { + RowSampleFilter float64 `protobuf:"fixed64,14,opt,name=row_sample_filter,json=rowSampleFilter,oneof"` +} +type RowFilter_FamilyNameRegexFilter struct { + FamilyNameRegexFilter string `protobuf:"bytes,5,opt,name=family_name_regex_filter,json=familyNameRegexFilter,oneof"` +} +type RowFilter_ColumnQualifierRegexFilter struct { + ColumnQualifierRegexFilter []byte `protobuf:"bytes,6,opt,name=column_qualifier_regex_filter,json=columnQualifierRegexFilter,proto3,oneof"` +} +type RowFilter_ColumnRangeFilter struct { + ColumnRangeFilter *ColumnRange `protobuf:"bytes,7,opt,name=column_range_filter,json=columnRangeFilter,oneof"` +} +type RowFilter_TimestampRangeFilter struct { + TimestampRangeFilter *TimestampRange `protobuf:"bytes,8,opt,name=timestamp_range_filter,json=timestampRangeFilter,oneof"` +} +type RowFilter_ValueRegexFilter struct { + ValueRegexFilter []byte `protobuf:"bytes,9,opt,name=value_regex_filter,json=valueRegexFilter,proto3,oneof"` +} +type RowFilter_ValueRangeFilter struct { + ValueRangeFilter *ValueRange `protobuf:"bytes,15,opt,name=value_range_filter,json=valueRangeFilter,oneof"` +} +type RowFilter_CellsPerRowOffsetFilter struct { + CellsPerRowOffsetFilter int32 
`protobuf:"varint,10,opt,name=cells_per_row_offset_filter,json=cellsPerRowOffsetFilter,oneof"` +} +type RowFilter_CellsPerRowLimitFilter struct { + CellsPerRowLimitFilter int32 `protobuf:"varint,11,opt,name=cells_per_row_limit_filter,json=cellsPerRowLimitFilter,oneof"` +} +type RowFilter_CellsPerColumnLimitFilter struct { + CellsPerColumnLimitFilter int32 `protobuf:"varint,12,opt,name=cells_per_column_limit_filter,json=cellsPerColumnLimitFilter,oneof"` +} +type RowFilter_StripValueTransformer struct { + StripValueTransformer bool `protobuf:"varint,13,opt,name=strip_value_transformer,json=stripValueTransformer,oneof"` +} +type RowFilter_ApplyLabelTransformer struct { + ApplyLabelTransformer string `protobuf:"bytes,19,opt,name=apply_label_transformer,json=applyLabelTransformer,oneof"` +} + +func (*RowFilter_Chain_) isRowFilter_Filter() {} +func (*RowFilter_Interleave_) isRowFilter_Filter() {} +func (*RowFilter_Condition_) isRowFilter_Filter() {} +func (*RowFilter_Sink) isRowFilter_Filter() {} +func (*RowFilter_PassAllFilter) isRowFilter_Filter() {} +func (*RowFilter_BlockAllFilter) isRowFilter_Filter() {} +func (*RowFilter_RowKeyRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_RowSampleFilter) isRowFilter_Filter() {} +func (*RowFilter_FamilyNameRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ColumnQualifierRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ColumnRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_TimestampRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_ValueRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ValueRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerRowOffsetFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerRowLimitFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerColumnLimitFilter) isRowFilter_Filter() {} +func (*RowFilter_StripValueTransformer) isRowFilter_Filter() {} +func (*RowFilter_ApplyLabelTransformer) isRowFilter_Filter() {} + +func (m *RowFilter) GetFilter() 
isRowFilter_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *RowFilter) GetChain() *RowFilter_Chain { + if x, ok := m.GetFilter().(*RowFilter_Chain_); ok { + return x.Chain + } + return nil +} + +func (m *RowFilter) GetInterleave() *RowFilter_Interleave { + if x, ok := m.GetFilter().(*RowFilter_Interleave_); ok { + return x.Interleave + } + return nil +} + +func (m *RowFilter) GetCondition() *RowFilter_Condition { + if x, ok := m.GetFilter().(*RowFilter_Condition_); ok { + return x.Condition + } + return nil +} + +func (m *RowFilter) GetSink() bool { + if x, ok := m.GetFilter().(*RowFilter_Sink); ok { + return x.Sink + } + return false +} + +func (m *RowFilter) GetPassAllFilter() bool { + if x, ok := m.GetFilter().(*RowFilter_PassAllFilter); ok { + return x.PassAllFilter + } + return false +} + +func (m *RowFilter) GetBlockAllFilter() bool { + if x, ok := m.GetFilter().(*RowFilter_BlockAllFilter); ok { + return x.BlockAllFilter + } + return false +} + +func (m *RowFilter) GetRowKeyRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_RowKeyRegexFilter); ok { + return x.RowKeyRegexFilter + } + return nil +} + +func (m *RowFilter) GetRowSampleFilter() float64 { + if x, ok := m.GetFilter().(*RowFilter_RowSampleFilter); ok { + return x.RowSampleFilter + } + return 0 +} + +func (m *RowFilter) GetFamilyNameRegexFilter() string { + if x, ok := m.GetFilter().(*RowFilter_FamilyNameRegexFilter); ok { + return x.FamilyNameRegexFilter + } + return "" +} + +func (m *RowFilter) GetColumnQualifierRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_ColumnQualifierRegexFilter); ok { + return x.ColumnQualifierRegexFilter + } + return nil +} + +func (m *RowFilter) GetColumnRangeFilter() *ColumnRange { + if x, ok := m.GetFilter().(*RowFilter_ColumnRangeFilter); ok { + return x.ColumnRangeFilter + } + return nil +} + +func (m *RowFilter) GetTimestampRangeFilter() *TimestampRange { + if x, ok := 
m.GetFilter().(*RowFilter_TimestampRangeFilter); ok { + return x.TimestampRangeFilter + } + return nil +} + +func (m *RowFilter) GetValueRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_ValueRegexFilter); ok { + return x.ValueRegexFilter + } + return nil +} + +func (m *RowFilter) GetValueRangeFilter() *ValueRange { + if x, ok := m.GetFilter().(*RowFilter_ValueRangeFilter); ok { + return x.ValueRangeFilter + } + return nil +} + +func (m *RowFilter) GetCellsPerRowOffsetFilter() int32 { + if x, ok := m.GetFilter().(*RowFilter_CellsPerRowOffsetFilter); ok { + return x.CellsPerRowOffsetFilter + } + return 0 +} + +func (m *RowFilter) GetCellsPerRowLimitFilter() int32 { + if x, ok := m.GetFilter().(*RowFilter_CellsPerRowLimitFilter); ok { + return x.CellsPerRowLimitFilter + } + return 0 +} + +func (m *RowFilter) GetCellsPerColumnLimitFilter() int32 { + if x, ok := m.GetFilter().(*RowFilter_CellsPerColumnLimitFilter); ok { + return x.CellsPerColumnLimitFilter + } + return 0 +} + +func (m *RowFilter) GetStripValueTransformer() bool { + if x, ok := m.GetFilter().(*RowFilter_StripValueTransformer); ok { + return x.StripValueTransformer + } + return false +} + +func (m *RowFilter) GetApplyLabelTransformer() string { + if x, ok := m.GetFilter().(*RowFilter_ApplyLabelTransformer); ok { + return x.ApplyLabelTransformer + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RowFilter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RowFilter_OneofMarshaler, _RowFilter_OneofUnmarshaler, _RowFilter_OneofSizer, []interface{}{ + (*RowFilter_Chain_)(nil), + (*RowFilter_Interleave_)(nil), + (*RowFilter_Condition_)(nil), + (*RowFilter_Sink)(nil), + (*RowFilter_PassAllFilter)(nil), + (*RowFilter_BlockAllFilter)(nil), + (*RowFilter_RowKeyRegexFilter)(nil), + (*RowFilter_RowSampleFilter)(nil), + (*RowFilter_FamilyNameRegexFilter)(nil), + (*RowFilter_ColumnQualifierRegexFilter)(nil), + (*RowFilter_ColumnRangeFilter)(nil), + (*RowFilter_TimestampRangeFilter)(nil), + (*RowFilter_ValueRegexFilter)(nil), + (*RowFilter_ValueRangeFilter)(nil), + (*RowFilter_CellsPerRowOffsetFilter)(nil), + (*RowFilter_CellsPerRowLimitFilter)(nil), + (*RowFilter_CellsPerColumnLimitFilter)(nil), + (*RowFilter_StripValueTransformer)(nil), + (*RowFilter_ApplyLabelTransformer)(nil), + } +} + +func _RowFilter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RowFilter) + // filter + switch x := m.Filter.(type) { + case *RowFilter_Chain_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Chain); err != nil { + return err + } + case *RowFilter_Interleave_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Interleave); err != nil { + return err + } + case *RowFilter_Condition_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Condition); err != nil { + return err + } + case *RowFilter_Sink: + t := uint64(0) + if x.Sink { + t = 1 + } + b.EncodeVarint(16<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_PassAllFilter: + t := uint64(0) + if x.PassAllFilter { + t = 1 + } + b.EncodeVarint(17<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_BlockAllFilter: + t := uint64(0) + if x.BlockAllFilter { + t = 1 + } + 
b.EncodeVarint(18<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_RowKeyRegexFilter: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.RowKeyRegexFilter) + case *RowFilter_RowSampleFilter: + b.EncodeVarint(14<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.RowSampleFilter)) + case *RowFilter_FamilyNameRegexFilter: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FamilyNameRegexFilter) + case *RowFilter_ColumnQualifierRegexFilter: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ColumnQualifierRegexFilter) + case *RowFilter_ColumnRangeFilter: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ColumnRangeFilter); err != nil { + return err + } + case *RowFilter_TimestampRangeFilter: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimestampRangeFilter); err != nil { + return err + } + case *RowFilter_ValueRegexFilter: + b.EncodeVarint(9<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ValueRegexFilter) + case *RowFilter_ValueRangeFilter: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ValueRangeFilter); err != nil { + return err + } + case *RowFilter_CellsPerRowOffsetFilter: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerRowOffsetFilter)) + case *RowFilter_CellsPerRowLimitFilter: + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerRowLimitFilter)) + case *RowFilter_CellsPerColumnLimitFilter: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerColumnLimitFilter)) + case *RowFilter_StripValueTransformer: + t := uint64(0) + if x.StripValueTransformer { + t = 1 + } + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_ApplyLabelTransformer: + b.EncodeVarint(19<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ApplyLabelTransformer) + case nil: + default: + return fmt.Errorf("RowFilter.Filter has unexpected type %T", x) + } + return nil +} + +func 
_RowFilter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RowFilter) + switch tag { + case 1: // filter.chain + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Chain) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Chain_{msg} + return true, err + case 2: // filter.interleave + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Interleave) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Interleave_{msg} + return true, err + case 3: // filter.condition + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Condition) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Condition_{msg} + return true, err + case 16: // filter.sink + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_Sink{x != 0} + return true, err + case 17: // filter.pass_all_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_PassAllFilter{x != 0} + return true, err + case 18: // filter.block_all_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_BlockAllFilter{x != 0} + return true, err + case 4: // filter.row_key_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_RowKeyRegexFilter{x} + return true, err + case 14: // filter.row_sample_filter + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Filter = &RowFilter_RowSampleFilter{math.Float64frombits(x)} + return true, err + case 5: // filter.family_name_regex_filter + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &RowFilter_FamilyNameRegexFilter{x} + return true, err + case 6: // filter.column_qualifier_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_ColumnQualifierRegexFilter{x} + return true, err + case 7: // filter.column_range_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ColumnRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_ColumnRangeFilter{msg} + return true, err + case 8: // filter.timestamp_range_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TimestampRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_TimestampRangeFilter{msg} + return true, err + case 9: // filter.value_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_ValueRegexFilter{x} + return true, err + case 15: // filter.value_range_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ValueRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_ValueRangeFilter{msg} + return true, err + case 10: // filter.cells_per_row_offset_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_CellsPerRowOffsetFilter{int32(x)} + return true, err + case 11: // filter.cells_per_row_limit_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_CellsPerRowLimitFilter{int32(x)} + return true, err + case 12: // filter.cells_per_column_limit_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = 
&RowFilter_CellsPerColumnLimitFilter{int32(x)} + return true, err + case 13: // filter.strip_value_transformer + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_StripValueTransformer{x != 0} + return true, err + case 19: // filter.apply_label_transformer + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &RowFilter_ApplyLabelTransformer{x} + return true, err + default: + return false, nil + } +} + +func _RowFilter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RowFilter) + // filter + switch x := m.Filter.(type) { + case *RowFilter_Chain_: + s := proto.Size(x.Chain) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_Interleave_: + s := proto.Size(x.Interleave) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_Condition_: + s := proto.Size(x.Condition) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_Sink: + n += proto.SizeVarint(16<<3 | proto.WireVarint) + n += 1 + case *RowFilter_PassAllFilter: + n += proto.SizeVarint(17<<3 | proto.WireVarint) + n += 1 + case *RowFilter_BlockAllFilter: + n += proto.SizeVarint(18<<3 | proto.WireVarint) + n += 1 + case *RowFilter_RowKeyRegexFilter: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RowKeyRegexFilter))) + n += len(x.RowKeyRegexFilter) + case *RowFilter_RowSampleFilter: + n += proto.SizeVarint(14<<3 | proto.WireFixed64) + n += 8 + case *RowFilter_FamilyNameRegexFilter: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.FamilyNameRegexFilter))) + n += len(x.FamilyNameRegexFilter) + case *RowFilter_ColumnQualifierRegexFilter: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.ColumnQualifierRegexFilter))) + n += len(x.ColumnQualifierRegexFilter) + case *RowFilter_ColumnRangeFilter: + s := proto.Size(x.ColumnRangeFilter) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_TimestampRangeFilter: + s := proto.Size(x.TimestampRangeFilter) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_ValueRegexFilter: + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ValueRegexFilter))) + n += len(x.ValueRegexFilter) + case *RowFilter_ValueRangeFilter: + s := proto.Size(x.ValueRangeFilter) + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_CellsPerRowOffsetFilter: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CellsPerRowOffsetFilter)) + case *RowFilter_CellsPerRowLimitFilter: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CellsPerRowLimitFilter)) + case *RowFilter_CellsPerColumnLimitFilter: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CellsPerColumnLimitFilter)) + case *RowFilter_StripValueTransformer: + n += proto.SizeVarint(13<<3 | proto.WireVarint) + n += 1 + case *RowFilter_ApplyLabelTransformer: + n += proto.SizeVarint(19<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ApplyLabelTransformer))) + n += len(x.ApplyLabelTransformer) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A RowFilter which sends rows through several RowFilters in sequence. +type RowFilter_Chain struct { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. 
+ Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *RowFilter_Chain) Reset() { *m = RowFilter_Chain{} } +func (m *RowFilter_Chain) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Chain) ProtoMessage() {} +func (*RowFilter_Chain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } + +func (m *RowFilter_Chain) GetFilters() []*RowFilter { + if m != nil { + return m.Filters + } + return nil +} + +// A RowFilter which sends each row to each of several component +// RowFilters and interleaves the results. +type RowFilter_Interleave struct { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. + // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // All interleaved filters are executed atomically. 
+ Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *RowFilter_Interleave) Reset() { *m = RowFilter_Interleave{} } +func (m *RowFilter_Interleave) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Interleave) ProtoMessage() {} +func (*RowFilter_Interleave) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 1} } + +func (m *RowFilter_Interleave) GetFilters() []*RowFilter { + if m != nil { + return m.Filters + } + return nil +} + +// A RowFilter which evaluates one of two possible RowFilters, depending on +// whether or not a predicate RowFilter outputs any cells from the input row. +// +// IMPORTANT NOTE: The predicate filter does not execute atomically with the +// true and false filters, which may lead to inconsistent or unexpected +// results. Additionally, Condition filters have poor performance, especially +// when filters are set for the false condition. +type RowFilter_Condition struct { + // If "predicate_filter" outputs any cells, then "true_filter" will be + // evaluated on the input row. Otherwise, "false_filter" will be evaluated. + PredicateFilter *RowFilter `protobuf:"bytes,1,opt,name=predicate_filter,json=predicateFilter" json:"predicate_filter,omitempty"` + // The filter to apply to the input row if "predicate_filter" returns any + // results. If not provided, no results will be returned in the true case. + TrueFilter *RowFilter `protobuf:"bytes,2,opt,name=true_filter,json=trueFilter" json:"true_filter,omitempty"` + // The filter to apply to the input row if "predicate_filter" does not + // return any results. If not provided, no results will be returned in the + // false case. 
+ FalseFilter *RowFilter `protobuf:"bytes,3,opt,name=false_filter,json=falseFilter" json:"false_filter,omitempty"` +} + +func (m *RowFilter_Condition) Reset() { *m = RowFilter_Condition{} } +func (m *RowFilter_Condition) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Condition) ProtoMessage() {} +func (*RowFilter_Condition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 2} } + +func (m *RowFilter_Condition) GetPredicateFilter() *RowFilter { + if m != nil { + return m.PredicateFilter + } + return nil +} + +func (m *RowFilter_Condition) GetTrueFilter() *RowFilter { + if m != nil { + return m.TrueFilter + } + return nil +} + +func (m *RowFilter_Condition) GetFalseFilter() *RowFilter { + if m != nil { + return m.FalseFilter + } + return nil +} + +// Specifies a particular change to be made to the contents of a row. +type Mutation struct { + // Which of the possible Mutation types to apply. + // + // Types that are valid to be assigned to Mutation: + // *Mutation_SetCell_ + // *Mutation_DeleteFromColumn_ + // *Mutation_DeleteFromFamily_ + // *Mutation_DeleteFromRow_ + Mutation isMutation_Mutation `protobuf_oneof:"mutation"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type isMutation_Mutation interface { + isMutation_Mutation() +} + +type Mutation_SetCell_ struct { + SetCell *Mutation_SetCell `protobuf:"bytes,1,opt,name=set_cell,json=setCell,oneof"` +} +type Mutation_DeleteFromColumn_ struct { + DeleteFromColumn *Mutation_DeleteFromColumn `protobuf:"bytes,2,opt,name=delete_from_column,json=deleteFromColumn,oneof"` +} +type Mutation_DeleteFromFamily_ struct { + DeleteFromFamily *Mutation_DeleteFromFamily `protobuf:"bytes,3,opt,name=delete_from_family,json=deleteFromFamily,oneof"` +} +type Mutation_DeleteFromRow_ struct { + 
DeleteFromRow *Mutation_DeleteFromRow `protobuf:"bytes,4,opt,name=delete_from_row,json=deleteFromRow,oneof"` +} + +func (*Mutation_SetCell_) isMutation_Mutation() {} +func (*Mutation_DeleteFromColumn_) isMutation_Mutation() {} +func (*Mutation_DeleteFromFamily_) isMutation_Mutation() {} +func (*Mutation_DeleteFromRow_) isMutation_Mutation() {} + +func (m *Mutation) GetMutation() isMutation_Mutation { + if m != nil { + return m.Mutation + } + return nil +} + +func (m *Mutation) GetSetCell() *Mutation_SetCell { + if x, ok := m.GetMutation().(*Mutation_SetCell_); ok { + return x.SetCell + } + return nil +} + +func (m *Mutation) GetDeleteFromColumn() *Mutation_DeleteFromColumn { + if x, ok := m.GetMutation().(*Mutation_DeleteFromColumn_); ok { + return x.DeleteFromColumn + } + return nil +} + +func (m *Mutation) GetDeleteFromFamily() *Mutation_DeleteFromFamily { + if x, ok := m.GetMutation().(*Mutation_DeleteFromFamily_); ok { + return x.DeleteFromFamily + } + return nil +} + +func (m *Mutation) GetDeleteFromRow() *Mutation_DeleteFromRow { + if x, ok := m.GetMutation().(*Mutation_DeleteFromRow_); ok { + return x.DeleteFromRow + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Mutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Mutation_OneofMarshaler, _Mutation_OneofUnmarshaler, _Mutation_OneofSizer, []interface{}{ + (*Mutation_SetCell_)(nil), + (*Mutation_DeleteFromColumn_)(nil), + (*Mutation_DeleteFromFamily_)(nil), + (*Mutation_DeleteFromRow_)(nil), + } +} + +func _Mutation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Mutation) + // mutation + switch x := m.Mutation.(type) { + case *Mutation_SetCell_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SetCell); err != nil { + return err + } + case *Mutation_DeleteFromColumn_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromColumn); err != nil { + return err + } + case *Mutation_DeleteFromFamily_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromFamily); err != nil { + return err + } + case *Mutation_DeleteFromRow_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromRow); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Mutation.Mutation has unexpected type %T", x) + } + return nil +} + +func _Mutation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Mutation) + switch tag { + case 1: // mutation.set_cell + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_SetCell) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_SetCell_{msg} + return true, err + case 2: // mutation.delete_from_column + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromColumn) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromColumn_{msg} + return true, err + case 3: // mutation.delete_from_family + if wire != 
proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromFamily) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromFamily_{msg} + return true, err + case 4: // mutation.delete_from_row + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromRow) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromRow_{msg} + return true, err + default: + return false, nil + } +} + +func _Mutation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Mutation) + // mutation + switch x := m.Mutation.(type) { + case *Mutation_SetCell_: + s := proto.Size(x.SetCell) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_DeleteFromColumn_: + s := proto.Size(x.DeleteFromColumn) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_DeleteFromFamily_: + s := proto.Size(x.DeleteFromFamily) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_DeleteFromRow_: + s := proto.Size(x.DeleteFromRow) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Mutation which sets the value of the specified cell. +type Mutation_SetCell struct { + // The name of the family into which new data should be written. + // Must match [-_.a-zA-Z0-9]+ + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The qualifier of the column into which new data should be written. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,json=columnQualifier,proto3" json:"column_qualifier,omitempty"` + // The timestamp of the cell into which new data should be written. 
+ // Use -1 for current Bigtable server time. + // Otherwise, the client should set this value itself, noting that the + // default value is a timestamp of zero if the field is left unspecified. + // Values must match the "granularity" of the table (e.g. micros, millis). + TimestampMicros int64 `protobuf:"varint,3,opt,name=timestamp_micros,json=timestampMicros" json:"timestamp_micros,omitempty"` + // The value to be written into the specified cell. + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Mutation_SetCell) Reset() { *m = Mutation_SetCell{} } +func (m *Mutation_SetCell) String() string { return proto.CompactTextString(m) } +func (*Mutation_SetCell) ProtoMessage() {} +func (*Mutation_SetCell) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } + +func (m *Mutation_SetCell) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +func (m *Mutation_SetCell) GetColumnQualifier() []byte { + if m != nil { + return m.ColumnQualifier + } + return nil +} + +func (m *Mutation_SetCell) GetTimestampMicros() int64 { + if m != nil { + return m.TimestampMicros + } + return 0 +} + +func (m *Mutation_SetCell) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// A Mutation which deletes cells from the specified column, optionally +// restricting the deletions to a given timestamp range. +type Mutation_DeleteFromColumn struct { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The qualifier of the column from which cells should be deleted. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,json=columnQualifier,proto3" json:"column_qualifier,omitempty"` + // The range of timestamps within which cells should be deleted. 
+ TimeRange *TimestampRange `protobuf:"bytes,3,opt,name=time_range,json=timeRange" json:"time_range,omitempty"` +} + +func (m *Mutation_DeleteFromColumn) Reset() { *m = Mutation_DeleteFromColumn{} } +func (m *Mutation_DeleteFromColumn) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromColumn) ProtoMessage() {} +func (*Mutation_DeleteFromColumn) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 1} } + +func (m *Mutation_DeleteFromColumn) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +func (m *Mutation_DeleteFromColumn) GetColumnQualifier() []byte { + if m != nil { + return m.ColumnQualifier + } + return nil +} + +func (m *Mutation_DeleteFromColumn) GetTimeRange() *TimestampRange { + if m != nil { + return m.TimeRange + } + return nil +} + +// A Mutation which deletes all cells from the specified column family. +type Mutation_DeleteFromFamily struct { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` +} + +func (m *Mutation_DeleteFromFamily) Reset() { *m = Mutation_DeleteFromFamily{} } +func (m *Mutation_DeleteFromFamily) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromFamily) ProtoMessage() {} +func (*Mutation_DeleteFromFamily) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 2} } + +func (m *Mutation_DeleteFromFamily) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +// A Mutation which deletes all cells from the containing row. 
+type Mutation_DeleteFromRow struct { +} + +func (m *Mutation_DeleteFromRow) Reset() { *m = Mutation_DeleteFromRow{} } +func (m *Mutation_DeleteFromRow) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromRow) ProtoMessage() {} +func (*Mutation_DeleteFromRow) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 3} } + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +type ReadModifyWriteRule struct { + // The name of the family to which the read/modify/write should be applied. + // Must match [-_.a-zA-Z0-9]+ + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,json=columnQualifier,proto3" json:"column_qualifier,omitempty"` + // The rule used to determine the column's new latest value from its current + // latest value. 
+ // + // Types that are valid to be assigned to Rule: + // *ReadModifyWriteRule_AppendValue + // *ReadModifyWriteRule_IncrementAmount + Rule isReadModifyWriteRule_Rule `protobuf_oneof:"rule"` +} + +func (m *ReadModifyWriteRule) Reset() { *m = ReadModifyWriteRule{} } +func (m *ReadModifyWriteRule) String() string { return proto.CompactTextString(m) } +func (*ReadModifyWriteRule) ProtoMessage() {} +func (*ReadModifyWriteRule) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +type isReadModifyWriteRule_Rule interface { + isReadModifyWriteRule_Rule() +} + +type ReadModifyWriteRule_AppendValue struct { + AppendValue []byte `protobuf:"bytes,3,opt,name=append_value,json=appendValue,proto3,oneof"` +} +type ReadModifyWriteRule_IncrementAmount struct { + IncrementAmount int64 `protobuf:"varint,4,opt,name=increment_amount,json=incrementAmount,oneof"` +} + +func (*ReadModifyWriteRule_AppendValue) isReadModifyWriteRule_Rule() {} +func (*ReadModifyWriteRule_IncrementAmount) isReadModifyWriteRule_Rule() {} + +func (m *ReadModifyWriteRule) GetRule() isReadModifyWriteRule_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (m *ReadModifyWriteRule) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +func (m *ReadModifyWriteRule) GetColumnQualifier() []byte { + if m != nil { + return m.ColumnQualifier + } + return nil +} + +func (m *ReadModifyWriteRule) GetAppendValue() []byte { + if x, ok := m.GetRule().(*ReadModifyWriteRule_AppendValue); ok { + return x.AppendValue + } + return nil +} + +func (m *ReadModifyWriteRule) GetIncrementAmount() int64 { + if x, ok := m.GetRule().(*ReadModifyWriteRule_IncrementAmount); ok { + return x.IncrementAmount + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ReadModifyWriteRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ReadModifyWriteRule_OneofMarshaler, _ReadModifyWriteRule_OneofUnmarshaler, _ReadModifyWriteRule_OneofSizer, []interface{}{ + (*ReadModifyWriteRule_AppendValue)(nil), + (*ReadModifyWriteRule_IncrementAmount)(nil), + } +} + +func _ReadModifyWriteRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadModifyWriteRule) + // rule + switch x := m.Rule.(type) { + case *ReadModifyWriteRule_AppendValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.AppendValue) + case *ReadModifyWriteRule_IncrementAmount: + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IncrementAmount)) + case nil: + default: + return fmt.Errorf("ReadModifyWriteRule.Rule has unexpected type %T", x) + } + return nil +} + +func _ReadModifyWriteRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadModifyWriteRule) + switch tag { + case 3: // rule.append_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Rule = &ReadModifyWriteRule_AppendValue{x} + return true, err + case 4: // rule.increment_amount + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Rule = &ReadModifyWriteRule_IncrementAmount{int64(x)} + return true, err + default: + return false, nil + } +} + +func _ReadModifyWriteRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ReadModifyWriteRule) + // rule + switch x := m.Rule.(type) { + case *ReadModifyWriteRule_AppendValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AppendValue))) + n += len(x.AppendValue) + case *ReadModifyWriteRule_IncrementAmount: + n += proto.SizeVarint(4<<3 | 
proto.WireVarint) + n += proto.SizeVarint(uint64(x.IncrementAmount)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*Row)(nil), "google.bigtable.v1.Row") + proto.RegisterType((*Family)(nil), "google.bigtable.v1.Family") + proto.RegisterType((*Column)(nil), "google.bigtable.v1.Column") + proto.RegisterType((*Cell)(nil), "google.bigtable.v1.Cell") + proto.RegisterType((*RowRange)(nil), "google.bigtable.v1.RowRange") + proto.RegisterType((*RowSet)(nil), "google.bigtable.v1.RowSet") + proto.RegisterType((*ColumnRange)(nil), "google.bigtable.v1.ColumnRange") + proto.RegisterType((*TimestampRange)(nil), "google.bigtable.v1.TimestampRange") + proto.RegisterType((*ValueRange)(nil), "google.bigtable.v1.ValueRange") + proto.RegisterType((*RowFilter)(nil), "google.bigtable.v1.RowFilter") + proto.RegisterType((*RowFilter_Chain)(nil), "google.bigtable.v1.RowFilter.Chain") + proto.RegisterType((*RowFilter_Interleave)(nil), "google.bigtable.v1.RowFilter.Interleave") + proto.RegisterType((*RowFilter_Condition)(nil), "google.bigtable.v1.RowFilter.Condition") + proto.RegisterType((*Mutation)(nil), "google.bigtable.v1.Mutation") + proto.RegisterType((*Mutation_SetCell)(nil), "google.bigtable.v1.Mutation.SetCell") + proto.RegisterType((*Mutation_DeleteFromColumn)(nil), "google.bigtable.v1.Mutation.DeleteFromColumn") + proto.RegisterType((*Mutation_DeleteFromFamily)(nil), "google.bigtable.v1.Mutation.DeleteFromFamily") + proto.RegisterType((*Mutation_DeleteFromRow)(nil), "google.bigtable.v1.Mutation.DeleteFromRow") + proto.RegisterType((*ReadModifyWriteRule)(nil), "google.bigtable.v1.ReadModifyWriteRule") +} + +func init() { proto.RegisterFile("google/bigtable/v1/bigtable_data.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdb, 0x6e, 0x1b, 0x37, + 0x13, 
0xd6, 0x5a, 0xb2, 0x0e, 0xb3, 0x76, 0x24, 0xd3, 0x27, 0x45, 0x49, 0xfe, 0x18, 0xca, 0x8f, + 0x56, 0x49, 0x5b, 0x39, 0x71, 0x82, 0x36, 0x6d, 0x82, 0x22, 0x56, 0x0e, 0x55, 0x9a, 0x33, 0x63, + 0xa4, 0x40, 0x80, 0x62, 0x4b, 0x6b, 0x29, 0x75, 0x11, 0xee, 0x52, 0xe5, 0xae, 0xac, 0xe8, 0x45, + 0x7a, 0xdf, 0xe7, 0xe8, 0x5d, 0x5f, 0xa2, 0xaf, 0xd1, 0xcb, 0x5e, 0xf4, 0xa2, 0xe0, 0x61, 0x4f, + 0xb2, 0x62, 0x1b, 0x45, 0xee, 0x96, 0x9c, 0xef, 0xfb, 0x66, 0x38, 0x1c, 0x0e, 0xb9, 0xf0, 0xc9, + 0x88, 0xf3, 0x11, 0xa3, 0xbb, 0x87, 0xde, 0x28, 0x22, 0x87, 0x8c, 0xee, 0x1e, 0xdd, 0x48, 0xbe, + 0x1d, 0x97, 0x44, 0xa4, 0x3b, 0x16, 0x3c, 0xe2, 0x08, 0x69, 0x5c, 0x37, 0xb6, 0x75, 0x8f, 0x6e, + 0xb4, 0x5f, 0x40, 0x11, 0xf3, 0x29, 0x6a, 0x40, 0xf1, 0x1d, 0x9d, 0x35, 0xad, 0x1d, 0xab, 0xb3, + 0x82, 0xe5, 0x27, 0xfa, 0x12, 0xaa, 0x43, 0xe2, 0x7b, 0xcc, 0xa3, 0x61, 0x73, 0x69, 0xa7, 0xd8, + 0xb1, 0xf7, 0x5a, 0xdd, 0xe3, 0xfc, 0xee, 0x23, 0x89, 0x99, 0xe1, 0x04, 0xdb, 0xc6, 0x50, 0xd6, + 0x73, 0x08, 0x41, 0x29, 0x20, 0x3e, 0x55, 0xa2, 0x35, 0xac, 0xbe, 0xd1, 0x2d, 0xa8, 0x0c, 0x38, + 0x9b, 0xf8, 0xc1, 0x89, 0xa2, 0xf7, 0x15, 0x04, 0xc7, 0xd0, 0xf6, 0x1b, 0x28, 0xeb, 0x29, 0x74, + 0x11, 0x6a, 0xbf, 0x4c, 0x08, 0xf3, 0x86, 0x1e, 0x15, 0x26, 0xda, 0x74, 0x02, 0x75, 0x61, 0x79, + 0x40, 0x19, 0x8b, 0xb5, 0x9b, 0x0b, 0xb5, 0x29, 0x63, 0x58, 0xc3, 0xda, 0x0e, 0x94, 0xe4, 0x10, + 0x5d, 0x85, 0x46, 0xe4, 0xf9, 0x34, 0x8c, 0x88, 0x3f, 0x76, 0x7c, 0x6f, 0x20, 0x78, 0xa8, 0xc4, + 0x8b, 0xb8, 0x9e, 0xcc, 0x3f, 0x53, 0xd3, 0x68, 0x03, 0x96, 0x8f, 0x08, 0x9b, 0xd0, 0xe6, 0x92, + 0x72, 0xae, 0x07, 0x68, 0x0b, 0xca, 0x8c, 0x1c, 0x52, 0x16, 0x36, 0x8b, 0x3b, 0xc5, 0x4e, 0x0d, + 0x9b, 0x51, 0xfb, 0x1e, 0x54, 0x31, 0x9f, 0x62, 0x12, 0x8c, 0x28, 0xba, 0x00, 0xb5, 0x30, 0x22, + 0x22, 0x72, 0x64, 0xa2, 0x35, 0xbb, 0xaa, 0x26, 0x9e, 0xd0, 0x19, 0xda, 0x86, 0x0a, 0x0d, 0x5c, + 0x65, 0x2a, 0x2a, 0x53, 0x99, 0x06, 0xee, 0x13, 0x3a, 0x6b, 0xff, 0x04, 0x65, 0xcc, 0xa7, 0xaf, + 0x69, 0x84, 0xce, 0x43, 0x55, 0xf0, 0xa9, 
0x84, 0xc8, 0xe0, 0x8a, 0x9d, 0x15, 0x5c, 0x11, 0x7c, + 0xfa, 0x84, 0xce, 0x42, 0x74, 0x07, 0x40, 0x9a, 0x84, 0xf4, 0x13, 0x2f, 0xfe, 0xe2, 0xa2, 0xc5, + 0xc7, 0xc1, 0xe0, 0x9a, 0x30, 0x5f, 0x61, 0xfb, 0x8f, 0x25, 0xb0, 0x4d, 0xc2, 0x55, 0x9c, 0x97, + 0xc1, 0x56, 0x9b, 0x39, 0x73, 0x32, 0xbb, 0x07, 0x7a, 0xea, 0xb9, 0xdc, 0xc3, 0xbb, 0x70, 0x5e, + 0x2f, 0x24, 0x49, 0xbc, 0xe3, 0x05, 0x03, 0x36, 0x09, 0xbd, 0x23, 0x93, 0x96, 0x7e, 0x01, 0x6f, + 0x2b, 0xc8, 0xab, 0x18, 0xf1, 0x38, 0x06, 0x2c, 0x62, 0xd3, 0xf7, 0x31, 0xbb, 0xb8, 0x98, 0xfd, + 0x30, 0x06, 0xa0, 0xdb, 0xb0, 0x2d, 0xf3, 0xb4, 0xc8, 0x73, 0x49, 0x71, 0x2d, 0xbc, 0x49, 0x03, + 0x77, 0x81, 0xdf, 0x63, 0xcc, 0xd4, 0xeb, 0xf2, 0x22, 0x66, 0xe2, 0xb3, 0xb7, 0x06, 0xf5, 0xb9, + 0x88, 0x7b, 0x75, 0x58, 0xcd, 0x89, 0xb5, 0xdf, 0xc3, 0xb9, 0x83, 0xb8, 0x52, 0x74, 0x1a, 0x6f, + 0xc1, 0x96, 0x66, 0x7d, 0xa0, 0xb2, 0x36, 0x94, 0xf5, 0x60, 0xae, 0xbc, 0xae, 0xc3, 0x86, 0x14, + 0x3e, 0xc6, 0x59, 0x52, 0x1c, 0x44, 0x03, 0x77, 0x8e, 0xd1, 0xfe, 0xdb, 0x02, 0x78, 0x23, 0x8b, + 0x30, 0x76, 0xbb, 0xa9, 0xdd, 0xaa, 0xc2, 0xcc, 0xa4, 0xc7, 0x32, 0xa9, 0x5d, 0x57, 0x66, 0xc5, + 0x48, 0x93, 0x33, 0xc7, 0x4a, 0x53, 0xb3, 0x74, 0x9c, 0x95, 0x6e, 0xc6, 0x75, 0x58, 0x97, 0xc1, + 0xce, 0x7b, 0x2a, 0x9a, 0x74, 0xae, 0xd1, 0xc0, 0x9d, 0xf3, 0x93, 0x63, 0xa4, 0x5e, 0x4a, 0xf3, + 0x8c, 0x34, 0xf9, 0xab, 0x60, 0x67, 0x22, 0xeb, 0xd9, 0x50, 0x4b, 0x04, 0xda, 0xff, 0xd8, 0x50, + 0xc3, 0x7c, 0xfa, 0xc8, 0x63, 0x11, 0x15, 0xe8, 0x0e, 0x2c, 0x0f, 0x7e, 0x26, 0x5e, 0xa0, 0x56, + 0x6a, 0xef, 0x5d, 0xf9, 0x40, 0xfd, 0x6b, 0x74, 0xf7, 0xbe, 0x84, 0xf6, 0x0b, 0x58, 0x73, 0xd0, + 0xf7, 0x00, 0x5e, 0x10, 0x51, 0xc1, 0x28, 0x31, 0xab, 0xb6, 0xf7, 0x3a, 0x27, 0x2b, 0x3c, 0x4e, + 0xf0, 0xfd, 0x02, 0xce, 0xb0, 0xd1, 0x77, 0x50, 0x1b, 0xf0, 0xc0, 0xf5, 0x22, 0x8f, 0x07, 0x2a, + 0x19, 0xf6, 0xde, 0xa7, 0xa7, 0x04, 0x13, 0xc3, 0xfb, 0x05, 0x9c, 0x72, 0xd1, 0x06, 0x94, 0x42, + 0x2f, 0x78, 0xd7, 0x6c, 0xec, 0x58, 0x9d, 0x6a, 0xbf, 0x80, 0xd5, 0x08, 0x75, 
0xa0, 0x3e, 0x26, + 0x61, 0xe8, 0x10, 0xc6, 0x9c, 0xa1, 0xe2, 0x37, 0xd7, 0x0c, 0x60, 0x55, 0x1a, 0xf6, 0x19, 0x33, + 0x19, 0xb9, 0x06, 0x8d, 0x43, 0xc6, 0x07, 0xef, 0xb2, 0x50, 0x64, 0xa0, 0xe7, 0x94, 0x25, 0xc5, + 0xde, 0x80, 0x0d, 0xd3, 0x5d, 0x1c, 0x41, 0x47, 0xf4, 0x7d, 0x8c, 0x2f, 0x99, 0x02, 0x58, 0xd3, + 0xbd, 0x06, 0x4b, 0x9b, 0xa1, 0x7c, 0x0e, 0x72, 0xd2, 0x09, 0x89, 0x3f, 0x66, 0x34, 0xc6, 0x9f, + 0xdb, 0xb1, 0x3a, 0x56, 0xbf, 0x80, 0xeb, 0x82, 0x4f, 0x5f, 0x2b, 0x8b, 0x41, 0x7f, 0x0d, 0xcd, + 0x4c, 0x5b, 0xc9, 0x3b, 0x91, 0x07, 0xb0, 0xd6, 0x2f, 0xe0, 0xcd, 0xb4, 0xcb, 0x64, 0x1d, 0xdd, + 0x87, 0x4b, 0xfa, 0x26, 0xc8, 0x9c, 0xde, 0x1c, 0xbf, 0x6c, 0x82, 0x6c, 0x69, 0x58, 0x72, 0x86, + 0xb3, 0x22, 0xaf, 0x60, 0xdd, 0x88, 0xa8, 0x36, 0x19, 0x53, 0x2b, 0x6a, 0x7f, 0x2e, 0x9f, 0x70, + 0x0b, 0x49, 0xb4, 0x4c, 0xc0, 0x20, 0x1d, 0x1a, 0xc9, 0xb7, 0xb0, 0x95, 0x1e, 0xd4, 0x9c, 0x6a, + 0x55, 0xa9, 0xb6, 0x17, 0xa9, 0xe6, 0xdb, 0x44, 0xbf, 0x80, 0x37, 0xa2, 0xdc, 0x8c, 0xd1, 0xee, + 0x02, 0xd2, 0xa7, 0x24, 0xb7, 0xd0, 0x9a, 0x59, 0x68, 0x43, 0xd9, 0xb2, 0xcb, 0x7b, 0x9e, 0xe0, + 0xb3, 0x71, 0xd4, 0x55, 0x1c, 0xff, 0x5b, 0x14, 0x47, 0xda, 0x33, 0x52, 0xbd, 0x8c, 0xff, 0x6f, + 0xe1, 0x82, 0xba, 0x23, 0x9d, 0xb1, 0x4c, 0x36, 0x9f, 0x3a, 0x7c, 0x38, 0x0c, 0x69, 0x14, 0x0b, + 0xc3, 0x8e, 0xd5, 0x59, 0x96, 0x8d, 0x5a, 0x81, 0x5e, 0x52, 0x81, 0xf9, 0xf4, 0x85, 0x42, 0x18, + 0xfe, 0x5d, 0x68, 0xe5, 0xf9, 0xcc, 0xf3, 0xbd, 0x84, 0x6e, 0x1b, 0xfa, 0x56, 0x86, 0xfe, 0x54, + 0x02, 0x0c, 0xbb, 0x07, 0x97, 0x52, 0xb6, 0xd9, 0xb6, 0x9c, 0xc0, 0x8a, 0x11, 0x38, 0x1f, 0x0b, + 0xe8, 0xcd, 0xca, 0x6a, 0xdc, 0x86, 0xed, 0x30, 0x12, 0xde, 0xd8, 0x74, 0x9b, 0x48, 0x90, 0x20, + 0x1c, 0x72, 0xe1, 0x53, 0xd1, 0x5c, 0x35, 0x87, 0x60, 0x53, 0x01, 0x54, 0x26, 0x0e, 0x52, 0xb3, + 0x64, 0x92, 0xf1, 0x98, 0xcd, 0x1c, 0x75, 0x8b, 0xe7, 0x98, 0xeb, 0x71, 0xa5, 0x2a, 0xc0, 0x53, + 0x69, 0xcf, 0x30, 0x5b, 0xf7, 0x60, 0x59, 0x35, 0x16, 0xf4, 0x15, 0x54, 0x74, 0xa4, 0xfa, 0xae, + 0xb6, 0xf7, 0x2e, 
0x9d, 0xd8, 0x01, 0x70, 0x8c, 0x6e, 0x3d, 0x04, 0x48, 0x1b, 0xcb, 0x7f, 0x97, + 0xf9, 0xd3, 0x82, 0x5a, 0xd2, 0x55, 0x50, 0x1f, 0x1a, 0x63, 0x41, 0x5d, 0x6f, 0x40, 0xa2, 0xa4, + 0x34, 0x74, 0x97, 0x3c, 0x45, 0xaf, 0x9e, 0xd0, 0x92, 0xb2, 0xb0, 0x23, 0x31, 0x49, 0x44, 0x96, + 0xce, 0x22, 0x02, 0x92, 0x61, 0xf8, 0xf7, 0x60, 0x65, 0x48, 0x58, 0x98, 0x08, 0x14, 0xcf, 0x22, + 0x60, 0x2b, 0x8a, 0x1e, 0xf4, 0xaa, 0x50, 0xd6, 0xdc, 0xf6, 0x5f, 0xcb, 0x50, 0x7d, 0x36, 0x89, + 0x88, 0x5a, 0xe2, 0x3e, 0x54, 0x65, 0x79, 0xca, 0x72, 0x30, 0x4b, 0xfb, 0xff, 0x22, 0xd1, 0x18, + 0xdf, 0x7d, 0x4d, 0x23, 0xf9, 0xf4, 0xeb, 0x17, 0x70, 0x25, 0xd4, 0x9f, 0xe8, 0x47, 0x40, 0x2e, + 0x65, 0x54, 0xa6, 0x48, 0x70, 0xdf, 0x94, 0x9d, 0x59, 0xe2, 0x17, 0x27, 0x8a, 0x3d, 0x50, 0xb4, + 0x47, 0x82, 0xfb, 0xba, 0x0c, 0xe5, 0x89, 0x72, 0xe7, 0xe6, 0xe6, 0xe5, 0x75, 0xab, 0x33, 0x09, + 0x38, 0xab, 0xbc, 0x7e, 0x59, 0xe7, 0xe5, 0xcd, 0x6b, 0xfb, 0x00, 0xea, 0x59, 0x79, 0xc1, 0xa7, + 0xaa, 0x77, 0xdb, 0x7b, 0xd7, 0xce, 0xa8, 0x8d, 0xf9, 0x54, 0x5e, 0x21, 0x6e, 0x76, 0xa2, 0xf5, + 0xab, 0x05, 0x15, 0x93, 0xaa, 0xd3, 0x1f, 0x86, 0x57, 0xa1, 0x31, 0xdf, 0xa7, 0xcd, 0x43, 0xb7, + 0x3e, 0xd7, 0x98, 0x17, 0xbe, 0xb8, 0x8b, 0xa7, 0xbc, 0xb8, 0x4b, 0x99, 0x17, 0x77, 0xeb, 0x37, + 0x0b, 0x1a, 0xf3, 0x69, 0xff, 0xa8, 0x11, 0xee, 0x03, 0xc8, 0x48, 0x74, 0x3f, 0x35, 0xdb, 0x74, + 0x86, 0x86, 0x8e, 0x6b, 0x92, 0xa5, 0x3e, 0x5b, 0x37, 0xb3, 0x21, 0x9a, 0x6d, 0x3a, 0x2d, 0xc4, + 0x56, 0x1d, 0x56, 0x73, 0x7b, 0xd2, 0x03, 0xa8, 0xfa, 0x66, 0xb7, 0xda, 0xbf, 0x5b, 0xb0, 0x8e, + 0x29, 0x71, 0x9f, 0x71, 0xd7, 0x1b, 0xce, 0x7e, 0x10, 0x5e, 0x44, 0xf1, 0x84, 0xd1, 0x8f, 0xba, + 0xf0, 0x2b, 0xb0, 0x42, 0xc6, 0xe3, 0xe4, 0x95, 0x95, 0xbc, 0xc9, 0x6d, 0x3d, 0xab, 0xba, 0x25, + 0xfa, 0x0c, 0x1a, 0x5e, 0x30, 0x10, 0xd4, 0xa7, 0x41, 0xe4, 0x10, 0x9f, 0x4f, 0x82, 0x48, 0xed, + 0x4f, 0x51, 0x5e, 0xfd, 0x89, 0x65, 0x5f, 0x19, 0x7a, 0x65, 0x28, 0x89, 0x09, 0xa3, 0x3d, 0x0f, + 0xb6, 0x06, 0xdc, 0x5f, 0x90, 0xc3, 0xde, 0x5a, 0xcf, 
0x0c, 0x1e, 0x90, 0x88, 0xbc, 0x94, 0x3f, + 0xab, 0x2f, 0xad, 0xb7, 0xdf, 0x18, 0xe0, 0x88, 0x33, 0x12, 0x8c, 0xba, 0x5c, 0x8c, 0x76, 0x47, + 0x34, 0x50, 0xbf, 0xb2, 0xbb, 0xda, 0x44, 0xc6, 0x5e, 0x98, 0xfd, 0xeb, 0xbd, 0x13, 0x7f, 0x1f, + 0x96, 0x15, 0xec, 0xe6, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x54, 0x66, 0x7c, 0x1b, 0x0f, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service.pb.go new file mode 100644 index 000000000..bf22713cc --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service.pb.go @@ -0,0 +1,387 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/v1/bigtable_service.proto + +package bigtable + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for BigtableService service + +type BigtableServiceClient interface { + // Streams back the contents of all requested rows, optionally applying + // the same Reader filter to each. Depending on their size, rows may be + // broken up across multiple responses, but atomicity of each row will still + // be preserved. 
+ ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by 'mutation'. + MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + MutateRows(ctx context.Context, in *MutateRowsRequest, opts ...grpc.CallOption) (*MutateRowsResponse, error) + // Mutates a row atomically based on the output of a predicate Reader filter. + CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) + // Modifies a row atomically, reading the latest existing timestamp/value from + // the specified columns and writing a new value at + // max(existing timestamp, current server time) based on pre-defined + // read/modify/write rules. Returns the new contents of all modified cells. 
+ ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*Row, error) +} + +type bigtableServiceClient struct { + cc *grpc.ClientConn +} + +func NewBigtableServiceClient(cc *grpc.ClientConn) BigtableServiceClient { + return &bigtableServiceClient{cc} +} + +func (c *bigtableServiceClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[0], c.cc, "/google.bigtable.v1.BigtableService/ReadRows", opts...) + if err != nil { + return nil, err + } + x := &bigtableServiceReadRowsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BigtableService_ReadRowsClient interface { + Recv() (*ReadRowsResponse, error) + grpc.ClientStream +} + +type bigtableServiceReadRowsClient struct { + grpc.ClientStream +} + +func (x *bigtableServiceReadRowsClient) Recv() (*ReadRowsResponse, error) { + m := new(ReadRowsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *bigtableServiceClient) SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) { + stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[1], c.cc, "/google.bigtable.v1.BigtableService/SampleRowKeys", opts...) 
+ if err != nil { + return nil, err + } + x := &bigtableServiceSampleRowKeysClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BigtableService_SampleRowKeysClient interface { + Recv() (*SampleRowKeysResponse, error) + grpc.ClientStream +} + +type bigtableServiceSampleRowKeysClient struct { + grpc.ClientStream +} + +func (x *bigtableServiceSampleRowKeysClient) Recv() (*SampleRowKeysResponse, error) { + m := new(SampleRowKeysResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *bigtableServiceClient) MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRow", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableServiceClient) MutateRows(ctx context.Context, in *MutateRowsRequest, opts ...grpc.CallOption) (*MutateRowsResponse, error) { + out := new(MutateRowsResponse) + err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRows", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableServiceClient) CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) { + out := new(CheckAndMutateRowResponse) + err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/CheckAndMutateRow", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableServiceClient) ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*Row, error) { + out := new(Row) + err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/ReadModifyWriteRow", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for BigtableService service + +type BigtableServiceServer interface { + // Streams back the contents of all requested rows, optionally applying + // the same Reader filter to each. Depending on their size, rows may be + // broken up across multiple responses, but atomicity of each row will still + // be preserved. + ReadRows(*ReadRowsRequest, BigtableService_ReadRowsServer) error + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + SampleRowKeys(*SampleRowKeysRequest, BigtableService_SampleRowKeysServer) error + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by 'mutation'. + MutateRow(context.Context, *MutateRowRequest) (*google_protobuf2.Empty, error) + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + MutateRows(context.Context, *MutateRowsRequest) (*MutateRowsResponse, error) + // Mutates a row atomically based on the output of a predicate Reader filter. + CheckAndMutateRow(context.Context, *CheckAndMutateRowRequest) (*CheckAndMutateRowResponse, error) + // Modifies a row atomically, reading the latest existing timestamp/value from + // the specified columns and writing a new value at + // max(existing timestamp, current server time) based on pre-defined + // read/modify/write rules. 
Returns the new contents of all modified cells. + ReadModifyWriteRow(context.Context, *ReadModifyWriteRowRequest) (*Row, error) +} + +func RegisterBigtableServiceServer(s *grpc.Server, srv BigtableServiceServer) { + s.RegisterService(&_BigtableService_serviceDesc, srv) +} + +func _BigtableService_ReadRows_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadRowsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BigtableServiceServer).ReadRows(m, &bigtableServiceReadRowsServer{stream}) +} + +type BigtableService_ReadRowsServer interface { + Send(*ReadRowsResponse) error + grpc.ServerStream +} + +type bigtableServiceReadRowsServer struct { + grpc.ServerStream +} + +func (x *bigtableServiceReadRowsServer) Send(m *ReadRowsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BigtableService_SampleRowKeys_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SampleRowKeysRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BigtableServiceServer).SampleRowKeys(m, &bigtableServiceSampleRowKeysServer{stream}) +} + +type BigtableService_SampleRowKeysServer interface { + Send(*SampleRowKeysResponse) error + grpc.ServerStream +} + +type bigtableServiceSampleRowKeysServer struct { + grpc.ServerStream +} + +func (x *bigtableServiceSampleRowKeysServer) Send(m *SampleRowKeysResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BigtableService_MutateRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MutateRowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableServiceServer).MutateRow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.v1.BigtableService/MutateRow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(BigtableServiceServer).MutateRow(ctx, req.(*MutateRowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableService_MutateRows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MutateRowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableServiceServer).MutateRows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.v1.BigtableService/MutateRows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableServiceServer).MutateRows(ctx, req.(*MutateRowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableService_CheckAndMutateRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckAndMutateRowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableServiceServer).CheckAndMutateRow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.v1.BigtableService/CheckAndMutateRow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableServiceServer).CheckAndMutateRow(ctx, req.(*CheckAndMutateRowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BigtableService_ReadModifyWriteRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadModifyWriteRowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableServiceServer).ReadModifyWriteRow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.bigtable.v1.BigtableService/ReadModifyWriteRow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableServiceServer).ReadModifyWriteRow(ctx, req.(*ReadModifyWriteRowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BigtableService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.v1.BigtableService", + HandlerType: (*BigtableServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MutateRow", + Handler: _BigtableService_MutateRow_Handler, + }, + { + MethodName: "MutateRows", + Handler: _BigtableService_MutateRows_Handler, + }, + { + MethodName: "CheckAndMutateRow", + Handler: _BigtableService_CheckAndMutateRow_Handler, + }, + { + MethodName: "ReadModifyWriteRow", + Handler: _BigtableService_ReadModifyWriteRow_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ReadRows", + Handler: _BigtableService_ReadRows_Handler, + ServerStreams: true, + }, + { + StreamName: "SampleRowKeys", + Handler: _BigtableService_SampleRowKeys_Handler, + ServerStreams: true, + }, + }, + Metadata: "google/bigtable/v1/bigtable_service.proto", +} + +func init() { proto.RegisterFile("google/bigtable/v1/bigtable_service.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 521 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcd, 0x6e, 0xd4, 0x30, + 0x10, 0xc7, 0x65, 0x0e, 0xa8, 0x58, 0x42, 0x08, 0x4b, 0x14, 0x69, 0xe1, 0x14, 0xa0, 0xa2, 0x11, + 0x8d, 0xdb, 0x72, 0x0b, 0xe2, 0xd0, 0x45, 0x50, 0x21, 0x58, 0x51, 0xa5, 0xe2, 0x43, 0xe5, 0xb0, + 0x78, 0x93, 0x69, 0x08, 0x4d, 0xe2, 0x60, 0x7b, 0x37, 0x5a, 0xaa, 0x5e, 0x38, 0x71, 0xe7, 0x11, + 0x10, 0x17, 0x5e, 0x80, 0x23, 0xef, 0x00, 0x67, 0x6e, 0x3c, 0x08, 0xb2, 0x63, 0x2f, 0x2c, 0x0d, + 0xcb, 0x8a, 0xee, 0x29, 0x8e, 0xe6, 0x3f, 0x33, 0xbf, 0xff, 0xf8, 0x03, 0xaf, 0xa6, 0x9c, 0xa7, + 0x39, 0xd0, 0x41, 0x96, 0x2a, 0x36, 0xc8, 0x81, 0x8e, 0x36, 0x26, 
0xeb, 0xbe, 0x04, 0x31, 0xca, + 0x62, 0x08, 0x2a, 0xc1, 0x15, 0x27, 0xa4, 0x91, 0x06, 0x2e, 0x1c, 0x8c, 0x36, 0x3a, 0x97, 0x6d, + 0x3a, 0xab, 0x32, 0xca, 0xca, 0x92, 0x2b, 0xa6, 0x32, 0x5e, 0xca, 0x26, 0xa3, 0xb3, 0x32, 0xab, + 0x78, 0xc2, 0x14, 0xb3, 0xba, 0xcd, 0x39, 0x20, 0xfa, 0x05, 0x48, 0xc9, 0x52, 0x70, 0xb5, 0x2f, + 0xd9, 0x1c, 0xf3, 0x37, 0x18, 0xee, 0x53, 0x28, 0x2a, 0x35, 0x6e, 0x82, 0x9b, 0xdf, 0x97, 0xf0, + 0xb9, 0xae, 0x2d, 0xb0, 0xdb, 0xe4, 0x93, 0x8f, 0x08, 0x2f, 0x45, 0xc0, 0x92, 0x88, 0xd7, 0x92, + 0x5c, 0x09, 0x8e, 0x9b, 0x09, 0x5c, 0x34, 0x82, 0xd7, 0x43, 0x90, 0xaa, 0x73, 0x75, 0xb6, 0x48, + 0x56, 0xbc, 0x94, 0xe0, 0x3d, 0x7c, 0xfb, 0xed, 0xc7, 0xfb, 0x53, 0xf7, 0xbc, 0x2d, 0x4d, 0x7d, + 0xd8, 0x30, 0x97, 0xac, 0x80, 0xdb, 0x95, 0xe0, 0xaf, 0x20, 0x56, 0x92, 0xfa, 0xf4, 0x0d, 0x2f, + 0x41, 0x7f, 0xe3, 0x7c, 0x28, 0x15, 0x08, 0xbd, 0x34, 0x42, 0x49, 0xfd, 0x23, 0x2a, 0x78, 0x2d, + 0x43, 0x01, 0x2c, 0x09, 0x91, 0xbf, 0x8e, 0xc8, 0x67, 0x84, 0xcf, 0xee, 0xb2, 0xa2, 0xca, 0x21, + 0xe2, 0xf5, 0x03, 0x18, 0x4b, 0x72, 0xbd, 0x8d, 0x63, 0x4a, 0xe2, 0x88, 0x57, 0xe7, 0x50, 0x5a, + 0xec, 0x47, 0x06, 0xfb, 0x3e, 0xd9, 0x3e, 0x11, 0xb6, 0x34, 0xb5, 0x75, 0xe1, 0x75, 0x44, 0x3e, + 0x20, 0x7c, 0xa6, 0x37, 0x54, 0x4c, 0xe9, 0x66, 0xa4, 0x75, 0x7a, 0x93, 0xb0, 0x23, 0x5e, 0x76, + 0x2a, 0xb7, 0x8f, 0xc1, 0x5d, 0xbd, 0x8f, 0xde, 0x33, 0x83, 0x17, 0x79, 0xbd, 0x93, 0xe0, 0xd1, + 0x43, 0xc1, 0xeb, 0xfe, 0x01, 0x8c, 0x8f, 0xc2, 0xc2, 0x34, 0x0e, 0x91, 0x4f, 0x3e, 0x21, 0x8c, + 0x27, 0x18, 0x92, 0x5c, 0x9b, 0x89, 0x39, 0x99, 0xec, 0xca, 0xbf, 0x64, 0x76, 0xac, 0x3d, 0xc3, + 0xbd, 0xed, 0x75, 0xff, 0x93, 0xdb, 0x82, 0xea, 0x9a, 0x1a, 0xf6, 0x2b, 0xc2, 0xe7, 0xef, 0xbc, + 0x84, 0xf8, 0x60, 0xab, 0x4c, 0x7e, 0x8d, 0xf6, 0x46, 0x1b, 0xcc, 0x31, 0x99, 0x43, 0x5f, 0x9b, + 0x53, 0x6d, 0x1d, 0xbc, 0x30, 0x0e, 0xf6, 0xbc, 0xc7, 0x0b, 0x9a, 0x7c, 0x3c, 0xd5, 0x49, 0x9b, + 0xfa, 0x82, 0x30, 0xd1, 0xd7, 0xa8, 0xc7, 0x93, 0x6c, 0x7f, 0xfc, 0x54, 0x64, 0x8d, 0xab, 0xb5, + 0xbf, 
0x5d, 0xb7, 0x69, 0x9d, 0xb3, 0x75, 0xb1, 0x55, 0xce, 0x6b, 0x8f, 0x19, 0x03, 0xcf, 0xbd, + 0x27, 0x0b, 0x32, 0x20, 0xa6, 0x11, 0x42, 0xe4, 0x77, 0x2b, 0xbc, 0x1c, 0xf3, 0xa2, 0x05, 0xa0, + 0x7b, 0xe1, 0x8f, 0x67, 0x47, 0xee, 0xe8, 0x73, 0xbd, 0x83, 0xf6, 0x42, 0x2b, 0x4e, 0x79, 0xce, + 0xca, 0x34, 0xe0, 0x22, 0xa5, 0x29, 0x94, 0xe6, 0xd4, 0xd3, 0x26, 0xc4, 0xaa, 0x4c, 0xfe, 0xfe, + 0x04, 0xde, 0x72, 0xeb, 0x77, 0x08, 0x0d, 0x4e, 0x1b, 0xe5, 0xcd, 0x9f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x4c, 0x27, 0x6e, 0x9a, 0xb0, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service_messages.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service_messages.pb.go new file mode 100644 index 000000000..763b83c27 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service_messages.pb.go @@ -0,0 +1,759 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/v1/bigtable_service_messages.proto + +package bigtable + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request message for BigtableServer.ReadRows. +type ReadRowsRequest struct { + // The unique name of the table from which to read. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // If neither row_key nor row_range is set, reads from all rows. + // + // Types that are valid to be assigned to Target: + // *ReadRowsRequest_RowKey + // *ReadRowsRequest_RowRange + // *ReadRowsRequest_RowSet + Target isReadRowsRequest_Target `protobuf_oneof:"target"` + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entire table. 
+ Filter *RowFilter `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + // By default, rows are read sequentially, producing results which are + // guaranteed to arrive in increasing row order. Setting + // "allow_row_interleaving" to true allows multiple rows to be interleaved in + // the response stream, which increases throughput but breaks this guarantee, + // and may force the client to use more memory to buffer partially-received + // rows. Cannot be set to true when specifying "num_rows_limit". + AllowRowInterleaving bool `protobuf:"varint,6,opt,name=allow_row_interleaving,json=allowRowInterleaving" json:"allow_row_interleaving,omitempty"` + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + // Note that "allow_row_interleaving" cannot be set to true when this is set. + NumRowsLimit int64 `protobuf:"varint,7,opt,name=num_rows_limit,json=numRowsLimit" json:"num_rows_limit,omitempty"` +} + +func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} } +func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRowsRequest) ProtoMessage() {} +func (*ReadRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +type isReadRowsRequest_Target interface { + isReadRowsRequest_Target() +} + +type ReadRowsRequest_RowKey struct { + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3,oneof"` +} +type ReadRowsRequest_RowRange struct { + RowRange *RowRange `protobuf:"bytes,3,opt,name=row_range,json=rowRange,oneof"` +} +type ReadRowsRequest_RowSet struct { + RowSet *RowSet `protobuf:"bytes,8,opt,name=row_set,json=rowSet,oneof"` +} + +func (*ReadRowsRequest_RowKey) isReadRowsRequest_Target() {} +func (*ReadRowsRequest_RowRange) isReadRowsRequest_Target() {} +func (*ReadRowsRequest_RowSet) isReadRowsRequest_Target() {} + +func (m *ReadRowsRequest) GetTarget() isReadRowsRequest_Target { + if m != nil { + return m.Target 
+ } + return nil +} + +func (m *ReadRowsRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *ReadRowsRequest) GetRowKey() []byte { + if x, ok := m.GetTarget().(*ReadRowsRequest_RowKey); ok { + return x.RowKey + } + return nil +} + +func (m *ReadRowsRequest) GetRowRange() *RowRange { + if x, ok := m.GetTarget().(*ReadRowsRequest_RowRange); ok { + return x.RowRange + } + return nil +} + +func (m *ReadRowsRequest) GetRowSet() *RowSet { + if x, ok := m.GetTarget().(*ReadRowsRequest_RowSet); ok { + return x.RowSet + } + return nil +} + +func (m *ReadRowsRequest) GetFilter() *RowFilter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ReadRowsRequest) GetAllowRowInterleaving() bool { + if m != nil { + return m.AllowRowInterleaving + } + return false +} + +func (m *ReadRowsRequest) GetNumRowsLimit() int64 { + if m != nil { + return m.NumRowsLimit + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ReadRowsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ReadRowsRequest_OneofMarshaler, _ReadRowsRequest_OneofUnmarshaler, _ReadRowsRequest_OneofSizer, []interface{}{ + (*ReadRowsRequest_RowKey)(nil), + (*ReadRowsRequest_RowRange)(nil), + (*ReadRowsRequest_RowSet)(nil), + } +} + +func _ReadRowsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadRowsRequest) + // target + switch x := m.Target.(type) { + case *ReadRowsRequest_RowKey: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.RowKey) + case *ReadRowsRequest_RowRange: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RowRange); err != nil { + return err + } + case *ReadRowsRequest_RowSet: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RowSet); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ReadRowsRequest.Target has unexpected type %T", x) + } + return nil +} + +func _ReadRowsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadRowsRequest) + switch tag { + case 2: // target.row_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Target = &ReadRowsRequest_RowKey{x} + return true, err + case 3: // target.row_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowRange) + err := b.DecodeMessage(msg) + m.Target = &ReadRowsRequest_RowRange{msg} + return true, err + case 8: // target.row_set + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowSet) + err := b.DecodeMessage(msg) + m.Target = &ReadRowsRequest_RowSet{msg} + return true, err + default: + return false, nil + } +} + +func _ReadRowsRequest_OneofSizer(msg proto.Message) 
(n int) { + m := msg.(*ReadRowsRequest) + // target + switch x := m.Target.(type) { + case *ReadRowsRequest_RowKey: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RowKey))) + n += len(x.RowKey) + case *ReadRowsRequest_RowRange: + s := proto.Size(x.RowRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ReadRowsRequest_RowSet: + s := proto.Size(x.RowSet) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Response message for BigtableService.ReadRows. +type ReadRowsResponse struct { + // The key of the row for which we're receiving data. + // Results will be received in increasing row key order, unless + // "allow_row_interleaving" was specified in the request. + RowKey []byte `protobuf:"bytes,1,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // One or more chunks of the row specified by "row_key". + Chunks []*ReadRowsResponse_Chunk `protobuf:"bytes,2,rep,name=chunks" json:"chunks,omitempty"` +} + +func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} } +func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) } +func (*ReadRowsResponse) ProtoMessage() {} +func (*ReadRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ReadRowsResponse) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *ReadRowsResponse) GetChunks() []*ReadRowsResponse_Chunk { + if m != nil { + return m.Chunks + } + return nil +} + +// Specifies a piece of a row's contents returned as part of the read +// response stream. 
+type ReadRowsResponse_Chunk struct { + // Types that are valid to be assigned to Chunk: + // *ReadRowsResponse_Chunk_RowContents + // *ReadRowsResponse_Chunk_ResetRow + // *ReadRowsResponse_Chunk_CommitRow + Chunk isReadRowsResponse_Chunk_Chunk `protobuf_oneof:"chunk"` +} + +func (m *ReadRowsResponse_Chunk) Reset() { *m = ReadRowsResponse_Chunk{} } +func (m *ReadRowsResponse_Chunk) String() string { return proto.CompactTextString(m) } +func (*ReadRowsResponse_Chunk) ProtoMessage() {} +func (*ReadRowsResponse_Chunk) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1, 0} } + +type isReadRowsResponse_Chunk_Chunk interface { + isReadRowsResponse_Chunk_Chunk() +} + +type ReadRowsResponse_Chunk_RowContents struct { + RowContents *Family `protobuf:"bytes,1,opt,name=row_contents,json=rowContents,oneof"` +} +type ReadRowsResponse_Chunk_ResetRow struct { + ResetRow bool `protobuf:"varint,2,opt,name=reset_row,json=resetRow,oneof"` +} +type ReadRowsResponse_Chunk_CommitRow struct { + CommitRow bool `protobuf:"varint,3,opt,name=commit_row,json=commitRow,oneof"` +} + +func (*ReadRowsResponse_Chunk_RowContents) isReadRowsResponse_Chunk_Chunk() {} +func (*ReadRowsResponse_Chunk_ResetRow) isReadRowsResponse_Chunk_Chunk() {} +func (*ReadRowsResponse_Chunk_CommitRow) isReadRowsResponse_Chunk_Chunk() {} + +func (m *ReadRowsResponse_Chunk) GetChunk() isReadRowsResponse_Chunk_Chunk { + if m != nil { + return m.Chunk + } + return nil +} + +func (m *ReadRowsResponse_Chunk) GetRowContents() *Family { + if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_RowContents); ok { + return x.RowContents + } + return nil +} + +func (m *ReadRowsResponse_Chunk) GetResetRow() bool { + if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_ResetRow); ok { + return x.ResetRow + } + return false +} + +func (m *ReadRowsResponse_Chunk) GetCommitRow() bool { + if x, ok := m.GetChunk().(*ReadRowsResponse_Chunk_CommitRow); ok { + return x.CommitRow + } + return false +} + +// XXX_OneofFuncs is for the 
internal use of the proto package. +func (*ReadRowsResponse_Chunk) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ReadRowsResponse_Chunk_OneofMarshaler, _ReadRowsResponse_Chunk_OneofUnmarshaler, _ReadRowsResponse_Chunk_OneofSizer, []interface{}{ + (*ReadRowsResponse_Chunk_RowContents)(nil), + (*ReadRowsResponse_Chunk_ResetRow)(nil), + (*ReadRowsResponse_Chunk_CommitRow)(nil), + } +} + +func _ReadRowsResponse_Chunk_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadRowsResponse_Chunk) + // chunk + switch x := m.Chunk.(type) { + case *ReadRowsResponse_Chunk_RowContents: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RowContents); err != nil { + return err + } + case *ReadRowsResponse_Chunk_ResetRow: + t := uint64(0) + if x.ResetRow { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *ReadRowsResponse_Chunk_CommitRow: + t := uint64(0) + if x.CommitRow { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("ReadRowsResponse_Chunk.Chunk has unexpected type %T", x) + } + return nil +} + +func _ReadRowsResponse_Chunk_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadRowsResponse_Chunk) + switch tag { + case 1: // chunk.row_contents + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Family) + err := b.DecodeMessage(msg) + m.Chunk = &ReadRowsResponse_Chunk_RowContents{msg} + return true, err + case 2: // chunk.reset_row + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Chunk = &ReadRowsResponse_Chunk_ResetRow{x != 0} + return true, err + case 3: // chunk.commit_row + if wire != proto.WireVarint { + return true, 
proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Chunk = &ReadRowsResponse_Chunk_CommitRow{x != 0} + return true, err + default: + return false, nil + } +} + +func _ReadRowsResponse_Chunk_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ReadRowsResponse_Chunk) + // chunk + switch x := m.Chunk.(type) { + case *ReadRowsResponse_Chunk_RowContents: + s := proto.Size(x.RowContents) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ReadRowsResponse_Chunk_ResetRow: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case *ReadRowsResponse_Chunk_CommitRow: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message for BigtableService.SampleRowKeys. +type SampleRowKeysRequest struct { + // The unique name of the table from which to sample row keys. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` +} + +func (m *SampleRowKeysRequest) Reset() { *m = SampleRowKeysRequest{} } +func (m *SampleRowKeysRequest) String() string { return proto.CompactTextString(m) } +func (*SampleRowKeysRequest) ProtoMessage() {} +func (*SampleRowKeysRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *SampleRowKeysRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +// Response message for BigtableService.SampleRowKeys. +type SampleRowKeysResponse struct { + // Sorted streamed sequence of sample row keys in the table. The table might + // have contents before the first row key in the list and after the last one, + // but a key containing the empty string indicates "end of table" and will be + // the last response given, if present. 
+ // Note that row keys in this list may not have ever been written to or read + // from, and users should therefore not make any assumptions about the row key + // structure that are specific to their use case. + RowKey []byte `protobuf:"bytes,1,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // Approximate total storage space used by all rows in the table which precede + // "row_key". Buffering the contents of all rows between two subsequent + // samples would require space roughly equal to the difference in their + // "offset_bytes" fields. + OffsetBytes int64 `protobuf:"varint,2,opt,name=offset_bytes,json=offsetBytes" json:"offset_bytes,omitempty"` +} + +func (m *SampleRowKeysResponse) Reset() { *m = SampleRowKeysResponse{} } +func (m *SampleRowKeysResponse) String() string { return proto.CompactTextString(m) } +func (*SampleRowKeysResponse) ProtoMessage() {} +func (*SampleRowKeysResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *SampleRowKeysResponse) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *SampleRowKeysResponse) GetOffsetBytes() int64 { + if m != nil { + return m.OffsetBytes + } + return 0 +} + +// Request message for BigtableService.MutateRow. +type MutateRowRequest struct { + // The unique name of the table to which the mutation should be applied. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The key of the row to which the mutation should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // Changes to be atomically applied to the specified row. Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. 
+ Mutations []*Mutation `protobuf:"bytes,3,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *MutateRowRequest) Reset() { *m = MutateRowRequest{} } +func (m *MutateRowRequest) String() string { return proto.CompactTextString(m) } +func (*MutateRowRequest) ProtoMessage() {} +func (*MutateRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *MutateRowRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *MutateRowRequest) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *MutateRowRequest) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// Request message for BigtableService.MutateRows. +type MutateRowsRequest struct { + // The unique name of the table to which the mutations should be applied. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The row keys/mutations to be applied in bulk. + // Each entry is applied as an atomic mutation, but the entries may be + // applied in arbitrary order (even between entries for the same row). + // At least one entry must be specified, and in total the entries may + // contain at most 100000 mutations. 
+ Entries []*MutateRowsRequest_Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"` +} + +func (m *MutateRowsRequest) Reset() { *m = MutateRowsRequest{} } +func (m *MutateRowsRequest) String() string { return proto.CompactTextString(m) } +func (*MutateRowsRequest) ProtoMessage() {} +func (*MutateRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *MutateRowsRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *MutateRowsRequest) GetEntries() []*MutateRowsRequest_Entry { + if m != nil { + return m.Entries + } + return nil +} + +type MutateRowsRequest_Entry struct { + // The key of the row to which the `mutations` should be applied. + RowKey []byte `protobuf:"bytes,1,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // Changes to be atomically applied to the specified row. Mutations are + // applied in order, meaning that earlier mutations can be masked by + // later ones. + // At least one mutation must be specified. + Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *MutateRowsRequest_Entry) Reset() { *m = MutateRowsRequest_Entry{} } +func (m *MutateRowsRequest_Entry) String() string { return proto.CompactTextString(m) } +func (*MutateRowsRequest_Entry) ProtoMessage() {} +func (*MutateRowsRequest_Entry) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5, 0} } + +func (m *MutateRowsRequest_Entry) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *MutateRowsRequest_Entry) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// Response message for BigtableService.MutateRows. +type MutateRowsResponse struct { + // The results for each Entry from the request, presented in the order + // in which the entries were originally given. 
+ // Depending on how requests are batched during execution, it is possible + // for one Entry to fail due to an error with another Entry. In the event + // that this occurs, the same error will be reported for both entries. + Statuses []*google_rpc.Status `protobuf:"bytes,1,rep,name=statuses" json:"statuses,omitempty"` +} + +func (m *MutateRowsResponse) Reset() { *m = MutateRowsResponse{} } +func (m *MutateRowsResponse) String() string { return proto.CompactTextString(m) } +func (*MutateRowsResponse) ProtoMessage() {} +func (*MutateRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *MutateRowsResponse) GetStatuses() []*google_rpc.Status { + if m != nil { + return m.Statuses + } + return nil +} + +// Request message for BigtableService.CheckAndMutateRowRequest +type CheckAndMutateRowRequest struct { + // The unique name of the table to which the conditional mutation should be + // applied. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The key of the row to which the conditional mutation should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either "true_mutations" or + // "false_mutations" will be executed. If unset, checks that the row contains + // any values at all. + PredicateFilter *RowFilter `protobuf:"bytes,6,opt,name=predicate_filter,json=predicateFilter" json:"predicate_filter,omitempty"` + // Changes to be atomically applied to the specified row if "predicate_filter" + // yields at least one cell when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "false_mutations" is empty, and at most + // 100000. 
+ TrueMutations []*Mutation `protobuf:"bytes,4,rep,name=true_mutations,json=trueMutations" json:"true_mutations,omitempty"` + // Changes to be atomically applied to the specified row if "predicate_filter" + // does not yield any cells when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "true_mutations" is empty, and at most + // 100000. + FalseMutations []*Mutation `protobuf:"bytes,5,rep,name=false_mutations,json=falseMutations" json:"false_mutations,omitempty"` +} + +func (m *CheckAndMutateRowRequest) Reset() { *m = CheckAndMutateRowRequest{} } +func (m *CheckAndMutateRowRequest) String() string { return proto.CompactTextString(m) } +func (*CheckAndMutateRowRequest) ProtoMessage() {} +func (*CheckAndMutateRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *CheckAndMutateRowRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *CheckAndMutateRowRequest) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *CheckAndMutateRowRequest) GetPredicateFilter() *RowFilter { + if m != nil { + return m.PredicateFilter + } + return nil +} + +func (m *CheckAndMutateRowRequest) GetTrueMutations() []*Mutation { + if m != nil { + return m.TrueMutations + } + return nil +} + +func (m *CheckAndMutateRowRequest) GetFalseMutations() []*Mutation { + if m != nil { + return m.FalseMutations + } + return nil +} + +// Response message for BigtableService.CheckAndMutateRowRequest. +type CheckAndMutateRowResponse struct { + // Whether or not the request's "predicate_filter" yielded any results for + // the specified row. 
+ PredicateMatched bool `protobuf:"varint,1,opt,name=predicate_matched,json=predicateMatched" json:"predicate_matched,omitempty"` +} + +func (m *CheckAndMutateRowResponse) Reset() { *m = CheckAndMutateRowResponse{} } +func (m *CheckAndMutateRowResponse) String() string { return proto.CompactTextString(m) } +func (*CheckAndMutateRowResponse) ProtoMessage() {} +func (*CheckAndMutateRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +func (m *CheckAndMutateRowResponse) GetPredicateMatched() bool { + if m != nil { + return m.PredicateMatched + } + return false +} + +// Request message for BigtableService.ReadModifyWriteRowRequest. +type ReadModifyWriteRowRequest struct { + // The unique name of the table to which the read/modify/write rules should be + // applied. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The key of the row to which the read/modify/write rules should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. 
+ Rules []*ReadModifyWriteRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"` +} + +func (m *ReadModifyWriteRowRequest) Reset() { *m = ReadModifyWriteRowRequest{} } +func (m *ReadModifyWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*ReadModifyWriteRowRequest) ProtoMessage() {} +func (*ReadModifyWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +func (m *ReadModifyWriteRowRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *ReadModifyWriteRowRequest) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *ReadModifyWriteRowRequest) GetRules() []*ReadModifyWriteRule { + if m != nil { + return m.Rules + } + return nil +} + +func init() { + proto.RegisterType((*ReadRowsRequest)(nil), "google.bigtable.v1.ReadRowsRequest") + proto.RegisterType((*ReadRowsResponse)(nil), "google.bigtable.v1.ReadRowsResponse") + proto.RegisterType((*ReadRowsResponse_Chunk)(nil), "google.bigtable.v1.ReadRowsResponse.Chunk") + proto.RegisterType((*SampleRowKeysRequest)(nil), "google.bigtable.v1.SampleRowKeysRequest") + proto.RegisterType((*SampleRowKeysResponse)(nil), "google.bigtable.v1.SampleRowKeysResponse") + proto.RegisterType((*MutateRowRequest)(nil), "google.bigtable.v1.MutateRowRequest") + proto.RegisterType((*MutateRowsRequest)(nil), "google.bigtable.v1.MutateRowsRequest") + proto.RegisterType((*MutateRowsRequest_Entry)(nil), "google.bigtable.v1.MutateRowsRequest.Entry") + proto.RegisterType((*MutateRowsResponse)(nil), "google.bigtable.v1.MutateRowsResponse") + proto.RegisterType((*CheckAndMutateRowRequest)(nil), "google.bigtable.v1.CheckAndMutateRowRequest") + proto.RegisterType((*CheckAndMutateRowResponse)(nil), "google.bigtable.v1.CheckAndMutateRowResponse") + proto.RegisterType((*ReadModifyWriteRowRequest)(nil), "google.bigtable.v1.ReadModifyWriteRowRequest") +} + +func init() { 
proto.RegisterFile("google/bigtable/v1/bigtable_service_messages.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 788 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x5f, 0x8b, 0x23, 0x45, + 0x10, 0xdf, 0x49, 0xcc, 0xbf, 0x4a, 0xdc, 0xdd, 0x6b, 0xce, 0xdb, 0xd9, 0x70, 0x8b, 0x71, 0x10, + 0x0d, 0x1e, 0x4c, 0xb8, 0xd5, 0x7d, 0xb9, 0x43, 0xc4, 0xc4, 0x3d, 0x22, 0x1a, 0x39, 0x3a, 0x0f, + 0x82, 0x08, 0xa1, 0x33, 0xa9, 0xcc, 0x0e, 0x3b, 0xd3, 0x1d, 0xbb, 0x7b, 0x32, 0xe4, 0x59, 0xf0, + 0x5d, 0x3f, 0x85, 0xdf, 0xc8, 0x17, 0x3f, 0x8c, 0x74, 0xcf, 0xe4, 0x8f, 0x6b, 0xa2, 0x11, 0xf6, + 0xad, 0xa7, 0xaa, 0x7e, 0xbf, 0xaa, 0xfa, 0x75, 0x4d, 0x17, 0x5c, 0x87, 0x42, 0x84, 0x31, 0xf6, + 0xa6, 0x51, 0xa8, 0xd9, 0x34, 0xc6, 0xde, 0xf2, 0xe5, 0xe6, 0x3c, 0x51, 0x28, 0x97, 0x51, 0x80, + 0x93, 0x04, 0x95, 0x62, 0x21, 0x2a, 0x7f, 0x21, 0x85, 0x16, 0x84, 0xe4, 0x18, 0x7f, 0x1d, 0xe7, + 0x2f, 0x5f, 0xb6, 0x3f, 0xfa, 0x37, 0x9e, 0x19, 0xd3, 0x2c, 0xc7, 0xb6, 0x2f, 0x8a, 0x38, 0xb9, + 0x08, 0x7a, 0x4a, 0x33, 0x9d, 0x16, 0xa4, 0xde, 0x9f, 0x25, 0x38, 0xa3, 0xc8, 0x66, 0x54, 0x64, + 0x8a, 0xe2, 0x4f, 0x29, 0x2a, 0x4d, 0xae, 0x00, 0x72, 0x02, 0xce, 0x12, 0x74, 0x9d, 0x8e, 0xd3, + 0x6d, 0xd0, 0x86, 0xb5, 0x7c, 0xc7, 0x12, 0x24, 0x97, 0x50, 0x93, 0x22, 0x9b, 0xdc, 0xe3, 0xca, + 0x2d, 0x75, 0x9c, 0x6e, 0x6b, 0x78, 0x42, 0xab, 0x52, 0x64, 0xdf, 0xe0, 0x8a, 0xbc, 0x86, 0x86, + 0x71, 0x49, 0xc6, 0x43, 0x74, 0xcb, 0x1d, 0xa7, 0xdb, 0xbc, 0x7e, 0xee, 0xff, 0xb3, 0x6c, 0x9f, + 0x8a, 0x8c, 0x9a, 0x98, 0xe1, 0x09, 0xad, 0xcb, 0xe2, 0x4c, 0x6e, 0x72, 0x5e, 0x85, 0xda, 0xad, + 0x5b, 0x68, 0xfb, 0x00, 0x74, 0x8c, 0xba, 0xc8, 0x39, 0x46, 0x4d, 0x6e, 0xa0, 0x3a, 0x8f, 0x62, + 0x8d, 0xd2, 0xad, 0x58, 0xd4, 0xd5, 0x01, 0xd4, 0x1b, 0x1b, 0x44, 0x8b, 0x60, 0xf2, 0x19, 0x3c, + 0x63, 0x71, 0x6c, 0x8a, 0x15, 0xd9, 0x24, 0xe2, 0x1a, 0x65, 0x8c, 0x6c, 0x19, 0xf1, 0xd0, 0xad, + 0x76, 0x9c, 0x6e, 0x9d, 0x3e, 0xb5, 0x5e, 0x2a, 0xb2, 0xaf, 
0x77, 0x7c, 0xe4, 0x43, 0x38, 0xe5, + 0x69, 0x62, 0x30, 0x6a, 0x12, 0x47, 0x49, 0xa4, 0xdd, 0x5a, 0xc7, 0xe9, 0x96, 0x69, 0x8b, 0xa7, + 0x89, 0x91, 0xf0, 0x5b, 0x63, 0xeb, 0xd7, 0xa1, 0xaa, 0x99, 0x0c, 0x51, 0x7b, 0x3f, 0x97, 0xe0, + 0x7c, 0x2b, 0xaf, 0x5a, 0x08, 0xae, 0x90, 0x5c, 0x6c, 0x05, 0x34, 0xe2, 0xb6, 0x36, 0xf2, 0xf5, + 0xa1, 0x1a, 0xdc, 0xa5, 0xfc, 0x5e, 0xb9, 0xa5, 0x4e, 0xb9, 0xdb, 0xbc, 0xfe, 0x64, 0x6f, 0x2b, + 0x0f, 0xe8, 0xfc, 0x81, 0x81, 0xd0, 0x02, 0xd9, 0xfe, 0xd5, 0x81, 0x8a, 0xb5, 0x90, 0x2f, 0xa0, + 0x65, 0xd2, 0x04, 0x82, 0x6b, 0xe4, 0x5a, 0xd9, 0x5c, 0x07, 0x44, 0x7d, 0xc3, 0x92, 0x28, 0x5e, + 0x0d, 0x4f, 0x68, 0x53, 0x8a, 0x6c, 0x50, 0x00, 0xc8, 0x15, 0x34, 0x24, 0x2a, 0xd4, 0xa6, 0x5d, + 0x7b, 0xd5, 0x75, 0x7b, 0x5f, 0xc6, 0x44, 0x45, 0x46, 0xde, 0x07, 0x08, 0x44, 0x92, 0x44, 0xb9, + 0xbf, 0x5c, 0xf8, 0x1b, 0xb9, 0x8d, 0x8a, 0xac, 0x5f, 0x83, 0x8a, 0x2d, 0xca, 0xbb, 0x81, 0xa7, + 0x63, 0x96, 0x2c, 0x62, 0xa4, 0xb6, 0xcf, 0x23, 0x07, 0xcd, 0x1b, 0xc3, 0x7b, 0x0f, 0x60, 0xff, + 0x25, 0xe0, 0x07, 0xd0, 0x12, 0xf3, 0xb9, 0x29, 0x79, 0xba, 0xd2, 0xa8, 0x6c, 0xd1, 0x65, 0xda, + 0xcc, 0x6d, 0x7d, 0x63, 0xf2, 0x7e, 0x71, 0xe0, 0x7c, 0x94, 0x6a, 0xa6, 0x0d, 0xeb, 0x91, 0x13, + 0x7f, 0xf1, 0x60, 0xe2, 0x37, 0xf9, 0x5e, 0x41, 0x23, 0x31, 0x5c, 0x91, 0xe0, 0xca, 0x2d, 0xdb, + 0x3b, 0xdb, 0x3b, 0xef, 0xa3, 0x22, 0x88, 0x6e, 0xc3, 0xbd, 0x3f, 0x1c, 0x78, 0xb2, 0x29, 0xe4, + 0xd8, 0x7f, 0xef, 0x16, 0x6a, 0xc8, 0xb5, 0x8c, 0x70, 0x3d, 0x22, 0x2f, 0x0e, 0xa6, 0xdb, 0xa5, + 0xf5, 0x6f, 0xb9, 0x96, 0x2b, 0xba, 0xc6, 0xb6, 0x7f, 0x84, 0x8a, 0xb5, 0x1c, 0x56, 0xf2, 0x6f, + 0x9d, 0x95, 0xfe, 0x5f, 0x67, 0x5f, 0x01, 0xd9, 0xad, 0xa0, 0xb8, 0x34, 0x1f, 0xea, 0xf9, 0xcb, + 0x83, 0x66, 0x14, 0x0d, 0x21, 0x59, 0x13, 0xca, 0x45, 0xe0, 0x8f, 0xad, 0x8f, 0x6e, 0x62, 0xbc, + 0xdf, 0x4b, 0xe0, 0x0e, 0xee, 0x30, 0xb8, 0xff, 0x92, 0xcf, 0x1e, 0xed, 0xc2, 0x86, 0x70, 0xbe, + 0x90, 0x38, 0x8b, 0x02, 0xa6, 0x71, 0x52, 0x3c, 0x1b, 0xd5, 0x63, 0x9e, 0x8d, 0xb3, 0x0d, 0x2c, + 
0x37, 0x90, 0x01, 0x9c, 0x6a, 0x99, 0xe2, 0x64, 0xab, 0xd2, 0x3b, 0x47, 0xa8, 0xf4, 0xae, 0xc1, + 0xac, 0xbf, 0x14, 0xb9, 0x85, 0xb3, 0x39, 0x8b, 0xd5, 0x2e, 0x4b, 0xe5, 0x08, 0x96, 0x53, 0x0b, + 0xda, 0xd0, 0x78, 0x43, 0xb8, 0xdc, 0xa3, 0x54, 0xa1, 0xfb, 0x0b, 0x78, 0xb2, 0x6d, 0x39, 0x61, + 0x3a, 0xb8, 0xc3, 0x99, 0x55, 0xac, 0x4e, 0xb7, 0x5a, 0x8c, 0x72, 0xbb, 0xf7, 0x9b, 0x03, 0x97, + 0xe6, 0x81, 0x19, 0x89, 0x59, 0x34, 0x5f, 0x7d, 0x2f, 0xa3, 0x47, 0x51, 0xfd, 0x73, 0xa8, 0xc8, + 0x34, 0xc6, 0xf5, 0x2f, 0xf2, 0xf1, 0xa1, 0x67, 0x6d, 0x37, 0x6b, 0x1a, 0x23, 0xcd, 0x51, 0x7d, + 0x0d, 0xcf, 0x02, 0x91, 0xec, 0x01, 0xf5, 0x9f, 0xf7, 0x8b, 0x8f, 0x71, 0xbe, 0x32, 0x47, 0xc5, + 0xc6, 0x7c, 0x6b, 0x76, 0xdb, 0x5b, 0xe7, 0x87, 0x57, 0x05, 0x26, 0x14, 0x31, 0xe3, 0xa1, 0x2f, + 0x64, 0xd8, 0x0b, 0x91, 0xdb, 0xcd, 0xd7, 0xcb, 0x5d, 0x6c, 0x11, 0xa9, 0xdd, 0xed, 0xf9, 0x7a, + 0x7d, 0x9e, 0x56, 0x6d, 0xd8, 0xa7, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x16, 0x55, 0x90, 0x95, + 0xab, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go new file mode 100644 index 000000000..fcdc0faa4 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go @@ -0,0 +1,1168 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/v2/bigtable.proto + +/* +Package bigtable is a generated protocol buffer package. 
+ +It is generated from these files: + google/bigtable/v2/bigtable.proto + google/bigtable/v2/data.proto + +It has these top-level messages: + ReadRowsRequest + ReadRowsResponse + SampleRowKeysRequest + SampleRowKeysResponse + MutateRowRequest + MutateRowResponse + MutateRowsRequest + MutateRowsResponse + CheckAndMutateRowRequest + CheckAndMutateRowResponse + ReadModifyWriteRowRequest + ReadModifyWriteRowResponse + Row + Family + Column + Cell + RowRange + RowSet + ColumnRange + TimestampRange + ValueRange + RowFilter + Mutation + ReadModifyWriteRule +*/ +package bigtable + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/wrappers" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request message for Bigtable.ReadRows. +type ReadRowsRequest struct { + // The unique name of the table from which to read. + // Values are of the form + // `projects//instances//tables/
`. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The row keys and/or ranges to read. If not specified, reads from all rows. + Rows *RowSet `protobuf:"bytes,2,opt,name=rows" json:"rows,omitempty"` + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entirety of each row. + Filter *RowFilter `protobuf:"bytes,3,opt,name=filter" json:"filter,omitempty"` + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + RowsLimit int64 `protobuf:"varint,4,opt,name=rows_limit,json=rowsLimit" json:"rows_limit,omitempty"` +} + +func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} } +func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRowsRequest) ProtoMessage() {} +func (*ReadRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ReadRowsRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *ReadRowsRequest) GetRows() *RowSet { + if m != nil { + return m.Rows + } + return nil +} + +func (m *ReadRowsRequest) GetFilter() *RowFilter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ReadRowsRequest) GetRowsLimit() int64 { + if m != nil { + return m.RowsLimit + } + return 0 +} + +// Response message for Bigtable.ReadRows. +type ReadRowsResponse struct { + Chunks []*ReadRowsResponse_CellChunk `protobuf:"bytes,1,rep,name=chunks" json:"chunks,omitempty"` + // Optionally the server might return the row key of the last row it + // has scanned. The client can use this to construct a more + // efficient retry request if needed: any row keys or portions of + // ranges less than this row key can be dropped from the request. 
+ // This is primarily useful for cases where the server has read a + // lot of data that was filtered out since the last committed row + // key, allowing the client to skip that work on a retry. + LastScannedRowKey []byte `protobuf:"bytes,2,opt,name=last_scanned_row_key,json=lastScannedRowKey,proto3" json:"last_scanned_row_key,omitempty"` +} + +func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} } +func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) } +func (*ReadRowsResponse) ProtoMessage() {} +func (*ReadRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ReadRowsResponse) GetChunks() []*ReadRowsResponse_CellChunk { + if m != nil { + return m.Chunks + } + return nil +} + +func (m *ReadRowsResponse) GetLastScannedRowKey() []byte { + if m != nil { + return m.LastScannedRowKey + } + return nil +} + +// Specifies a piece of a row's contents returned as part of the read +// response stream. +type ReadRowsResponse_CellChunk struct { + // The row key for this chunk of data. If the row key is empty, + // this CellChunk is a continuation of the same row as the previous + // CellChunk in the response stream, even if that CellChunk was in a + // previous ReadRowsResponse message. + RowKey []byte `protobuf:"bytes,1,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // The column family name for this chunk of data. If this message + // is not present this CellChunk is a continuation of the same column + // family as the previous CellChunk. The empty string can occur as a + // column family name in a response so clients must check + // explicitly for the presence of this message, not just for + // `family_name.value` being non-empty. + FamilyName *google_protobuf1.StringValue `protobuf:"bytes,2,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The column qualifier for this chunk of data. 
If this message + // is not present, this CellChunk is a continuation of the same column + // as the previous CellChunk. Column qualifiers may be empty so + // clients must check for the presence of this message, not just + // for `qualifier.value` being non-empty. + Qualifier *google_protobuf1.BytesValue `protobuf:"bytes,3,opt,name=qualifier" json:"qualifier,omitempty"` + // The cell's stored timestamp, which also uniquely identifies it + // within its column. Values are always expressed in + // microseconds, but individual tables may set a coarser + // granularity to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will + // only allow values of `timestamp_micros` which are multiples of + // 1000. Timestamps are only set in the first CellChunk per cell + // (for cells split into multiple chunks). + TimestampMicros int64 `protobuf:"varint,4,opt,name=timestamp_micros,json=timestampMicros" json:"timestamp_micros,omitempty"` + // Labels applied to the cell by a + // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set + // on the first CellChunk per cell. + Labels []string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"` + // The value stored in the cell. Cell values can be split across + // multiple CellChunks. In that case only the value field will be + // set in CellChunks after the first: the timestamp and labels + // will only be present in the first CellChunk, even if the first + // CellChunk came in a previous ReadRowsResponse. + Value []byte `protobuf:"bytes,6,opt,name=value,proto3" json:"value,omitempty"` + // If this CellChunk is part of a chunked cell value and this is + // not the final chunk of that cell, value_size will be set to the + // total length of the cell value. The client can use this size + // to pre-allocate memory to hold the full cell value. 
+ ValueSize int32 `protobuf:"varint,7,opt,name=value_size,json=valueSize" json:"value_size,omitempty"` + // Types that are valid to be assigned to RowStatus: + // *ReadRowsResponse_CellChunk_ResetRow + // *ReadRowsResponse_CellChunk_CommitRow + RowStatus isReadRowsResponse_CellChunk_RowStatus `protobuf_oneof:"row_status"` +} + +func (m *ReadRowsResponse_CellChunk) Reset() { *m = ReadRowsResponse_CellChunk{} } +func (m *ReadRowsResponse_CellChunk) String() string { return proto.CompactTextString(m) } +func (*ReadRowsResponse_CellChunk) ProtoMessage() {} +func (*ReadRowsResponse_CellChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +type isReadRowsResponse_CellChunk_RowStatus interface { + isReadRowsResponse_CellChunk_RowStatus() +} + +type ReadRowsResponse_CellChunk_ResetRow struct { + ResetRow bool `protobuf:"varint,8,opt,name=reset_row,json=resetRow,oneof"` +} +type ReadRowsResponse_CellChunk_CommitRow struct { + CommitRow bool `protobuf:"varint,9,opt,name=commit_row,json=commitRow,oneof"` +} + +func (*ReadRowsResponse_CellChunk_ResetRow) isReadRowsResponse_CellChunk_RowStatus() {} +func (*ReadRowsResponse_CellChunk_CommitRow) isReadRowsResponse_CellChunk_RowStatus() {} + +func (m *ReadRowsResponse_CellChunk) GetRowStatus() isReadRowsResponse_CellChunk_RowStatus { + if m != nil { + return m.RowStatus + } + return nil +} + +func (m *ReadRowsResponse_CellChunk) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *ReadRowsResponse_CellChunk) GetFamilyName() *google_protobuf1.StringValue { + if m != nil { + return m.FamilyName + } + return nil +} + +func (m *ReadRowsResponse_CellChunk) GetQualifier() *google_protobuf1.BytesValue { + if m != nil { + return m.Qualifier + } + return nil +} + +func (m *ReadRowsResponse_CellChunk) GetTimestampMicros() int64 { + if m != nil { + return m.TimestampMicros + } + return 0 +} + +func (m *ReadRowsResponse_CellChunk) GetLabels() []string { + if m != nil { + return 
m.Labels + } + return nil +} + +func (m *ReadRowsResponse_CellChunk) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *ReadRowsResponse_CellChunk) GetValueSize() int32 { + if m != nil { + return m.ValueSize + } + return 0 +} + +func (m *ReadRowsResponse_CellChunk) GetResetRow() bool { + if x, ok := m.GetRowStatus().(*ReadRowsResponse_CellChunk_ResetRow); ok { + return x.ResetRow + } + return false +} + +func (m *ReadRowsResponse_CellChunk) GetCommitRow() bool { + if x, ok := m.GetRowStatus().(*ReadRowsResponse_CellChunk_CommitRow); ok { + return x.CommitRow + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ReadRowsResponse_CellChunk) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ReadRowsResponse_CellChunk_OneofMarshaler, _ReadRowsResponse_CellChunk_OneofUnmarshaler, _ReadRowsResponse_CellChunk_OneofSizer, []interface{}{ + (*ReadRowsResponse_CellChunk_ResetRow)(nil), + (*ReadRowsResponse_CellChunk_CommitRow)(nil), + } +} + +func _ReadRowsResponse_CellChunk_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadRowsResponse_CellChunk) + // row_status + switch x := m.RowStatus.(type) { + case *ReadRowsResponse_CellChunk_ResetRow: + t := uint64(0) + if x.ResetRow { + t = 1 + } + b.EncodeVarint(8<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *ReadRowsResponse_CellChunk_CommitRow: + t := uint64(0) + if x.CommitRow { + t = 1 + } + b.EncodeVarint(9<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("ReadRowsResponse_CellChunk.RowStatus has unexpected type %T", x) + } + return nil +} + +func _ReadRowsResponse_CellChunk_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadRowsResponse_CellChunk) + switch tag { + case 8: // 
row_status.reset_row + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.RowStatus = &ReadRowsResponse_CellChunk_ResetRow{x != 0} + return true, err + case 9: // row_status.commit_row + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.RowStatus = &ReadRowsResponse_CellChunk_CommitRow{x != 0} + return true, err + default: + return false, nil + } +} + +func _ReadRowsResponse_CellChunk_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ReadRowsResponse_CellChunk) + // row_status + switch x := m.RowStatus.(type) { + case *ReadRowsResponse_CellChunk_ResetRow: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += 1 + case *ReadRowsResponse_CellChunk_CommitRow: + n += proto.SizeVarint(9<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message for Bigtable.SampleRowKeys. +type SampleRowKeysRequest struct { + // The unique name of the table from which to sample row keys. + // Values are of the form + // `projects//instances//tables/
`. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` +} + +func (m *SampleRowKeysRequest) Reset() { *m = SampleRowKeysRequest{} } +func (m *SampleRowKeysRequest) String() string { return proto.CompactTextString(m) } +func (*SampleRowKeysRequest) ProtoMessage() {} +func (*SampleRowKeysRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *SampleRowKeysRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +// Response message for Bigtable.SampleRowKeys. +type SampleRowKeysResponse struct { + // Sorted streamed sequence of sample row keys in the table. The table might + // have contents before the first row key in the list and after the last one, + // but a key containing the empty string indicates "end of table" and will be + // the last response given, if present. + // Note that row keys in this list may not have ever been written to or read + // from, and users should therefore not make any assumptions about the row key + // structure that are specific to their use case. + RowKey []byte `protobuf:"bytes,1,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // Approximate total storage space used by all rows in the table which precede + // `row_key`. Buffering the contents of all rows between two subsequent + // samples would require space roughly equal to the difference in their + // `offset_bytes` fields. 
+ OffsetBytes int64 `protobuf:"varint,2,opt,name=offset_bytes,json=offsetBytes" json:"offset_bytes,omitempty"` +} + +func (m *SampleRowKeysResponse) Reset() { *m = SampleRowKeysResponse{} } +func (m *SampleRowKeysResponse) String() string { return proto.CompactTextString(m) } +func (*SampleRowKeysResponse) ProtoMessage() {} +func (*SampleRowKeysResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SampleRowKeysResponse) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *SampleRowKeysResponse) GetOffsetBytes() int64 { + if m != nil { + return m.OffsetBytes + } + return 0 +} + +// Request message for Bigtable.MutateRow. +type MutateRowRequest struct { + // The unique name of the table to which the mutation should be applied. + // Values are of the form + // `projects//instances//tables/
`. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The key of the row to which the mutation should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // Changes to be atomically applied to the specified row. Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. + Mutations []*Mutation `protobuf:"bytes,3,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *MutateRowRequest) Reset() { *m = MutateRowRequest{} } +func (m *MutateRowRequest) String() string { return proto.CompactTextString(m) } +func (*MutateRowRequest) ProtoMessage() {} +func (*MutateRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *MutateRowRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *MutateRowRequest) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *MutateRowRequest) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// Response message for Bigtable.MutateRow. +type MutateRowResponse struct { +} + +func (m *MutateRowResponse) Reset() { *m = MutateRowResponse{} } +func (m *MutateRowResponse) String() string { return proto.CompactTextString(m) } +func (*MutateRowResponse) ProtoMessage() {} +func (*MutateRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +// Request message for BigtableService.MutateRows. +type MutateRowsRequest struct { + // The unique name of the table to which the mutations should be applied. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The row keys and corresponding mutations to be applied in bulk. 
+ // Each entry is applied as an atomic mutation, but the entries may be + // applied in arbitrary order (even between entries for the same row). + // At least one entry must be specified, and in total the entries can + // contain at most 100000 mutations. + Entries []*MutateRowsRequest_Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"` +} + +func (m *MutateRowsRequest) Reset() { *m = MutateRowsRequest{} } +func (m *MutateRowsRequest) String() string { return proto.CompactTextString(m) } +func (*MutateRowsRequest) ProtoMessage() {} +func (*MutateRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *MutateRowsRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *MutateRowsRequest) GetEntries() []*MutateRowsRequest_Entry { + if m != nil { + return m.Entries + } + return nil +} + +type MutateRowsRequest_Entry struct { + // The key of the row to which the `mutations` should be applied. + RowKey []byte `protobuf:"bytes,1,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // Changes to be atomically applied to the specified row. Mutations are + // applied in order, meaning that earlier mutations can be masked by + // later ones. + // You must specify at least one mutation. 
+ Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *MutateRowsRequest_Entry) Reset() { *m = MutateRowsRequest_Entry{} } +func (m *MutateRowsRequest_Entry) String() string { return proto.CompactTextString(m) } +func (*MutateRowsRequest_Entry) ProtoMessage() {} +func (*MutateRowsRequest_Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} } + +func (m *MutateRowsRequest_Entry) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *MutateRowsRequest_Entry) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// Response message for BigtableService.MutateRows. +type MutateRowsResponse struct { + // One or more results for Entries from the batch request. + Entries []*MutateRowsResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` +} + +func (m *MutateRowsResponse) Reset() { *m = MutateRowsResponse{} } +func (m *MutateRowsResponse) String() string { return proto.CompactTextString(m) } +func (*MutateRowsResponse) ProtoMessage() {} +func (*MutateRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *MutateRowsResponse) GetEntries() []*MutateRowsResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +type MutateRowsResponse_Entry struct { + // The index into the original request's `entries` list of the Entry + // for which a result is being reported. + Index int64 `protobuf:"varint,1,opt,name=index" json:"index,omitempty"` + // The result of the request Entry identified by `index`. + // Depending on how requests are batched during execution, it is possible + // for one Entry to fail due to an error with another Entry. In the event + // that this occurs, the same error will be reported for both entries. 
+ Status *google_rpc.Status `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` +} + +func (m *MutateRowsResponse_Entry) Reset() { *m = MutateRowsResponse_Entry{} } +func (m *MutateRowsResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*MutateRowsResponse_Entry) ProtoMessage() {} +func (*MutateRowsResponse_Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +func (m *MutateRowsResponse_Entry) GetIndex() int64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *MutateRowsResponse_Entry) GetStatus() *google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +// Request message for Bigtable.CheckAndMutateRow. +type CheckAndMutateRowRequest struct { + // The unique name of the table to which the conditional mutation should be + // applied. + // Values are of the form + // `projects//instances//tables/
`. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The key of the row to which the conditional mutation should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either `true_mutations` or + // `false_mutations` will be executed. If unset, checks that the row contains + // any values at all. + PredicateFilter *RowFilter `protobuf:"bytes,6,opt,name=predicate_filter,json=predicateFilter" json:"predicate_filter,omitempty"` + // Changes to be atomically applied to the specified row if `predicate_filter` + // yields at least one cell when applied to `row_key`. Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if `false_mutations` is empty, and at most + // 100000. + TrueMutations []*Mutation `protobuf:"bytes,4,rep,name=true_mutations,json=trueMutations" json:"true_mutations,omitempty"` + // Changes to be atomically applied to the specified row if `predicate_filter` + // does not yield any cells when applied to `row_key`. Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if `true_mutations` is empty, and at most + // 100000. 
+ FalseMutations []*Mutation `protobuf:"bytes,5,rep,name=false_mutations,json=falseMutations" json:"false_mutations,omitempty"` +} + +func (m *CheckAndMutateRowRequest) Reset() { *m = CheckAndMutateRowRequest{} } +func (m *CheckAndMutateRowRequest) String() string { return proto.CompactTextString(m) } +func (*CheckAndMutateRowRequest) ProtoMessage() {} +func (*CheckAndMutateRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CheckAndMutateRowRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *CheckAndMutateRowRequest) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *CheckAndMutateRowRequest) GetPredicateFilter() *RowFilter { + if m != nil { + return m.PredicateFilter + } + return nil +} + +func (m *CheckAndMutateRowRequest) GetTrueMutations() []*Mutation { + if m != nil { + return m.TrueMutations + } + return nil +} + +func (m *CheckAndMutateRowRequest) GetFalseMutations() []*Mutation { + if m != nil { + return m.FalseMutations + } + return nil +} + +// Response message for Bigtable.CheckAndMutateRow. +type CheckAndMutateRowResponse struct { + // Whether or not the request's `predicate_filter` yielded any results for + // the specified row. + PredicateMatched bool `protobuf:"varint,1,opt,name=predicate_matched,json=predicateMatched" json:"predicate_matched,omitempty"` +} + +func (m *CheckAndMutateRowResponse) Reset() { *m = CheckAndMutateRowResponse{} } +func (m *CheckAndMutateRowResponse) String() string { return proto.CompactTextString(m) } +func (*CheckAndMutateRowResponse) ProtoMessage() {} +func (*CheckAndMutateRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *CheckAndMutateRowResponse) GetPredicateMatched() bool { + if m != nil { + return m.PredicateMatched + } + return false +} + +// Request message for Bigtable.ReadModifyWriteRow. 
+type ReadModifyWriteRowRequest struct { + // The unique name of the table to which the read/modify/write rules should be + // applied. + // Values are of the form + // `projects//instances//tables/
`. + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // The key of the row to which the read/modify/write rules should be applied. + RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"` + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. + Rules []*ReadModifyWriteRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"` +} + +func (m *ReadModifyWriteRowRequest) Reset() { *m = ReadModifyWriteRowRequest{} } +func (m *ReadModifyWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*ReadModifyWriteRowRequest) ProtoMessage() {} +func (*ReadModifyWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ReadModifyWriteRowRequest) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *ReadModifyWriteRowRequest) GetRowKey() []byte { + if m != nil { + return m.RowKey + } + return nil +} + +func (m *ReadModifyWriteRowRequest) GetRules() []*ReadModifyWriteRule { + if m != nil { + return m.Rules + } + return nil +} + +// Response message for Bigtable.ReadModifyWriteRow. +type ReadModifyWriteRowResponse struct { + // A Row containing the new contents of all cells modified by the request. 
+ Row *Row `protobuf:"bytes,1,opt,name=row" json:"row,omitempty"` +} + +func (m *ReadModifyWriteRowResponse) Reset() { *m = ReadModifyWriteRowResponse{} } +func (m *ReadModifyWriteRowResponse) String() string { return proto.CompactTextString(m) } +func (*ReadModifyWriteRowResponse) ProtoMessage() {} +func (*ReadModifyWriteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ReadModifyWriteRowResponse) GetRow() *Row { + if m != nil { + return m.Row + } + return nil +} + +func init() { + proto.RegisterType((*ReadRowsRequest)(nil), "google.bigtable.v2.ReadRowsRequest") + proto.RegisterType((*ReadRowsResponse)(nil), "google.bigtable.v2.ReadRowsResponse") + proto.RegisterType((*ReadRowsResponse_CellChunk)(nil), "google.bigtable.v2.ReadRowsResponse.CellChunk") + proto.RegisterType((*SampleRowKeysRequest)(nil), "google.bigtable.v2.SampleRowKeysRequest") + proto.RegisterType((*SampleRowKeysResponse)(nil), "google.bigtable.v2.SampleRowKeysResponse") + proto.RegisterType((*MutateRowRequest)(nil), "google.bigtable.v2.MutateRowRequest") + proto.RegisterType((*MutateRowResponse)(nil), "google.bigtable.v2.MutateRowResponse") + proto.RegisterType((*MutateRowsRequest)(nil), "google.bigtable.v2.MutateRowsRequest") + proto.RegisterType((*MutateRowsRequest_Entry)(nil), "google.bigtable.v2.MutateRowsRequest.Entry") + proto.RegisterType((*MutateRowsResponse)(nil), "google.bigtable.v2.MutateRowsResponse") + proto.RegisterType((*MutateRowsResponse_Entry)(nil), "google.bigtable.v2.MutateRowsResponse.Entry") + proto.RegisterType((*CheckAndMutateRowRequest)(nil), "google.bigtable.v2.CheckAndMutateRowRequest") + proto.RegisterType((*CheckAndMutateRowResponse)(nil), "google.bigtable.v2.CheckAndMutateRowResponse") + proto.RegisterType((*ReadModifyWriteRowRequest)(nil), "google.bigtable.v2.ReadModifyWriteRowRequest") + proto.RegisterType((*ReadModifyWriteRowResponse)(nil), "google.bigtable.v2.ReadModifyWriteRowResponse") +} + +// Reference imports to 
suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Bigtable service + +type BigtableClient interface { + // Streams back the contents of all requested rows in key order, optionally + // applying the same Reader filter to each. Depending on their size, + // rows and cells may be broken up across multiple responses, but + // atomicity of each row will still be preserved. See the + // ReadRowsResponse documentation for details. + ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (Bigtable_ReadRowsClient, error) + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (Bigtable_SampleRowKeysClient, error) + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by `mutation`. + MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*MutateRowResponse, error) + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + MutateRows(ctx context.Context, in *MutateRowsRequest, opts ...grpc.CallOption) (Bigtable_MutateRowsClient, error) + // Mutates a row atomically based on the output of a predicate Reader filter. + CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) + // Modifies a row atomically. 
The method reads the latest existing timestamp + // and value from the specified columns and writes a new entry based on + // pre-defined read/modify/write rules. The new value for the timestamp is the + // greater of the existing timestamp or the current server time. The method + // returns the new contents of all modified cells. + ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*ReadModifyWriteRowResponse, error) +} + +type bigtableClient struct { + cc *grpc.ClientConn +} + +func NewBigtableClient(cc *grpc.ClientConn) BigtableClient { + return &bigtableClient{cc} +} + +func (c *bigtableClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (Bigtable_ReadRowsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Bigtable_serviceDesc.Streams[0], c.cc, "/google.bigtable.v2.Bigtable/ReadRows", opts...) + if err != nil { + return nil, err + } + x := &bigtableReadRowsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Bigtable_ReadRowsClient interface { + Recv() (*ReadRowsResponse, error) + grpc.ClientStream +} + +type bigtableReadRowsClient struct { + grpc.ClientStream +} + +func (x *bigtableReadRowsClient) Recv() (*ReadRowsResponse, error) { + m := new(ReadRowsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *bigtableClient) SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (Bigtable_SampleRowKeysClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Bigtable_serviceDesc.Streams[1], c.cc, "/google.bigtable.v2.Bigtable/SampleRowKeys", opts...) 
+ if err != nil { + return nil, err + } + x := &bigtableSampleRowKeysClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Bigtable_SampleRowKeysClient interface { + Recv() (*SampleRowKeysResponse, error) + grpc.ClientStream +} + +type bigtableSampleRowKeysClient struct { + grpc.ClientStream +} + +func (x *bigtableSampleRowKeysClient) Recv() (*SampleRowKeysResponse, error) { + m := new(SampleRowKeysResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *bigtableClient) MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*MutateRowResponse, error) { + out := new(MutateRowResponse) + err := grpc.Invoke(ctx, "/google.bigtable.v2.Bigtable/MutateRow", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClient) MutateRows(ctx context.Context, in *MutateRowsRequest, opts ...grpc.CallOption) (Bigtable_MutateRowsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Bigtable_serviceDesc.Streams[2], c.cc, "/google.bigtable.v2.Bigtable/MutateRows", opts...) 
+ if err != nil { + return nil, err + } + x := &bigtableMutateRowsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Bigtable_MutateRowsClient interface { + Recv() (*MutateRowsResponse, error) + grpc.ClientStream +} + +type bigtableMutateRowsClient struct { + grpc.ClientStream +} + +func (x *bigtableMutateRowsClient) Recv() (*MutateRowsResponse, error) { + m := new(MutateRowsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *bigtableClient) CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) { + out := new(CheckAndMutateRowResponse) + err := grpc.Invoke(ctx, "/google.bigtable.v2.Bigtable/CheckAndMutateRow", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *bigtableClient) ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*ReadModifyWriteRowResponse, error) { + out := new(ReadModifyWriteRowResponse) + err := grpc.Invoke(ctx, "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Bigtable service + +type BigtableServer interface { + // Streams back the contents of all requested rows in key order, optionally + // applying the same Reader filter to each. Depending on their size, + // rows and cells may be broken up across multiple responses, but + // atomicity of each row will still be preserved. See the + // ReadRowsResponse documentation for details. + ReadRows(*ReadRowsRequest, Bigtable_ReadRowsServer) error + // Returns a sample of row keys in the table. 
The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + SampleRowKeys(*SampleRowKeysRequest, Bigtable_SampleRowKeysServer) error + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by `mutation`. + MutateRow(context.Context, *MutateRowRequest) (*MutateRowResponse, error) + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + MutateRows(*MutateRowsRequest, Bigtable_MutateRowsServer) error + // Mutates a row atomically based on the output of a predicate Reader filter. + CheckAndMutateRow(context.Context, *CheckAndMutateRowRequest) (*CheckAndMutateRowResponse, error) + // Modifies a row atomically. The method reads the latest existing timestamp + // and value from the specified columns and writes a new entry based on + // pre-defined read/modify/write rules. The new value for the timestamp is the + // greater of the existing timestamp or the current server time. The method + // returns the new contents of all modified cells. 
+ ReadModifyWriteRow(context.Context, *ReadModifyWriteRowRequest) (*ReadModifyWriteRowResponse, error) +} + +func RegisterBigtableServer(s *grpc.Server, srv BigtableServer) { + s.RegisterService(&_Bigtable_serviceDesc, srv) +} + +func _Bigtable_ReadRows_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadRowsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BigtableServer).ReadRows(m, &bigtableReadRowsServer{stream}) +} + +type Bigtable_ReadRowsServer interface { + Send(*ReadRowsResponse) error + grpc.ServerStream +} + +type bigtableReadRowsServer struct { + grpc.ServerStream +} + +func (x *bigtableReadRowsServer) Send(m *ReadRowsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Bigtable_SampleRowKeys_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SampleRowKeysRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BigtableServer).SampleRowKeys(m, &bigtableSampleRowKeysServer{stream}) +} + +type Bigtable_SampleRowKeysServer interface { + Send(*SampleRowKeysResponse) error + grpc.ServerStream +} + +type bigtableSampleRowKeysServer struct { + grpc.ServerStream +} + +func (x *bigtableSampleRowKeysServer) Send(m *SampleRowKeysResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Bigtable_MutateRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MutateRowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableServer).MutateRow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.v2.Bigtable/MutateRow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableServer).MutateRow(ctx, req.(*MutateRowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Bigtable_MutateRows_Handler(srv 
interface{}, stream grpc.ServerStream) error { + m := new(MutateRowsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BigtableServer).MutateRows(m, &bigtableMutateRowsServer{stream}) +} + +type Bigtable_MutateRowsServer interface { + Send(*MutateRowsResponse) error + grpc.ServerStream +} + +type bigtableMutateRowsServer struct { + grpc.ServerStream +} + +func (x *bigtableMutateRowsServer) Send(m *MutateRowsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Bigtable_CheckAndMutateRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckAndMutateRowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableServer).CheckAndMutateRow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableServer).CheckAndMutateRow(ctx, req.(*CheckAndMutateRowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Bigtable_ReadModifyWriteRow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadModifyWriteRowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BigtableServer).ReadModifyWriteRow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BigtableServer).ReadModifyWriteRow(ctx, req.(*ReadModifyWriteRowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Bigtable_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bigtable.v2.Bigtable", + HandlerType: 
(*BigtableServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MutateRow", + Handler: _Bigtable_MutateRow_Handler, + }, + { + MethodName: "CheckAndMutateRow", + Handler: _Bigtable_CheckAndMutateRow_Handler, + }, + { + MethodName: "ReadModifyWriteRow", + Handler: _Bigtable_ReadModifyWriteRow_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ReadRows", + Handler: _Bigtable_ReadRows_Handler, + ServerStreams: true, + }, + { + StreamName: "SampleRowKeys", + Handler: _Bigtable_SampleRowKeys_Handler, + ServerStreams: true, + }, + { + StreamName: "MutateRows", + Handler: _Bigtable_MutateRows_Handler, + ServerStreams: true, + }, + }, + Metadata: "google/bigtable/v2/bigtable.proto", +} + +func init() { proto.RegisterFile("google/bigtable/v2/bigtable.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1154 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0x66, 0xec, 0xd8, 0xf1, 0x9e, 0x24, 0x4d, 0x32, 0x84, 0x66, 0x6b, 0x12, 0x70, 0x97, 0x16, + 0x1c, 0x97, 0xae, 0x2b, 0xa3, 0x5e, 0xd4, 0x55, 0x0a, 0xd8, 0xe4, 0x07, 0x81, 0xab, 0x6a, 0x2c, + 0x15, 0x09, 0x21, 0x59, 0xe3, 0xf5, 0xd8, 0x59, 0xb2, 0x7f, 0xdd, 0x9d, 0x8d, 0x71, 0x11, 0x12, + 0xe2, 0x82, 0x07, 0x80, 0x6b, 0xc4, 0x15, 0x02, 0x21, 0xc1, 0x25, 0xb7, 0x5c, 0xf0, 0x08, 0x5c, + 0xf0, 0x02, 0x7d, 0x02, 0x9e, 0xa0, 0xda, 0xd9, 0x59, 0xdb, 0x49, 0xec, 0x76, 0x53, 0xf5, 0x6e, + 0xe7, 0x9c, 0xf3, 0x9d, 0xf9, 0xce, 0xef, 0xd8, 0x70, 0x75, 0xe0, 0xba, 0x03, 0x8b, 0x55, 0xbb, + 0xe6, 0x80, 0xd3, 0xae, 0xc5, 0xaa, 0x27, 0xb5, 0xf1, 0xb7, 0xee, 0xf9, 0x2e, 0x77, 0x31, 0x8e, + 0x4d, 0xf4, 0xb1, 0xf8, 0xa4, 0x56, 0xdc, 0x92, 0x30, 0xea, 0x99, 0x55, 0xea, 0x38, 0x2e, 0xa7, + 0xdc, 0x74, 0x9d, 0x20, 0x46, 0x14, 0xb7, 0x67, 0x38, 0xed, 0x51, 0x4e, 0xa5, 0xfa, 0x0d, 0xa9, + 0x16, 0xa7, 0x6e, 0xd8, 0xaf, 0x0e, 0x7d, 0xea, 0x79, 0xcc, 0x4f, 0xe0, 0x9b, 0x52, 0xef, 0x7b, + 0x46, 0x35, 0xe0, 0x94, 0x87, 
0x52, 0xa1, 0xfd, 0x85, 0x60, 0x95, 0x30, 0xda, 0x23, 0xee, 0x30, + 0x20, 0xec, 0x51, 0xc8, 0x02, 0x8e, 0xb7, 0x01, 0xc4, 0x1d, 0x1d, 0x87, 0xda, 0x4c, 0x45, 0x25, + 0x54, 0x56, 0x88, 0x22, 0x24, 0xf7, 0xa9, 0xcd, 0xb0, 0x0e, 0x0b, 0xbe, 0x3b, 0x0c, 0xd4, 0x4c, + 0x09, 0x95, 0x97, 0x6a, 0x45, 0xfd, 0x7c, 0x2c, 0x3a, 0x71, 0x87, 0x6d, 0xc6, 0x89, 0xb0, 0xc3, + 0xb7, 0x21, 0xdf, 0x37, 0x2d, 0xce, 0x7c, 0x35, 0x2b, 0x10, 0xdb, 0x73, 0x10, 0xfb, 0xc2, 0x88, + 0x48, 0xe3, 0x88, 0x45, 0x04, 0xef, 0x58, 0xa6, 0x6d, 0x72, 0x75, 0xa1, 0x84, 0xca, 0x59, 0xa2, + 0x44, 0x92, 0x4f, 0x23, 0x81, 0xf6, 0x7f, 0x16, 0xd6, 0x26, 0xc4, 0x03, 0xcf, 0x75, 0x02, 0x86, + 0xf7, 0x21, 0x6f, 0x1c, 0x85, 0xce, 0x71, 0xa0, 0xa2, 0x52, 0xb6, 0xbc, 0x54, 0xd3, 0x67, 0x5e, + 0x75, 0x06, 0xa5, 0x37, 0x99, 0x65, 0x35, 0x23, 0x18, 0x91, 0x68, 0x5c, 0x85, 0x0d, 0x8b, 0x06, + 0xbc, 0x13, 0x18, 0xd4, 0x71, 0x58, 0xaf, 0xe3, 0xbb, 0xc3, 0xce, 0x31, 0x1b, 0x89, 0x90, 0x97, + 0xc9, 0x7a, 0xa4, 0x6b, 0xc7, 0x2a, 0xe2, 0x0e, 0x3f, 0x61, 0xa3, 0xe2, 0x93, 0x0c, 0x28, 0x63, + 0x37, 0x78, 0x13, 0x16, 0x13, 0x04, 0x12, 0x88, 0xbc, 0x2f, 0xcc, 0xf0, 0x2e, 0x2c, 0xf5, 0xa9, + 0x6d, 0x5a, 0xa3, 0x38, 0xb5, 0x71, 0x06, 0xb7, 0x12, 0x92, 0x49, 0xf1, 0xf4, 0x36, 0xf7, 0x4d, + 0x67, 0xf0, 0x90, 0x5a, 0x21, 0x23, 0x10, 0x03, 0x44, 0xe6, 0xef, 0x80, 0xf2, 0x28, 0xa4, 0x96, + 0xd9, 0x37, 0xc7, 0xc9, 0x7c, 0xfd, 0x1c, 0xb8, 0x31, 0xe2, 0x2c, 0x88, 0xb1, 0x13, 0x6b, 0xbc, + 0x03, 0x6b, 0xdc, 0xb4, 0x59, 0xc0, 0xa9, 0xed, 0x75, 0x6c, 0xd3, 0xf0, 0xdd, 0x40, 0xe6, 0x74, + 0x75, 0x2c, 0x6f, 0x09, 0x31, 0xbe, 0x0c, 0x79, 0x8b, 0x76, 0x99, 0x15, 0xa8, 0xb9, 0x52, 0xb6, + 0xac, 0x10, 0x79, 0xc2, 0x1b, 0x90, 0x3b, 0x89, 0xdc, 0xaa, 0x79, 0x11, 0x53, 0x7c, 0x88, 0xca, + 0x24, 0x3e, 0x3a, 0x81, 0xf9, 0x98, 0xa9, 0x8b, 0x25, 0x54, 0xce, 0x11, 0x45, 0x48, 0xda, 0xe6, + 0xe3, 0x48, 0xad, 0xf8, 0x2c, 0x60, 0x3c, 0x4a, 0xa1, 0x5a, 0x28, 0xa1, 0x72, 0xe1, 0xf0, 0x15, + 0x52, 0x10, 0x22, 0xe2, 0x0e, 0xf1, 0x9b, 0x00, 0x86, 0x6b, 0xdb, 
0x66, 0xac, 0x57, 0xa4, 0x5e, + 0x89, 0x65, 0xc4, 0x1d, 0x36, 0x96, 0x45, 0x17, 0x74, 0xe2, 0x9e, 0xd5, 0x6e, 0xc3, 0x46, 0x9b, + 0xda, 0x9e, 0xc5, 0xe2, 0xb4, 0xa7, 0xec, 0x58, 0xad, 0x0d, 0xaf, 0x9d, 0x81, 0xc9, 0x7e, 0x99, + 0x5b, 0xa8, 0xab, 0xb0, 0xec, 0xf6, 0xfb, 0x11, 0xef, 0x6e, 0x94, 0x4e, 0x51, 0xa9, 0x2c, 0x59, + 0x8a, 0x65, 0x22, 0xc3, 0xda, 0xf7, 0x08, 0xd6, 0x5a, 0x21, 0xa7, 0x3c, 0xf2, 0x9a, 0x72, 0x74, + 0xa6, 0xee, 0xcb, 0x9c, 0xba, 0xaf, 0x0e, 0x8a, 0x1d, 0xca, 0x89, 0x57, 0xb3, 0xa2, 0x77, 0xb7, + 0x66, 0xf5, 0x6e, 0x4b, 0x1a, 0x91, 0x89, 0xb9, 0xf6, 0x2a, 0xac, 0x4f, 0xf1, 0x88, 0x23, 0xd3, + 0xfe, 0x43, 0x53, 0xd2, 0xb4, 0x93, 0xbd, 0x07, 0x8b, 0xcc, 0xe1, 0xbe, 0x29, 0x02, 0x8e, 0x38, + 0xdc, 0x98, 0xcb, 0x61, 0xda, 0xad, 0xbe, 0xe7, 0x70, 0x7f, 0x44, 0x12, 0x6c, 0xf1, 0x0b, 0xc8, + 0x09, 0xc9, 0xfc, 0xf4, 0x9e, 0x0a, 0x37, 0x73, 0xb1, 0x70, 0x7f, 0x45, 0x80, 0xa7, 0x29, 0x8c, + 0x47, 0x7f, 0xcc, 0x3d, 0x9e, 0xfd, 0x77, 0x9f, 0xc7, 0x5d, 0x4e, 0xff, 0x19, 0xf2, 0x1f, 0x27, + 0xe4, 0x37, 0x20, 0x67, 0x3a, 0x3d, 0xf6, 0x95, 0xa0, 0x9e, 0x25, 0xf1, 0x01, 0x57, 0x20, 0x1f, + 0xf7, 0xa2, 0x1c, 0x5e, 0x9c, 0xdc, 0xe2, 0x7b, 0x86, 0xde, 0x16, 0x1a, 0x22, 0x2d, 0xb4, 0xdf, + 0x32, 0xa0, 0x36, 0x8f, 0x98, 0x71, 0xfc, 0xa1, 0xd3, 0x7b, 0x69, 0x9d, 0x72, 0x08, 0x6b, 0x9e, + 0xcf, 0x7a, 0xa6, 0x41, 0x39, 0xeb, 0xc8, 0xbd, 0x9a, 0x4f, 0xb3, 0x57, 0x57, 0xc7, 0xb0, 0x58, + 0x80, 0x9b, 0x70, 0x89, 0xfb, 0x21, 0xeb, 0x4c, 0x2a, 0xb1, 0x90, 0xa2, 0x12, 0x2b, 0x11, 0x26, + 0x39, 0x05, 0x78, 0x0f, 0x56, 0xfb, 0xd4, 0x0a, 0xa6, 0xbd, 0xe4, 0x52, 0x78, 0xb9, 0x24, 0x40, + 0x63, 0x37, 0xda, 0x21, 0x5c, 0x99, 0x91, 0x29, 0x59, 0xda, 0x1b, 0xb0, 0x3e, 0x09, 0xd9, 0xa6, + 0xdc, 0x38, 0x62, 0x3d, 0x91, 0xb1, 0x02, 0x99, 0xe4, 0xa2, 0x15, 0xcb, 0xb5, 0x1f, 0x10, 0x5c, + 0x89, 0x36, 0x7c, 0xcb, 0xed, 0x99, 0xfd, 0xd1, 0x67, 0xbe, 0xf9, 0x52, 0xb2, 0xbe, 0x0b, 0x39, + 0x3f, 0xb4, 0x58, 0x32, 0x9b, 0xef, 0xcc, 0x7b, 0x57, 0xa6, 0x6f, 0x0d, 0x2d, 0x46, 0x62, 0x94, + 0x76, 
0x00, 0xc5, 0x59, 0x9c, 0x64, 0x7c, 0x3b, 0x90, 0x8d, 0xb6, 0x1f, 0x12, 0x55, 0xdc, 0x9c, + 0x53, 0x45, 0x12, 0xd9, 0xd4, 0xfe, 0x28, 0x40, 0xa1, 0x21, 0x15, 0xf8, 0x27, 0x04, 0x85, 0xe4, + 0x31, 0xc3, 0x6f, 0x3d, 0xfb, 0xa9, 0x13, 0xe1, 0x17, 0xaf, 0xa5, 0x79, 0x0f, 0xb5, 0x8f, 0xbe, + 0xfb, 0xf7, 0xc9, 0x8f, 0x99, 0x7b, 0xda, 0x9d, 0xe8, 0x47, 0xc6, 0xd7, 0x93, 0x7c, 0xed, 0x7a, + 0xbe, 0xfb, 0x25, 0x33, 0x78, 0x50, 0xad, 0x54, 0x4d, 0x27, 0xe0, 0xd4, 0x31, 0x58, 0xf4, 0x2d, + 0x2c, 0x82, 0x6a, 0xe5, 0x9b, 0xba, 0x2f, 0x5d, 0xd5, 0x51, 0xe5, 0x16, 0xc2, 0x7f, 0x22, 0x58, + 0x39, 0xb5, 0x77, 0x71, 0x79, 0xd6, 0xfd, 0xb3, 0x36, 0x7a, 0x71, 0x27, 0x85, 0xa5, 0xa4, 0xbb, + 0x2f, 0xe8, 0x7e, 0x80, 0xef, 0x5d, 0x98, 0x6e, 0x30, 0xed, 0xef, 0x16, 0xc2, 0x3f, 0x23, 0x50, + 0xc6, 0xed, 0x87, 0xaf, 0x3d, 0x73, 0x81, 0x24, 0x44, 0xaf, 0x3f, 0xc7, 0x4a, 0x92, 0xdc, 0x13, + 0x24, 0xdf, 0xd7, 0xea, 0x17, 0x26, 0x69, 0x27, 0xbe, 0xea, 0xa8, 0x82, 0x7f, 0x41, 0x00, 0x93, + 0x1d, 0x86, 0xaf, 0xa7, 0xda, 0xcf, 0xc5, 0xb7, 0xd3, 0xad, 0xc2, 0x24, 0x93, 0xda, 0xdd, 0x17, + 0x27, 0x29, 0x4b, 0xff, 0x37, 0x82, 0xf5, 0x73, 0x03, 0x8d, 0x67, 0xae, 0xe4, 0x79, 0x1b, 0xb2, + 0x78, 0x33, 0xa5, 0xb5, 0x24, 0xdf, 0x12, 0xe4, 0x0f, 0xb4, 0xc6, 0x85, 0xc9, 0x1b, 0x67, 0x7d, + 0x46, 0x99, 0xfe, 0x07, 0x01, 0x3e, 0x3f, 0xb3, 0xf8, 0x66, 0x9a, 0xc9, 0x9f, 0xc4, 0xa0, 0xa7, + 0x35, 0x97, 0x41, 0xdc, 0x17, 0x41, 0x1c, 0x6a, 0xcd, 0x17, 0x1a, 0xbd, 0xd3, 0x4e, 0xeb, 0xa8, + 0xd2, 0xf8, 0x16, 0xc1, 0x65, 0xc3, 0xb5, 0x67, 0xb0, 0x68, 0xac, 0x24, 0x7b, 0xe4, 0x41, 0xf4, + 0xcb, 0xf1, 0x01, 0xfa, 0xbc, 0x2e, 0x8d, 0x06, 0xae, 0x45, 0x9d, 0x81, 0xee, 0xfa, 0x83, 0xea, + 0x80, 0x39, 0xe2, 0x77, 0x65, 0x35, 0x56, 0x51, 0xcf, 0x0c, 0xa6, 0xff, 0x81, 0xdc, 0x4d, 0xbe, + 0x7f, 0xcf, 0xa8, 0x07, 0x31, 0xb8, 0x69, 0xb9, 0x61, 0x4f, 0x4f, 0x5c, 0xeb, 0x0f, 0x6b, 0xdd, + 0xbc, 0xf0, 0xf0, 0xde, 0xd3, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x6b, 0x7a, 0xa3, 0x17, 0x0d, + 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go new file mode 100644 index 000000000..cf96dc22d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go @@ -0,0 +1,2118 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bigtable/v2/data.proto + +package bigtable + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Specifies the complete (requested) contents of a single row of a table. +// Rows which exceed 256MiB in size cannot be read in full. +type Row struct { + // The unique key which identifies this row within its table. This is the same + // key that's used to identify the row in, for example, a MutateRowRequest. + // May contain any non-empty byte string up to 4KiB in length. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // May be empty, but only if the entire row is empty. + // The mutual ordering of column families is not specified. + Families []*Family `protobuf:"bytes,2,rep,name=families" json:"families,omitempty"` +} + +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} +func (*Row) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Row) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Row) GetFamilies() []*Family { + if m != nil { + return m.Families + } + return nil +} + +// Specifies (some of) the contents of a single row/column family intersection +// of a table. +type Family struct { + // The unique key which identifies this family within its row. 
This is the + // same key that's used to identify the family in, for example, a RowFilter + // which sets its "family_name_regex_filter" field. + // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may + // produce cells in a sentinel family with an empty name. + // Must be no greater than 64 characters in length. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Must not be empty. Sorted in order of increasing "qualifier". + Columns []*Column `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"` +} + +func (m *Family) Reset() { *m = Family{} } +func (m *Family) String() string { return proto.CompactTextString(m) } +func (*Family) ProtoMessage() {} +func (*Family) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Family) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Family) GetColumns() []*Column { + if m != nil { + return m.Columns + } + return nil +} + +// Specifies (some of) the contents of a single row/column intersection of a +// table. +type Column struct { + // The unique key which identifies this column within its family. This is the + // same key that's used to identify the column in, for example, a RowFilter + // which sets its `column_qualifier_regex_filter` field. + // May contain any byte string, including the empty string, up to 16kiB in + // length. + Qualifier []byte `protobuf:"bytes,1,opt,name=qualifier,proto3" json:"qualifier,omitempty"` + // Must not be empty. Sorted in order of decreasing "timestamp_micros". 
+ Cells []*Cell `protobuf:"bytes,2,rep,name=cells" json:"cells,omitempty"` +} + +func (m *Column) Reset() { *m = Column{} } +func (m *Column) String() string { return proto.CompactTextString(m) } +func (*Column) ProtoMessage() {} +func (*Column) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *Column) GetQualifier() []byte { + if m != nil { + return m.Qualifier + } + return nil +} + +func (m *Column) GetCells() []*Cell { + if m != nil { + return m.Cells + } + return nil +} + +// Specifies (some of) the contents of a single row/column/timestamp of a table. +type Cell struct { + // The cell's stored timestamp, which also uniquely identifies it within + // its column. + // Values are always expressed in microseconds, but individual tables may set + // a coarser granularity to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will only allow + // values of `timestamp_micros` which are multiples of 1000. + TimestampMicros int64 `protobuf:"varint,1,opt,name=timestamp_micros,json=timestampMicros" json:"timestamp_micros,omitempty"` + // The value stored in the cell. + // May contain any byte string, including the empty string, up to 100MiB in + // length. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. 
+ Labels []string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty"` +} + +func (m *Cell) Reset() { *m = Cell{} } +func (m *Cell) String() string { return proto.CompactTextString(m) } +func (*Cell) ProtoMessage() {} +func (*Cell) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *Cell) GetTimestampMicros() int64 { + if m != nil { + return m.TimestampMicros + } + return 0 +} + +func (m *Cell) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *Cell) GetLabels() []string { + if m != nil { + return m.Labels + } + return nil +} + +// Specifies a contiguous range of rows. +type RowRange struct { + // The row key at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + // + // Types that are valid to be assigned to StartKey: + // *RowRange_StartKeyClosed + // *RowRange_StartKeyOpen + StartKey isRowRange_StartKey `protobuf_oneof:"start_key"` + // The row key at which to end the range. + // If neither field is set, interpreted as the infinite row key, exclusive. 
+ // + // Types that are valid to be assigned to EndKey: + // *RowRange_EndKeyOpen + // *RowRange_EndKeyClosed + EndKey isRowRange_EndKey `protobuf_oneof:"end_key"` +} + +func (m *RowRange) Reset() { *m = RowRange{} } +func (m *RowRange) String() string { return proto.CompactTextString(m) } +func (*RowRange) ProtoMessage() {} +func (*RowRange) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +type isRowRange_StartKey interface { + isRowRange_StartKey() +} +type isRowRange_EndKey interface { + isRowRange_EndKey() +} + +type RowRange_StartKeyClosed struct { + StartKeyClosed []byte `protobuf:"bytes,1,opt,name=start_key_closed,json=startKeyClosed,proto3,oneof"` +} +type RowRange_StartKeyOpen struct { + StartKeyOpen []byte `protobuf:"bytes,2,opt,name=start_key_open,json=startKeyOpen,proto3,oneof"` +} +type RowRange_EndKeyOpen struct { + EndKeyOpen []byte `protobuf:"bytes,3,opt,name=end_key_open,json=endKeyOpen,proto3,oneof"` +} +type RowRange_EndKeyClosed struct { + EndKeyClosed []byte `protobuf:"bytes,4,opt,name=end_key_closed,json=endKeyClosed,proto3,oneof"` +} + +func (*RowRange_StartKeyClosed) isRowRange_StartKey() {} +func (*RowRange_StartKeyOpen) isRowRange_StartKey() {} +func (*RowRange_EndKeyOpen) isRowRange_EndKey() {} +func (*RowRange_EndKeyClosed) isRowRange_EndKey() {} + +func (m *RowRange) GetStartKey() isRowRange_StartKey { + if m != nil { + return m.StartKey + } + return nil +} +func (m *RowRange) GetEndKey() isRowRange_EndKey { + if m != nil { + return m.EndKey + } + return nil +} + +func (m *RowRange) GetStartKeyClosed() []byte { + if x, ok := m.GetStartKey().(*RowRange_StartKeyClosed); ok { + return x.StartKeyClosed + } + return nil +} + +func (m *RowRange) GetStartKeyOpen() []byte { + if x, ok := m.GetStartKey().(*RowRange_StartKeyOpen); ok { + return x.StartKeyOpen + } + return nil +} + +func (m *RowRange) GetEndKeyOpen() []byte { + if x, ok := m.GetEndKey().(*RowRange_EndKeyOpen); ok { + return x.EndKeyOpen + } + return nil +} + 
+func (m *RowRange) GetEndKeyClosed() []byte { + if x, ok := m.GetEndKey().(*RowRange_EndKeyClosed); ok { + return x.EndKeyClosed + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*RowRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RowRange_OneofMarshaler, _RowRange_OneofUnmarshaler, _RowRange_OneofSizer, []interface{}{ + (*RowRange_StartKeyClosed)(nil), + (*RowRange_StartKeyOpen)(nil), + (*RowRange_EndKeyOpen)(nil), + (*RowRange_EndKeyClosed)(nil), + } +} + +func _RowRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RowRange) + // start_key + switch x := m.StartKey.(type) { + case *RowRange_StartKeyClosed: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartKeyClosed) + case *RowRange_StartKeyOpen: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartKeyOpen) + case nil: + default: + return fmt.Errorf("RowRange.StartKey has unexpected type %T", x) + } + // end_key + switch x := m.EndKey.(type) { + case *RowRange_EndKeyOpen: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndKeyOpen) + case *RowRange_EndKeyClosed: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndKeyClosed) + case nil: + default: + return fmt.Errorf("RowRange.EndKey has unexpected type %T", x) + } + return nil +} + +func _RowRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RowRange) + switch tag { + case 1: // start_key.start_key_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartKey = &RowRange_StartKeyClosed{x} + return true, err + case 2: // start_key.start_key_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + 
m.StartKey = &RowRange_StartKeyOpen{x} + return true, err + case 3: // end_key.end_key_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndKey = &RowRange_EndKeyOpen{x} + return true, err + case 4: // end_key.end_key_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndKey = &RowRange_EndKeyClosed{x} + return true, err + default: + return false, nil + } +} + +func _RowRange_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RowRange) + // start_key + switch x := m.StartKey.(type) { + case *RowRange_StartKeyClosed: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartKeyClosed))) + n += len(x.StartKeyClosed) + case *RowRange_StartKeyOpen: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartKeyOpen))) + n += len(x.StartKeyOpen) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // end_key + switch x := m.EndKey.(type) { + case *RowRange_EndKeyOpen: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndKeyOpen))) + n += len(x.EndKeyOpen) + case *RowRange_EndKeyClosed: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndKeyClosed))) + n += len(x.EndKeyClosed) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specifies a non-contiguous set of rows. +type RowSet struct { + // Single rows included in the set. + RowKeys [][]byte `protobuf:"bytes,1,rep,name=row_keys,json=rowKeys,proto3" json:"row_keys,omitempty"` + // Contiguous row ranges included in the set. 
+ RowRanges []*RowRange `protobuf:"bytes,2,rep,name=row_ranges,json=rowRanges" json:"row_ranges,omitempty"` +} + +func (m *RowSet) Reset() { *m = RowSet{} } +func (m *RowSet) String() string { return proto.CompactTextString(m) } +func (*RowSet) ProtoMessage() {} +func (*RowSet) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *RowSet) GetRowKeys() [][]byte { + if m != nil { + return m.RowKeys + } + return nil +} + +func (m *RowSet) GetRowRanges() []*RowRange { + if m != nil { + return m.RowRanges + } + return nil +} + +// Specifies a contiguous range of columns within a single column family. +// The range spans from <column_family>:<start_qualifier> to +// <column_family>:<end_qualifier>, where both bounds can be either +// inclusive or exclusive. +type ColumnRange struct { + // The name of the column family within which this range falls. + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The column qualifier at which to start the range (within `column_family`). + // If neither field is set, interpreted as the empty string, inclusive. + // + // Types that are valid to be assigned to StartQualifier: + // *ColumnRange_StartQualifierClosed + // *ColumnRange_StartQualifierOpen + StartQualifier isColumnRange_StartQualifier `protobuf_oneof:"start_qualifier"` + // The column qualifier at which to end the range (within `column_family`). + // If neither field is set, interpreted as the infinite string, exclusive. 
+ // + // Types that are valid to be assigned to EndQualifier: + // *ColumnRange_EndQualifierClosed + // *ColumnRange_EndQualifierOpen + EndQualifier isColumnRange_EndQualifier `protobuf_oneof:"end_qualifier"` +} + +func (m *ColumnRange) Reset() { *m = ColumnRange{} } +func (m *ColumnRange) String() string { return proto.CompactTextString(m) } +func (*ColumnRange) ProtoMessage() {} +func (*ColumnRange) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +type isColumnRange_StartQualifier interface { + isColumnRange_StartQualifier() +} +type isColumnRange_EndQualifier interface { + isColumnRange_EndQualifier() +} + +type ColumnRange_StartQualifierClosed struct { + StartQualifierClosed []byte `protobuf:"bytes,2,opt,name=start_qualifier_closed,json=startQualifierClosed,proto3,oneof"` +} +type ColumnRange_StartQualifierOpen struct { + StartQualifierOpen []byte `protobuf:"bytes,3,opt,name=start_qualifier_open,json=startQualifierOpen,proto3,oneof"` +} +type ColumnRange_EndQualifierClosed struct { + EndQualifierClosed []byte `protobuf:"bytes,4,opt,name=end_qualifier_closed,json=endQualifierClosed,proto3,oneof"` +} +type ColumnRange_EndQualifierOpen struct { + EndQualifierOpen []byte `protobuf:"bytes,5,opt,name=end_qualifier_open,json=endQualifierOpen,proto3,oneof"` +} + +func (*ColumnRange_StartQualifierClosed) isColumnRange_StartQualifier() {} +func (*ColumnRange_StartQualifierOpen) isColumnRange_StartQualifier() {} +func (*ColumnRange_EndQualifierClosed) isColumnRange_EndQualifier() {} +func (*ColumnRange_EndQualifierOpen) isColumnRange_EndQualifier() {} + +func (m *ColumnRange) GetStartQualifier() isColumnRange_StartQualifier { + if m != nil { + return m.StartQualifier + } + return nil +} +func (m *ColumnRange) GetEndQualifier() isColumnRange_EndQualifier { + if m != nil { + return m.EndQualifier + } + return nil +} + +func (m *ColumnRange) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +func (m *ColumnRange) 
GetStartQualifierClosed() []byte { + if x, ok := m.GetStartQualifier().(*ColumnRange_StartQualifierClosed); ok { + return x.StartQualifierClosed + } + return nil +} + +func (m *ColumnRange) GetStartQualifierOpen() []byte { + if x, ok := m.GetStartQualifier().(*ColumnRange_StartQualifierOpen); ok { + return x.StartQualifierOpen + } + return nil +} + +func (m *ColumnRange) GetEndQualifierClosed() []byte { + if x, ok := m.GetEndQualifier().(*ColumnRange_EndQualifierClosed); ok { + return x.EndQualifierClosed + } + return nil +} + +func (m *ColumnRange) GetEndQualifierOpen() []byte { + if x, ok := m.GetEndQualifier().(*ColumnRange_EndQualifierOpen); ok { + return x.EndQualifierOpen + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ColumnRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ColumnRange_OneofMarshaler, _ColumnRange_OneofUnmarshaler, _ColumnRange_OneofSizer, []interface{}{ + (*ColumnRange_StartQualifierClosed)(nil), + (*ColumnRange_StartQualifierOpen)(nil), + (*ColumnRange_EndQualifierClosed)(nil), + (*ColumnRange_EndQualifierOpen)(nil), + } +} + +func _ColumnRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ColumnRange) + // start_qualifier + switch x := m.StartQualifier.(type) { + case *ColumnRange_StartQualifierClosed: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartQualifierClosed) + case *ColumnRange_StartQualifierOpen: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartQualifierOpen) + case nil: + default: + return fmt.Errorf("ColumnRange.StartQualifier has unexpected type %T", x) + } + // end_qualifier + switch x := m.EndQualifier.(type) { + case *ColumnRange_EndQualifierClosed: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndQualifierClosed) + case 
*ColumnRange_EndQualifierOpen: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndQualifierOpen) + case nil: + default: + return fmt.Errorf("ColumnRange.EndQualifier has unexpected type %T", x) + } + return nil +} + +func _ColumnRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ColumnRange) + switch tag { + case 2: // start_qualifier.start_qualifier_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartQualifier = &ColumnRange_StartQualifierClosed{x} + return true, err + case 3: // start_qualifier.start_qualifier_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartQualifier = &ColumnRange_StartQualifierOpen{x} + return true, err + case 4: // end_qualifier.end_qualifier_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndQualifier = &ColumnRange_EndQualifierClosed{x} + return true, err + case 5: // end_qualifier.end_qualifier_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndQualifier = &ColumnRange_EndQualifierOpen{x} + return true, err + default: + return false, nil + } +} + +func _ColumnRange_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ColumnRange) + // start_qualifier + switch x := m.StartQualifier.(type) { + case *ColumnRange_StartQualifierClosed: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartQualifierClosed))) + n += len(x.StartQualifierClosed) + case *ColumnRange_StartQualifierOpen: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartQualifierOpen))) + n += len(x.StartQualifierOpen) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // end_qualifier + switch x := 
m.EndQualifier.(type) { + case *ColumnRange_EndQualifierClosed: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndQualifierClosed))) + n += len(x.EndQualifierClosed) + case *ColumnRange_EndQualifierOpen: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndQualifierOpen))) + n += len(x.EndQualifierOpen) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specified a contiguous range of microsecond timestamps. +type TimestampRange struct { + // Inclusive lower bound. If left empty, interpreted as 0. + StartTimestampMicros int64 `protobuf:"varint,1,opt,name=start_timestamp_micros,json=startTimestampMicros" json:"start_timestamp_micros,omitempty"` + // Exclusive upper bound. If left empty, interpreted as infinity. + EndTimestampMicros int64 `protobuf:"varint,2,opt,name=end_timestamp_micros,json=endTimestampMicros" json:"end_timestamp_micros,omitempty"` +} + +func (m *TimestampRange) Reset() { *m = TimestampRange{} } +func (m *TimestampRange) String() string { return proto.CompactTextString(m) } +func (*TimestampRange) ProtoMessage() {} +func (*TimestampRange) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *TimestampRange) GetStartTimestampMicros() int64 { + if m != nil { + return m.StartTimestampMicros + } + return 0 +} + +func (m *TimestampRange) GetEndTimestampMicros() int64 { + if m != nil { + return m.EndTimestampMicros + } + return 0 +} + +// Specifies a contiguous range of raw byte values. +type ValueRange struct { + // The value at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + // + // Types that are valid to be assigned to StartValue: + // *ValueRange_StartValueClosed + // *ValueRange_StartValueOpen + StartValue isValueRange_StartValue `protobuf_oneof:"start_value"` + // The value at which to end the range. 
+ // If neither field is set, interpreted as the infinite string, exclusive. + // + // Types that are valid to be assigned to EndValue: + // *ValueRange_EndValueClosed + // *ValueRange_EndValueOpen + EndValue isValueRange_EndValue `protobuf_oneof:"end_value"` +} + +func (m *ValueRange) Reset() { *m = ValueRange{} } +func (m *ValueRange) String() string { return proto.CompactTextString(m) } +func (*ValueRange) ProtoMessage() {} +func (*ValueRange) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +type isValueRange_StartValue interface { + isValueRange_StartValue() +} +type isValueRange_EndValue interface { + isValueRange_EndValue() +} + +type ValueRange_StartValueClosed struct { + StartValueClosed []byte `protobuf:"bytes,1,opt,name=start_value_closed,json=startValueClosed,proto3,oneof"` +} +type ValueRange_StartValueOpen struct { + StartValueOpen []byte `protobuf:"bytes,2,opt,name=start_value_open,json=startValueOpen,proto3,oneof"` +} +type ValueRange_EndValueClosed struct { + EndValueClosed []byte `protobuf:"bytes,3,opt,name=end_value_closed,json=endValueClosed,proto3,oneof"` +} +type ValueRange_EndValueOpen struct { + EndValueOpen []byte `protobuf:"bytes,4,opt,name=end_value_open,json=endValueOpen,proto3,oneof"` +} + +func (*ValueRange_StartValueClosed) isValueRange_StartValue() {} +func (*ValueRange_StartValueOpen) isValueRange_StartValue() {} +func (*ValueRange_EndValueClosed) isValueRange_EndValue() {} +func (*ValueRange_EndValueOpen) isValueRange_EndValue() {} + +func (m *ValueRange) GetStartValue() isValueRange_StartValue { + if m != nil { + return m.StartValue + } + return nil +} +func (m *ValueRange) GetEndValue() isValueRange_EndValue { + if m != nil { + return m.EndValue + } + return nil +} + +func (m *ValueRange) GetStartValueClosed() []byte { + if x, ok := m.GetStartValue().(*ValueRange_StartValueClosed); ok { + return x.StartValueClosed + } + return nil +} + +func (m *ValueRange) GetStartValueOpen() []byte { + if x, ok := 
m.GetStartValue().(*ValueRange_StartValueOpen); ok { + return x.StartValueOpen + } + return nil +} + +func (m *ValueRange) GetEndValueClosed() []byte { + if x, ok := m.GetEndValue().(*ValueRange_EndValueClosed); ok { + return x.EndValueClosed + } + return nil +} + +func (m *ValueRange) GetEndValueOpen() []byte { + if x, ok := m.GetEndValue().(*ValueRange_EndValueOpen); ok { + return x.EndValueOpen + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ValueRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ValueRange_OneofMarshaler, _ValueRange_OneofUnmarshaler, _ValueRange_OneofSizer, []interface{}{ + (*ValueRange_StartValueClosed)(nil), + (*ValueRange_StartValueOpen)(nil), + (*ValueRange_EndValueClosed)(nil), + (*ValueRange_EndValueOpen)(nil), + } +} + +func _ValueRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ValueRange) + // start_value + switch x := m.StartValue.(type) { + case *ValueRange_StartValueClosed: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartValueClosed) + case *ValueRange_StartValueOpen: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.StartValueOpen) + case nil: + default: + return fmt.Errorf("ValueRange.StartValue has unexpected type %T", x) + } + // end_value + switch x := m.EndValue.(type) { + case *ValueRange_EndValueClosed: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndValueClosed) + case *ValueRange_EndValueOpen: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.EndValueOpen) + case nil: + default: + return fmt.Errorf("ValueRange.EndValue has unexpected type %T", x) + } + return nil +} + +func _ValueRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ValueRange) + switch tag { + case 1: // 
start_value.start_value_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartValue = &ValueRange_StartValueClosed{x} + return true, err + case 2: // start_value.start_value_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StartValue = &ValueRange_StartValueOpen{x} + return true, err + case 3: // end_value.end_value_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndValue = &ValueRange_EndValueClosed{x} + return true, err + case 4: // end_value.end_value_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.EndValue = &ValueRange_EndValueOpen{x} + return true, err + default: + return false, nil + } +} + +func _ValueRange_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ValueRange) + // start_value + switch x := m.StartValue.(type) { + case *ValueRange_StartValueClosed: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartValueClosed))) + n += len(x.StartValueClosed) + case *ValueRange_StartValueOpen: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StartValueOpen))) + n += len(x.StartValueOpen) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // end_value + switch x := m.EndValue.(type) { + case *ValueRange_EndValueClosed: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndValueClosed))) + n += len(x.EndValueClosed) + case *ValueRange_EndValueOpen: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.EndValueOpen))) + n += len(x.EndValueOpen) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Takes a row as input and produces an 
alternate view of the row based on +// specified rules. For example, a RowFilter might trim down a row to include +// just the cells from columns matching a given regular expression, or might +// return all the cells of a row but not their values. More complicated filters +// can be composed out of these components to express requests such as, "within +// every column of a particular family, give just the two most recent cells +// which are older than timestamp X." +// +// There are two broad categories of RowFilters (true filters and transformers), +// as well as two ways to compose simple filters into more complex ones +// (chains and interleaves). They work as follows: +// +// * True filters alter the input row by excluding some of its cells wholesale +// from the output row. An example of a true filter is the `value_regex_filter`, +// which excludes cells whose values don't match the specified pattern. All +// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) +// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +// important point to keep in mind is that `RE2(.)` is equivalent by default to +// `RE2([^\n])`, meaning that it does not match newlines. When attempting to +// match an arbitrary byte, you should therefore use the escape sequence `\C`, +// which may need to be further escaped as `\\C` in your client language. +// +// * Transformers alter the input row by changing the values of some of its +// cells in the output, without excluding them completely. Currently, the only +// supported transformer is the `strip_value_transformer`, which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. 
+// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +type RowFilter struct { + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + // + // Types that are valid to be assigned to Filter: + // *RowFilter_Chain_ + // *RowFilter_Interleave_ + // *RowFilter_Condition_ + // *RowFilter_Sink + // *RowFilter_PassAllFilter + // *RowFilter_BlockAllFilter + // *RowFilter_RowKeyRegexFilter + // *RowFilter_RowSampleFilter + // *RowFilter_FamilyNameRegexFilter + // *RowFilter_ColumnQualifierRegexFilter + // *RowFilter_ColumnRangeFilter + // *RowFilter_TimestampRangeFilter + // *RowFilter_ValueRegexFilter + // *RowFilter_ValueRangeFilter + // *RowFilter_CellsPerRowOffsetFilter + // *RowFilter_CellsPerRowLimitFilter + // *RowFilter_CellsPerColumnLimitFilter + // *RowFilter_StripValueTransformer + // *RowFilter_ApplyLabelTransformer + Filter isRowFilter_Filter `protobuf_oneof:"filter"` +} + +func (m *RowFilter) Reset() { *m = RowFilter{} } +func (m *RowFilter) String() string { return proto.CompactTextString(m) } +func (*RowFilter) ProtoMessage() {} +func (*RowFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +type isRowFilter_Filter interface { + isRowFilter_Filter() +} + +type RowFilter_Chain_ struct { + Chain *RowFilter_Chain `protobuf:"bytes,1,opt,name=chain,oneof"` +} +type RowFilter_Interleave_ struct { + Interleave *RowFilter_Interleave `protobuf:"bytes,2,opt,name=interleave,oneof"` +} +type RowFilter_Condition_ struct { + Condition *RowFilter_Condition `protobuf:"bytes,3,opt,name=condition,oneof"` +} +type RowFilter_Sink struct { + Sink bool `protobuf:"varint,16,opt,name=sink,oneof"` +} +type RowFilter_PassAllFilter struct { + PassAllFilter bool `protobuf:"varint,17,opt,name=pass_all_filter,json=passAllFilter,oneof"` +} +type 
RowFilter_BlockAllFilter struct { + BlockAllFilter bool `protobuf:"varint,18,opt,name=block_all_filter,json=blockAllFilter,oneof"` +} +type RowFilter_RowKeyRegexFilter struct { + RowKeyRegexFilter []byte `protobuf:"bytes,4,opt,name=row_key_regex_filter,json=rowKeyRegexFilter,proto3,oneof"` +} +type RowFilter_RowSampleFilter struct { + RowSampleFilter float64 `protobuf:"fixed64,14,opt,name=row_sample_filter,json=rowSampleFilter,oneof"` +} +type RowFilter_FamilyNameRegexFilter struct { + FamilyNameRegexFilter string `protobuf:"bytes,5,opt,name=family_name_regex_filter,json=familyNameRegexFilter,oneof"` +} +type RowFilter_ColumnQualifierRegexFilter struct { + ColumnQualifierRegexFilter []byte `protobuf:"bytes,6,opt,name=column_qualifier_regex_filter,json=columnQualifierRegexFilter,proto3,oneof"` +} +type RowFilter_ColumnRangeFilter struct { + ColumnRangeFilter *ColumnRange `protobuf:"bytes,7,opt,name=column_range_filter,json=columnRangeFilter,oneof"` +} +type RowFilter_TimestampRangeFilter struct { + TimestampRangeFilter *TimestampRange `protobuf:"bytes,8,opt,name=timestamp_range_filter,json=timestampRangeFilter,oneof"` +} +type RowFilter_ValueRegexFilter struct { + ValueRegexFilter []byte `protobuf:"bytes,9,opt,name=value_regex_filter,json=valueRegexFilter,proto3,oneof"` +} +type RowFilter_ValueRangeFilter struct { + ValueRangeFilter *ValueRange `protobuf:"bytes,15,opt,name=value_range_filter,json=valueRangeFilter,oneof"` +} +type RowFilter_CellsPerRowOffsetFilter struct { + CellsPerRowOffsetFilter int32 `protobuf:"varint,10,opt,name=cells_per_row_offset_filter,json=cellsPerRowOffsetFilter,oneof"` +} +type RowFilter_CellsPerRowLimitFilter struct { + CellsPerRowLimitFilter int32 `protobuf:"varint,11,opt,name=cells_per_row_limit_filter,json=cellsPerRowLimitFilter,oneof"` +} +type RowFilter_CellsPerColumnLimitFilter struct { + CellsPerColumnLimitFilter int32 `protobuf:"varint,12,opt,name=cells_per_column_limit_filter,json=cellsPerColumnLimitFilter,oneof"` +} +type 
RowFilter_StripValueTransformer struct { + StripValueTransformer bool `protobuf:"varint,13,opt,name=strip_value_transformer,json=stripValueTransformer,oneof"` +} +type RowFilter_ApplyLabelTransformer struct { + ApplyLabelTransformer string `protobuf:"bytes,19,opt,name=apply_label_transformer,json=applyLabelTransformer,oneof"` +} + +func (*RowFilter_Chain_) isRowFilter_Filter() {} +func (*RowFilter_Interleave_) isRowFilter_Filter() {} +func (*RowFilter_Condition_) isRowFilter_Filter() {} +func (*RowFilter_Sink) isRowFilter_Filter() {} +func (*RowFilter_PassAllFilter) isRowFilter_Filter() {} +func (*RowFilter_BlockAllFilter) isRowFilter_Filter() {} +func (*RowFilter_RowKeyRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_RowSampleFilter) isRowFilter_Filter() {} +func (*RowFilter_FamilyNameRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ColumnQualifierRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ColumnRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_TimestampRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_ValueRegexFilter) isRowFilter_Filter() {} +func (*RowFilter_ValueRangeFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerRowOffsetFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerRowLimitFilter) isRowFilter_Filter() {} +func (*RowFilter_CellsPerColumnLimitFilter) isRowFilter_Filter() {} +func (*RowFilter_StripValueTransformer) isRowFilter_Filter() {} +func (*RowFilter_ApplyLabelTransformer) isRowFilter_Filter() {} + +func (m *RowFilter) GetFilter() isRowFilter_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *RowFilter) GetChain() *RowFilter_Chain { + if x, ok := m.GetFilter().(*RowFilter_Chain_); ok { + return x.Chain + } + return nil +} + +func (m *RowFilter) GetInterleave() *RowFilter_Interleave { + if x, ok := m.GetFilter().(*RowFilter_Interleave_); ok { + return x.Interleave + } + return nil +} + +func (m *RowFilter) GetCondition() *RowFilter_Condition { + if x, ok := 
m.GetFilter().(*RowFilter_Condition_); ok { + return x.Condition + } + return nil +} + +func (m *RowFilter) GetSink() bool { + if x, ok := m.GetFilter().(*RowFilter_Sink); ok { + return x.Sink + } + return false +} + +func (m *RowFilter) GetPassAllFilter() bool { + if x, ok := m.GetFilter().(*RowFilter_PassAllFilter); ok { + return x.PassAllFilter + } + return false +} + +func (m *RowFilter) GetBlockAllFilter() bool { + if x, ok := m.GetFilter().(*RowFilter_BlockAllFilter); ok { + return x.BlockAllFilter + } + return false +} + +func (m *RowFilter) GetRowKeyRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_RowKeyRegexFilter); ok { + return x.RowKeyRegexFilter + } + return nil +} + +func (m *RowFilter) GetRowSampleFilter() float64 { + if x, ok := m.GetFilter().(*RowFilter_RowSampleFilter); ok { + return x.RowSampleFilter + } + return 0 +} + +func (m *RowFilter) GetFamilyNameRegexFilter() string { + if x, ok := m.GetFilter().(*RowFilter_FamilyNameRegexFilter); ok { + return x.FamilyNameRegexFilter + } + return "" +} + +func (m *RowFilter) GetColumnQualifierRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_ColumnQualifierRegexFilter); ok { + return x.ColumnQualifierRegexFilter + } + return nil +} + +func (m *RowFilter) GetColumnRangeFilter() *ColumnRange { + if x, ok := m.GetFilter().(*RowFilter_ColumnRangeFilter); ok { + return x.ColumnRangeFilter + } + return nil +} + +func (m *RowFilter) GetTimestampRangeFilter() *TimestampRange { + if x, ok := m.GetFilter().(*RowFilter_TimestampRangeFilter); ok { + return x.TimestampRangeFilter + } + return nil +} + +func (m *RowFilter) GetValueRegexFilter() []byte { + if x, ok := m.GetFilter().(*RowFilter_ValueRegexFilter); ok { + return x.ValueRegexFilter + } + return nil +} + +func (m *RowFilter) GetValueRangeFilter() *ValueRange { + if x, ok := m.GetFilter().(*RowFilter_ValueRangeFilter); ok { + return x.ValueRangeFilter + } + return nil +} + +func (m *RowFilter) GetCellsPerRowOffsetFilter() int32 { + 
if x, ok := m.GetFilter().(*RowFilter_CellsPerRowOffsetFilter); ok { + return x.CellsPerRowOffsetFilter + } + return 0 +} + +func (m *RowFilter) GetCellsPerRowLimitFilter() int32 { + if x, ok := m.GetFilter().(*RowFilter_CellsPerRowLimitFilter); ok { + return x.CellsPerRowLimitFilter + } + return 0 +} + +func (m *RowFilter) GetCellsPerColumnLimitFilter() int32 { + if x, ok := m.GetFilter().(*RowFilter_CellsPerColumnLimitFilter); ok { + return x.CellsPerColumnLimitFilter + } + return 0 +} + +func (m *RowFilter) GetStripValueTransformer() bool { + if x, ok := m.GetFilter().(*RowFilter_StripValueTransformer); ok { + return x.StripValueTransformer + } + return false +} + +func (m *RowFilter) GetApplyLabelTransformer() string { + if x, ok := m.GetFilter().(*RowFilter_ApplyLabelTransformer); ok { + return x.ApplyLabelTransformer + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*RowFilter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RowFilter_OneofMarshaler, _RowFilter_OneofUnmarshaler, _RowFilter_OneofSizer, []interface{}{ + (*RowFilter_Chain_)(nil), + (*RowFilter_Interleave_)(nil), + (*RowFilter_Condition_)(nil), + (*RowFilter_Sink)(nil), + (*RowFilter_PassAllFilter)(nil), + (*RowFilter_BlockAllFilter)(nil), + (*RowFilter_RowKeyRegexFilter)(nil), + (*RowFilter_RowSampleFilter)(nil), + (*RowFilter_FamilyNameRegexFilter)(nil), + (*RowFilter_ColumnQualifierRegexFilter)(nil), + (*RowFilter_ColumnRangeFilter)(nil), + (*RowFilter_TimestampRangeFilter)(nil), + (*RowFilter_ValueRegexFilter)(nil), + (*RowFilter_ValueRangeFilter)(nil), + (*RowFilter_CellsPerRowOffsetFilter)(nil), + (*RowFilter_CellsPerRowLimitFilter)(nil), + (*RowFilter_CellsPerColumnLimitFilter)(nil), + (*RowFilter_StripValueTransformer)(nil), + (*RowFilter_ApplyLabelTransformer)(nil), + } +} + +func 
_RowFilter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RowFilter) + // filter + switch x := m.Filter.(type) { + case *RowFilter_Chain_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Chain); err != nil { + return err + } + case *RowFilter_Interleave_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Interleave); err != nil { + return err + } + case *RowFilter_Condition_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Condition); err != nil { + return err + } + case *RowFilter_Sink: + t := uint64(0) + if x.Sink { + t = 1 + } + b.EncodeVarint(16<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_PassAllFilter: + t := uint64(0) + if x.PassAllFilter { + t = 1 + } + b.EncodeVarint(17<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_BlockAllFilter: + t := uint64(0) + if x.BlockAllFilter { + t = 1 + } + b.EncodeVarint(18<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_RowKeyRegexFilter: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.RowKeyRegexFilter) + case *RowFilter_RowSampleFilter: + b.EncodeVarint(14<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.RowSampleFilter)) + case *RowFilter_FamilyNameRegexFilter: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FamilyNameRegexFilter) + case *RowFilter_ColumnQualifierRegexFilter: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ColumnQualifierRegexFilter) + case *RowFilter_ColumnRangeFilter: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ColumnRangeFilter); err != nil { + return err + } + case *RowFilter_TimestampRangeFilter: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimestampRangeFilter); err != nil { + return err + } + case *RowFilter_ValueRegexFilter: + b.EncodeVarint(9<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ValueRegexFilter) + case *RowFilter_ValueRangeFilter: + 
b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ValueRangeFilter); err != nil { + return err + } + case *RowFilter_CellsPerRowOffsetFilter: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerRowOffsetFilter)) + case *RowFilter_CellsPerRowLimitFilter: + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerRowLimitFilter)) + case *RowFilter_CellsPerColumnLimitFilter: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CellsPerColumnLimitFilter)) + case *RowFilter_StripValueTransformer: + t := uint64(0) + if x.StripValueTransformer { + t = 1 + } + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *RowFilter_ApplyLabelTransformer: + b.EncodeVarint(19<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ApplyLabelTransformer) + case nil: + default: + return fmt.Errorf("RowFilter.Filter has unexpected type %T", x) + } + return nil +} + +func _RowFilter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RowFilter) + switch tag { + case 1: // filter.chain + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Chain) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Chain_{msg} + return true, err + case 2: // filter.interleave + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Interleave) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Interleave_{msg} + return true, err + case 3: // filter.condition + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RowFilter_Condition) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_Condition_{msg} + return true, err + case 16: // filter.sink + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_Sink{x != 0} + return true, err + case 17: // filter.pass_all_filter + if 
wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_PassAllFilter{x != 0} + return true, err + case 18: // filter.block_all_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_BlockAllFilter{x != 0} + return true, err + case 4: // filter.row_key_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_RowKeyRegexFilter{x} + return true, err + case 14: // filter.row_sample_filter + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Filter = &RowFilter_RowSampleFilter{math.Float64frombits(x)} + return true, err + case 5: // filter.family_name_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &RowFilter_FamilyNameRegexFilter{x} + return true, err + case 6: // filter.column_qualifier_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_ColumnQualifierRegexFilter{x} + return true, err + case 7: // filter.column_range_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ColumnRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_ColumnRangeFilter{msg} + return true, err + case 8: // filter.timestamp_range_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TimestampRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_TimestampRangeFilter{msg} + return true, err + case 9: // filter.value_regex_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Filter = &RowFilter_ValueRegexFilter{x} + return 
true, err + case 15: // filter.value_range_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ValueRange) + err := b.DecodeMessage(msg) + m.Filter = &RowFilter_ValueRangeFilter{msg} + return true, err + case 10: // filter.cells_per_row_offset_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_CellsPerRowOffsetFilter{int32(x)} + return true, err + case 11: // filter.cells_per_row_limit_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_CellsPerRowLimitFilter{int32(x)} + return true, err + case 12: // filter.cells_per_column_limit_filter + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_CellsPerColumnLimitFilter{int32(x)} + return true, err + case 13: // filter.strip_value_transformer + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Filter = &RowFilter_StripValueTransformer{x != 0} + return true, err + case 19: // filter.apply_label_transformer + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &RowFilter_ApplyLabelTransformer{x} + return true, err + default: + return false, nil + } +} + +func _RowFilter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RowFilter) + // filter + switch x := m.Filter.(type) { + case *RowFilter_Chain_: + s := proto.Size(x.Chain) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_Interleave_: + s := proto.Size(x.Interleave) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_Condition_: + s := proto.Size(x.Condition) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_Sink: + n += proto.SizeVarint(16<<3 | proto.WireVarint) + n += 1 + case *RowFilter_PassAllFilter: + n += proto.SizeVarint(17<<3 | proto.WireVarint) + n += 1 + case *RowFilter_BlockAllFilter: + n += proto.SizeVarint(18<<3 | proto.WireVarint) + n += 1 + case *RowFilter_RowKeyRegexFilter: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RowKeyRegexFilter))) + n += len(x.RowKeyRegexFilter) + case *RowFilter_RowSampleFilter: + n += proto.SizeVarint(14<<3 | proto.WireFixed64) + n += 8 + case *RowFilter_FamilyNameRegexFilter: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.FamilyNameRegexFilter))) + n += len(x.FamilyNameRegexFilter) + case *RowFilter_ColumnQualifierRegexFilter: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ColumnQualifierRegexFilter))) + n += len(x.ColumnQualifierRegexFilter) + case *RowFilter_ColumnRangeFilter: + s := proto.Size(x.ColumnRangeFilter) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_TimestampRangeFilter: + s := proto.Size(x.TimestampRangeFilter) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_ValueRegexFilter: + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ValueRegexFilter))) + n += len(x.ValueRegexFilter) + case *RowFilter_ValueRangeFilter: + s := proto.Size(x.ValueRangeFilter) + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RowFilter_CellsPerRowOffsetFilter: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CellsPerRowOffsetFilter)) + case *RowFilter_CellsPerRowLimitFilter: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CellsPerRowLimitFilter)) + case *RowFilter_CellsPerColumnLimitFilter: + n += 
proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CellsPerColumnLimitFilter)) + case *RowFilter_StripValueTransformer: + n += proto.SizeVarint(13<<3 | proto.WireVarint) + n += 1 + case *RowFilter_ApplyLabelTransformer: + n += proto.SizeVarint(19<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ApplyLabelTransformer))) + n += len(x.ApplyLabelTransformer) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A RowFilter which sends rows through several RowFilters in sequence. +type RowFilter_Chain struct { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *RowFilter_Chain) Reset() { *m = RowFilter_Chain{} } +func (m *RowFilter_Chain) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Chain) ProtoMessage() {} +func (*RowFilter_Chain) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 0} } + +func (m *RowFilter_Chain) GetFilters() []*RowFilter { + if m != nil { + return m.Filters + } + return nil +} + +// A RowFilter which sends each row to each of several component +// RowFilters and interleaves the results. +type RowFilter_Interleave struct { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. 
+ // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // + // All interleaved filters are executed atomically. + Filters []*RowFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *RowFilter_Interleave) Reset() { *m = RowFilter_Interleave{} } +func (m *RowFilter_Interleave) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Interleave) ProtoMessage() {} +func (*RowFilter_Interleave) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 1} } + +func (m *RowFilter_Interleave) GetFilters() []*RowFilter { + if m != nil { + return m.Filters + } + return nil +} + +// A RowFilter which evaluates one of two possible RowFilters, depending on +// whether or not a predicate RowFilter outputs any cells from the input row. +// +// IMPORTANT NOTE: The predicate filter does not execute atomically with the +// true and false filters, which may lead to inconsistent or unexpected +// results. Additionally, Condition filters have poor performance, especially +// when filters are set for the false condition. +type RowFilter_Condition struct { + // If `predicate_filter` outputs any cells, then `true_filter` will be + // evaluated on the input row. Otherwise, `false_filter` will be evaluated. 
+ PredicateFilter *RowFilter `protobuf:"bytes,1,opt,name=predicate_filter,json=predicateFilter" json:"predicate_filter,omitempty"` + // The filter to apply to the input row if `predicate_filter` returns any + // results. If not provided, no results will be returned in the true case. + TrueFilter *RowFilter `protobuf:"bytes,2,opt,name=true_filter,json=trueFilter" json:"true_filter,omitempty"` + // The filter to apply to the input row if `predicate_filter` does not + // return any results. If not provided, no results will be returned in the + // false case. + FalseFilter *RowFilter `protobuf:"bytes,3,opt,name=false_filter,json=falseFilter" json:"false_filter,omitempty"` +} + +func (m *RowFilter_Condition) Reset() { *m = RowFilter_Condition{} } +func (m *RowFilter_Condition) String() string { return proto.CompactTextString(m) } +func (*RowFilter_Condition) ProtoMessage() {} +func (*RowFilter_Condition) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 2} } + +func (m *RowFilter_Condition) GetPredicateFilter() *RowFilter { + if m != nil { + return m.PredicateFilter + } + return nil +} + +func (m *RowFilter_Condition) GetTrueFilter() *RowFilter { + if m != nil { + return m.TrueFilter + } + return nil +} + +func (m *RowFilter_Condition) GetFalseFilter() *RowFilter { + if m != nil { + return m.FalseFilter + } + return nil +} + +// Specifies a particular change to be made to the contents of a row. +type Mutation struct { + // Which of the possible Mutation types to apply. 
+ // + // Types that are valid to be assigned to Mutation: + // *Mutation_SetCell_ + // *Mutation_DeleteFromColumn_ + // *Mutation_DeleteFromFamily_ + // *Mutation_DeleteFromRow_ + Mutation isMutation_Mutation `protobuf_oneof:"mutation"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +type isMutation_Mutation interface { + isMutation_Mutation() +} + +type Mutation_SetCell_ struct { + SetCell *Mutation_SetCell `protobuf:"bytes,1,opt,name=set_cell,json=setCell,oneof"` +} +type Mutation_DeleteFromColumn_ struct { + DeleteFromColumn *Mutation_DeleteFromColumn `protobuf:"bytes,2,opt,name=delete_from_column,json=deleteFromColumn,oneof"` +} +type Mutation_DeleteFromFamily_ struct { + DeleteFromFamily *Mutation_DeleteFromFamily `protobuf:"bytes,3,opt,name=delete_from_family,json=deleteFromFamily,oneof"` +} +type Mutation_DeleteFromRow_ struct { + DeleteFromRow *Mutation_DeleteFromRow `protobuf:"bytes,4,opt,name=delete_from_row,json=deleteFromRow,oneof"` +} + +func (*Mutation_SetCell_) isMutation_Mutation() {} +func (*Mutation_DeleteFromColumn_) isMutation_Mutation() {} +func (*Mutation_DeleteFromFamily_) isMutation_Mutation() {} +func (*Mutation_DeleteFromRow_) isMutation_Mutation() {} + +func (m *Mutation) GetMutation() isMutation_Mutation { + if m != nil { + return m.Mutation + } + return nil +} + +func (m *Mutation) GetSetCell() *Mutation_SetCell { + if x, ok := m.GetMutation().(*Mutation_SetCell_); ok { + return x.SetCell + } + return nil +} + +func (m *Mutation) GetDeleteFromColumn() *Mutation_DeleteFromColumn { + if x, ok := m.GetMutation().(*Mutation_DeleteFromColumn_); ok { + return x.DeleteFromColumn + } + return nil +} + +func (m *Mutation) GetDeleteFromFamily() *Mutation_DeleteFromFamily { + if x, ok := m.GetMutation().(*Mutation_DeleteFromFamily_); ok { + return 
x.DeleteFromFamily + } + return nil +} + +func (m *Mutation) GetDeleteFromRow() *Mutation_DeleteFromRow { + if x, ok := m.GetMutation().(*Mutation_DeleteFromRow_); ok { + return x.DeleteFromRow + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Mutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Mutation_OneofMarshaler, _Mutation_OneofUnmarshaler, _Mutation_OneofSizer, []interface{}{ + (*Mutation_SetCell_)(nil), + (*Mutation_DeleteFromColumn_)(nil), + (*Mutation_DeleteFromFamily_)(nil), + (*Mutation_DeleteFromRow_)(nil), + } +} + +func _Mutation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Mutation) + // mutation + switch x := m.Mutation.(type) { + case *Mutation_SetCell_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SetCell); err != nil { + return err + } + case *Mutation_DeleteFromColumn_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromColumn); err != nil { + return err + } + case *Mutation_DeleteFromFamily_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromFamily); err != nil { + return err + } + case *Mutation_DeleteFromRow_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeleteFromRow); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Mutation.Mutation has unexpected type %T", x) + } + return nil +} + +func _Mutation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Mutation) + switch tag { + case 1: // mutation.set_cell + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_SetCell) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_SetCell_{msg} + return true, err + case 2: // 
mutation.delete_from_column + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromColumn) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromColumn_{msg} + return true, err + case 3: // mutation.delete_from_family + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromFamily) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromFamily_{msg} + return true, err + case 4: // mutation.delete_from_row + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_DeleteFromRow) + err := b.DecodeMessage(msg) + m.Mutation = &Mutation_DeleteFromRow_{msg} + return true, err + default: + return false, nil + } +} + +func _Mutation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Mutation) + // mutation + switch x := m.Mutation.(type) { + case *Mutation_SetCell_: + s := proto.Size(x.SetCell) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_DeleteFromColumn_: + s := proto.Size(x.DeleteFromColumn) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_DeleteFromFamily_: + s := proto.Size(x.DeleteFromFamily) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_DeleteFromRow_: + s := proto.Size(x.DeleteFromRow) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Mutation which sets the value of the specified cell. +type Mutation_SetCell struct { + // The name of the family into which new data should be written. 
+ // Must match `[-_.a-zA-Z0-9]+` + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The qualifier of the column into which new data should be written. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,json=columnQualifier,proto3" json:"column_qualifier,omitempty"` + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. + // Otherwise, the client should set this value itself, noting that the + // default value is a timestamp of zero if the field is left unspecified. + // Values must match the granularity of the table (e.g. micros, millis). + TimestampMicros int64 `protobuf:"varint,3,opt,name=timestamp_micros,json=timestampMicros" json:"timestamp_micros,omitempty"` + // The value to be written into the specified cell. + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Mutation_SetCell) Reset() { *m = Mutation_SetCell{} } +func (m *Mutation_SetCell) String() string { return proto.CompactTextString(m) } +func (*Mutation_SetCell) ProtoMessage() {} +func (*Mutation_SetCell) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10, 0} } + +func (m *Mutation_SetCell) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +func (m *Mutation_SetCell) GetColumnQualifier() []byte { + if m != nil { + return m.ColumnQualifier + } + return nil +} + +func (m *Mutation_SetCell) GetTimestampMicros() int64 { + if m != nil { + return m.TimestampMicros + } + return 0 +} + +func (m *Mutation_SetCell) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// A Mutation which deletes cells from the specified column, optionally +// restricting the deletions to a given timestamp range. +type Mutation_DeleteFromColumn struct { + // The name of the family from which cells should be deleted. 
+ // Must match `[-_.a-zA-Z0-9]+` + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The qualifier of the column from which cells should be deleted. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,json=columnQualifier,proto3" json:"column_qualifier,omitempty"` + // The range of timestamps within which cells should be deleted. + TimeRange *TimestampRange `protobuf:"bytes,3,opt,name=time_range,json=timeRange" json:"time_range,omitempty"` +} + +func (m *Mutation_DeleteFromColumn) Reset() { *m = Mutation_DeleteFromColumn{} } +func (m *Mutation_DeleteFromColumn) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromColumn) ProtoMessage() {} +func (*Mutation_DeleteFromColumn) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10, 1} } + +func (m *Mutation_DeleteFromColumn) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +func (m *Mutation_DeleteFromColumn) GetColumnQualifier() []byte { + if m != nil { + return m.ColumnQualifier + } + return nil +} + +func (m *Mutation_DeleteFromColumn) GetTimeRange() *TimestampRange { + if m != nil { + return m.TimeRange + } + return nil +} + +// A Mutation which deletes all cells from the specified column family. +type Mutation_DeleteFromFamily struct { + // The name of the family from which cells should be deleted. 
+ // Must match `[-_.a-zA-Z0-9]+` + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` +} + +func (m *Mutation_DeleteFromFamily) Reset() { *m = Mutation_DeleteFromFamily{} } +func (m *Mutation_DeleteFromFamily) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromFamily) ProtoMessage() {} +func (*Mutation_DeleteFromFamily) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10, 2} } + +func (m *Mutation_DeleteFromFamily) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +// A Mutation which deletes all cells from the containing row. +type Mutation_DeleteFromRow struct { +} + +func (m *Mutation_DeleteFromRow) Reset() { *m = Mutation_DeleteFromRow{} } +func (m *Mutation_DeleteFromRow) String() string { return proto.CompactTextString(m) } +func (*Mutation_DeleteFromRow) ProtoMessage() {} +func (*Mutation_DeleteFromRow) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10, 3} } + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +type ReadModifyWriteRule struct { + // The name of the family to which the read/modify/write should be applied. + // Must match `[-_.a-zA-Z0-9]+` + FamilyName string `protobuf:"bytes,1,opt,name=family_name,json=familyName" json:"family_name,omitempty"` + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier,json=columnQualifier,proto3" json:"column_qualifier,omitempty"` + // The rule used to determine the column's new latest value from its current + // latest value. 
+ // + // Types that are valid to be assigned to Rule: + // *ReadModifyWriteRule_AppendValue + // *ReadModifyWriteRule_IncrementAmount + Rule isReadModifyWriteRule_Rule `protobuf_oneof:"rule"` +} + +func (m *ReadModifyWriteRule) Reset() { *m = ReadModifyWriteRule{} } +func (m *ReadModifyWriteRule) String() string { return proto.CompactTextString(m) } +func (*ReadModifyWriteRule) ProtoMessage() {} +func (*ReadModifyWriteRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +type isReadModifyWriteRule_Rule interface { + isReadModifyWriteRule_Rule() +} + +type ReadModifyWriteRule_AppendValue struct { + AppendValue []byte `protobuf:"bytes,3,opt,name=append_value,json=appendValue,proto3,oneof"` +} +type ReadModifyWriteRule_IncrementAmount struct { + IncrementAmount int64 `protobuf:"varint,4,opt,name=increment_amount,json=incrementAmount,oneof"` +} + +func (*ReadModifyWriteRule_AppendValue) isReadModifyWriteRule_Rule() {} +func (*ReadModifyWriteRule_IncrementAmount) isReadModifyWriteRule_Rule() {} + +func (m *ReadModifyWriteRule) GetRule() isReadModifyWriteRule_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (m *ReadModifyWriteRule) GetFamilyName() string { + if m != nil { + return m.FamilyName + } + return "" +} + +func (m *ReadModifyWriteRule) GetColumnQualifier() []byte { + if m != nil { + return m.ColumnQualifier + } + return nil +} + +func (m *ReadModifyWriteRule) GetAppendValue() []byte { + if x, ok := m.GetRule().(*ReadModifyWriteRule_AppendValue); ok { + return x.AppendValue + } + return nil +} + +func (m *ReadModifyWriteRule) GetIncrementAmount() int64 { + if x, ok := m.GetRule().(*ReadModifyWriteRule_IncrementAmount); ok { + return x.IncrementAmount + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ReadModifyWriteRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ReadModifyWriteRule_OneofMarshaler, _ReadModifyWriteRule_OneofUnmarshaler, _ReadModifyWriteRule_OneofSizer, []interface{}{ + (*ReadModifyWriteRule_AppendValue)(nil), + (*ReadModifyWriteRule_IncrementAmount)(nil), + } +} + +func _ReadModifyWriteRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadModifyWriteRule) + // rule + switch x := m.Rule.(type) { + case *ReadModifyWriteRule_AppendValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.AppendValue) + case *ReadModifyWriteRule_IncrementAmount: + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IncrementAmount)) + case nil: + default: + return fmt.Errorf("ReadModifyWriteRule.Rule has unexpected type %T", x) + } + return nil +} + +func _ReadModifyWriteRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadModifyWriteRule) + switch tag { + case 3: // rule.append_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Rule = &ReadModifyWriteRule_AppendValue{x} + return true, err + case 4: // rule.increment_amount + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Rule = &ReadModifyWriteRule_IncrementAmount{int64(x)} + return true, err + default: + return false, nil + } +} + +func _ReadModifyWriteRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ReadModifyWriteRule) + // rule + switch x := m.Rule.(type) { + case *ReadModifyWriteRule_AppendValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AppendValue))) + n += len(x.AppendValue) + case *ReadModifyWriteRule_IncrementAmount: + n += proto.SizeVarint(4<<3 | 
proto.WireVarint) + n += proto.SizeVarint(uint64(x.IncrementAmount)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*Row)(nil), "google.bigtable.v2.Row") + proto.RegisterType((*Family)(nil), "google.bigtable.v2.Family") + proto.RegisterType((*Column)(nil), "google.bigtable.v2.Column") + proto.RegisterType((*Cell)(nil), "google.bigtable.v2.Cell") + proto.RegisterType((*RowRange)(nil), "google.bigtable.v2.RowRange") + proto.RegisterType((*RowSet)(nil), "google.bigtable.v2.RowSet") + proto.RegisterType((*ColumnRange)(nil), "google.bigtable.v2.ColumnRange") + proto.RegisterType((*TimestampRange)(nil), "google.bigtable.v2.TimestampRange") + proto.RegisterType((*ValueRange)(nil), "google.bigtable.v2.ValueRange") + proto.RegisterType((*RowFilter)(nil), "google.bigtable.v2.RowFilter") + proto.RegisterType((*RowFilter_Chain)(nil), "google.bigtable.v2.RowFilter.Chain") + proto.RegisterType((*RowFilter_Interleave)(nil), "google.bigtable.v2.RowFilter.Interleave") + proto.RegisterType((*RowFilter_Condition)(nil), "google.bigtable.v2.RowFilter.Condition") + proto.RegisterType((*Mutation)(nil), "google.bigtable.v2.Mutation") + proto.RegisterType((*Mutation_SetCell)(nil), "google.bigtable.v2.Mutation.SetCell") + proto.RegisterType((*Mutation_DeleteFromColumn)(nil), "google.bigtable.v2.Mutation.DeleteFromColumn") + proto.RegisterType((*Mutation_DeleteFromFamily)(nil), "google.bigtable.v2.Mutation.DeleteFromFamily") + proto.RegisterType((*Mutation_DeleteFromRow)(nil), "google.bigtable.v2.Mutation.DeleteFromRow") + proto.RegisterType((*ReadModifyWriteRule)(nil), "google.bigtable.v2.ReadModifyWriteRule") +} + +func init() { proto.RegisterFile("google/bigtable/v2/data.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1430 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xf6, 0xc6, 
0x89, 0x7f, 0xce, 0x3a, 0xb6, 0x3b, 0x4d, 0x53, 0xd7, 0x34, 0x34, 0xda, 0xa2, + 0xe2, 0x06, 0x70, 0xc0, 0xad, 0xca, 0x4f, 0x11, 0x6a, 0x9c, 0xd2, 0x1a, 0xfa, 0x3f, 0x8d, 0x8a, + 0x54, 0x09, 0x2d, 0x13, 0x7b, 0x6c, 0x56, 0x99, 0xdd, 0x59, 0x76, 0xd7, 0x71, 0x2d, 0xf1, 0x1c, + 0xdc, 0xc3, 0x25, 0xaf, 0xc0, 0x1d, 0x2f, 0x01, 0x8f, 0xc1, 0x03, 0x70, 0x81, 0xe6, 0x67, 0xff, + 0x5c, 0x37, 0x89, 0x50, 0xef, 0x76, 0xcf, 0xf9, 0xbe, 0xef, 0x9c, 0x39, 0x73, 0xe6, 0xec, 0x2c, + 0x6c, 0x4d, 0x38, 0x9f, 0x30, 0xba, 0x7b, 0xe8, 0x4c, 0x22, 0x72, 0xc8, 0xe8, 0xee, 0x71, 0x6f, + 0x77, 0x44, 0x22, 0xd2, 0xf5, 0x03, 0x1e, 0x71, 0x84, 0x94, 0xbb, 0x1b, 0xbb, 0xbb, 0xc7, 0x3d, + 0xeb, 0x09, 0x14, 0x31, 0x9f, 0xa1, 0x26, 0x14, 0x8f, 0xe8, 0xbc, 0x65, 0x6c, 0x1b, 0x9d, 0x1a, + 0x16, 0x8f, 0xe8, 0x16, 0x54, 0xc6, 0xc4, 0x75, 0x98, 0x43, 0xc3, 0xd6, 0xca, 0x76, 0xb1, 0x63, + 0xf6, 0xda, 0xdd, 0xd7, 0xf9, 0xdd, 0x7b, 0x02, 0x33, 0xc7, 0x09, 0xd6, 0xc2, 0x50, 0x52, 0x36, + 0x84, 0x60, 0xd5, 0x23, 0x2e, 0x95, 0xa2, 0x55, 0x2c, 0x9f, 0xd1, 0x4d, 0x28, 0x0f, 0x39, 0x9b, + 0xba, 0xde, 0x89, 0xa2, 0xfb, 0x12, 0x82, 0x63, 0xa8, 0xf5, 0x02, 0x4a, 0xca, 0x84, 0x2e, 0x43, + 0xf5, 0xa7, 0x29, 0x61, 0xce, 0xd8, 0xa1, 0x81, 0xce, 0x36, 0x35, 0xa0, 0x2e, 0xac, 0x0d, 0x29, + 0x63, 0xb1, 0x76, 0x6b, 0xa9, 0x36, 0x65, 0x0c, 0x2b, 0x98, 0x65, 0xc3, 0xaa, 0x78, 0x45, 0xd7, + 0xa1, 0x19, 0x39, 0x2e, 0x0d, 0x23, 0xe2, 0xfa, 0xb6, 0xeb, 0x0c, 0x03, 0x1e, 0x4a, 0xf1, 0x22, + 0x6e, 0x24, 0xf6, 0x47, 0xd2, 0x8c, 0x36, 0x60, 0xed, 0x98, 0xb0, 0x29, 0x6d, 0xad, 0xc8, 0xe0, + 0xea, 0x05, 0x6d, 0x42, 0x89, 0x91, 0x43, 0xca, 0xc2, 0x56, 0x71, 0xbb, 0xd8, 0xa9, 0x62, 0xfd, + 0x66, 0xfd, 0x69, 0x40, 0x05, 0xf3, 0x19, 0x26, 0xde, 0x84, 0xa2, 0x1d, 0x68, 0x86, 0x11, 0x09, + 0x22, 0xfb, 0x88, 0xce, 0xed, 0x21, 0xe3, 0x21, 0x1d, 0xa9, 0x25, 0x0c, 0x0a, 0xb8, 0x2e, 0x3d, + 0x0f, 0xe8, 0x7c, 0x5f, 0xda, 0xd1, 0x35, 0xa8, 0xa7, 0x58, 0xee, 0x53, 0x4f, 0xc5, 0x1b, 0x14, + 0x70, 0x2d, 0x46, 0x3e, 0xf1, 0xa9, 0x87, 0x2c, 0xa8, 
0x51, 0x6f, 0x94, 0xa2, 0x8a, 0x12, 0x65, + 0x60, 0xa0, 0xde, 0x28, 0xc6, 0x5c, 0x83, 0x7a, 0x8c, 0xd1, 0x51, 0x57, 0x35, 0xaa, 0xa6, 0x50, + 0x2a, 0x66, 0xdf, 0x84, 0x6a, 0x12, 0xb3, 0x5f, 0x85, 0xb2, 0x26, 0x59, 0x3f, 0x40, 0x09, 0xf3, + 0xd9, 0x73, 0x1a, 0xa1, 0x4b, 0x50, 0x09, 0xf8, 0x4c, 0x18, 0x45, 0x7d, 0x8a, 0x9d, 0x1a, 0x2e, + 0x07, 0x7c, 0xf6, 0x80, 0xce, 0x43, 0x74, 0x1b, 0x40, 0xb8, 0x02, 0xb1, 0xd2, 0xb8, 0xfe, 0x97, + 0x97, 0xd5, 0x3f, 0x2e, 0x07, 0xae, 0x06, 0xfa, 0x29, 0xb4, 0x7e, 0x5b, 0x01, 0x53, 0xef, 0xb9, + 0xac, 0xd4, 0x15, 0x30, 0x65, 0x3f, 0xcd, 0xed, 0x4c, 0x03, 0x81, 0x32, 0x3d, 0x16, 0x6d, 0x74, + 0x0b, 0x36, 0x55, 0xaa, 0xc9, 0xde, 0xc7, 0x4b, 0x8b, 0xcb, 0xb4, 0x21, 0xfd, 0xcf, 0x62, 0xb7, + 0x2e, 0x6b, 0x0f, 0x36, 0x16, 0x79, 0x99, 0xb2, 0x15, 0x30, 0xca, 0xb3, 0x64, 0xf9, 0x7a, 0xb0, + 0x21, 0x2a, 0xf1, 0x5a, 0xa4, 0xb8, 0x88, 0x88, 0x7a, 0xa3, 0xc5, 0x38, 0x5d, 0x40, 0x79, 0x8e, + 0x8c, 0xb2, 0xa6, 0x19, 0xcd, 0x2c, 0x43, 0xc4, 0xe8, 0x9f, 0x83, 0xc6, 0x42, 0x5e, 0xfd, 0x06, + 0xac, 0xe7, 0x24, 0xac, 0x57, 0x50, 0x3f, 0x88, 0x9b, 0x51, 0x95, 0xe9, 0x66, 0x5c, 0x85, 0x37, + 0x34, 0xaf, 0x5a, 0xeb, 0xc1, 0x42, 0x07, 0x7f, 0xac, 0xd6, 0xf3, 0x1a, 0x67, 0x45, 0x72, 0x44, + 0xde, 0x0b, 0x0c, 0xeb, 0x6f, 0x03, 0xe0, 0x85, 0xe8, 0x73, 0x15, 0xb6, 0x0b, 0xaa, 0x4c, 0xb6, + 0xec, 0xfd, 0xc5, 0x4e, 0x56, 0x3d, 0x2e, 0xe1, 0xba, 0x18, 0x49, 0xdf, 0x2b, 0x7c, 0xae, 0x9b, + 0xeb, 0x29, 0x5a, 0x16, 0x7b, 0x07, 0x44, 0x71, 0xf2, 0xca, 0x71, 0x4f, 0x8b, 0x2e, 0xce, 0xea, + 0xea, 0xbe, 0xce, 0xa8, 0x66, 0xfb, 0x3a, 0xd1, 0xec, 0xaf, 0x83, 0x99, 0x89, 0x2f, 0xda, 0x3c, + 0xa1, 0x59, 0xff, 0x9a, 0x50, 0xc5, 0x7c, 0x76, 0xcf, 0x61, 0x11, 0x0d, 0xd0, 0x6d, 0x58, 0x1b, + 0xfe, 0x48, 0x1c, 0x4f, 0x2e, 0xc6, 0xec, 0x5d, 0x7d, 0x43, 0xff, 0x2a, 0x74, 0x77, 0x5f, 0x40, + 0x07, 0x05, 0xac, 0x38, 0xe8, 0x5b, 0x00, 0xc7, 0x8b, 0x68, 0xc0, 0x28, 0x39, 0x56, 0xe3, 0xc1, + 0xec, 0x75, 0x4e, 0x56, 0xf8, 0x26, 0xc1, 0x0f, 0x0a, 0x38, 0xc3, 0x46, 0xf7, 0xa1, 0x3a, 
0xe4, + 0xde, 0xc8, 0x89, 0x1c, 0xae, 0x9a, 0xd3, 0xec, 0xbd, 0x7f, 0x4a, 0x32, 0x31, 0x7c, 0x50, 0xc0, + 0x29, 0x17, 0x6d, 0xc0, 0x6a, 0xe8, 0x78, 0x47, 0xad, 0xe6, 0xb6, 0xd1, 0xa9, 0x0c, 0x0a, 0x58, + 0xbe, 0xa1, 0x0e, 0x34, 0x7c, 0x12, 0x86, 0x36, 0x61, 0xcc, 0x1e, 0x4b, 0x7e, 0xeb, 0x9c, 0x06, + 0xac, 0x0b, 0xc7, 0x1e, 0x63, 0xba, 0x22, 0x3b, 0xd0, 0x3c, 0x64, 0x7c, 0x78, 0x94, 0x85, 0x22, + 0x0d, 0xad, 0x4b, 0x4f, 0x8a, 0xfd, 0x04, 0x36, 0xf4, 0x74, 0xb0, 0x03, 0x3a, 0xa1, 0xaf, 0x62, + 0xfc, 0xaa, 0xde, 0xeb, 0x73, 0x6a, 0x56, 0x60, 0xe1, 0xd3, 0x94, 0x0f, 0x41, 0x18, 0xed, 0x90, + 0xb8, 0x3e, 0xa3, 0x31, 0xbe, 0xbe, 0x6d, 0x74, 0x8c, 0x41, 0x01, 0x37, 0x02, 0x3e, 0x7b, 0x2e, + 0x3d, 0x1a, 0xfd, 0x39, 0xb4, 0x32, 0x63, 0x21, 0x1f, 0x44, 0x9c, 0xad, 0xea, 0xa0, 0x80, 0x2f, + 0xa4, 0x53, 0x22, 0x1b, 0x68, 0x1f, 0xb6, 0xd4, 0xc7, 0x24, 0x73, 0x26, 0x73, 0xfc, 0x92, 0x4e, + 0xb2, 0xad, 0x60, 0xc9, 0xf1, 0xcc, 0x8a, 0x3c, 0x83, 0xf3, 0x5a, 0x44, 0x8e, 0xb9, 0x98, 0x5a, + 0x96, 0xfb, 0x73, 0xe5, 0x84, 0x0f, 0x99, 0x40, 0x8b, 0x02, 0x0c, 0xd3, 0x57, 0x2d, 0xf9, 0x12, + 0x36, 0xd3, 0x83, 0x98, 0x53, 0xad, 0x48, 0x55, 0x6b, 0x99, 0x6a, 0x7e, 0x0c, 0x88, 0x61, 0x17, + 0xe5, 0x2c, 0x5a, 0xbb, 0x0b, 0x48, 0x9d, 0x8d, 0xdc, 0x42, 0xab, 0xf1, 0x39, 0x95, 0xbe, 0xec, + 0xf2, 0x1e, 0x27, 0xf8, 0x6c, 0x1e, 0x0d, 0x99, 0xc7, 0xbb, 0xcb, 0xf2, 0x48, 0x67, 0x42, 0xaa, + 0x97, 0x89, 0xff, 0x15, 0xbc, 0x23, 0x3f, 0xb3, 0xb6, 0x2f, 0x8a, 0xcd, 0x67, 0x36, 0x1f, 0x8f, + 0x43, 0x1a, 0xc5, 0xc2, 0xb0, 0x6d, 0x74, 0xd6, 0x06, 0x05, 0x7c, 0x51, 0x82, 0x9e, 0xd2, 0x00, + 0xf3, 0xd9, 0x13, 0x89, 0xd0, 0xfc, 0x2f, 0xa1, 0x9d, 0xe7, 0x33, 0xc7, 0x75, 0x12, 0xba, 0xa9, + 0xe9, 0x9b, 0x19, 0xfa, 0x43, 0x01, 0xd0, 0xec, 0x3e, 0x6c, 0xa5, 0x6c, 0xbd, 0x6d, 0x39, 0x81, + 0x9a, 0x16, 0xb8, 0x14, 0x0b, 0xa8, 0xcd, 0xca, 0x6a, 0x7c, 0x06, 0x17, 0xc3, 0x28, 0x70, 0x7c, + 0x3d, 0x63, 0xa2, 0x80, 0x78, 0xe1, 0x98, 0x07, 0x2e, 0x0d, 0x5a, 0xeb, 0xfa, 0x10, 0x5c, 0x90, + 0x00, 0x59, 0x89, 0x83, 0xd4, 
0x2d, 0x98, 0xc4, 0xf7, 0xd9, 0xdc, 0x96, 0x17, 0x81, 0x1c, 0xf3, + 0x7c, 0xdc, 0xa9, 0x12, 0xf0, 0x50, 0xf8, 0x33, 0xcc, 0xf6, 0x1d, 0x58, 0x93, 0x83, 0x05, 0x7d, + 0x0a, 0x65, 0x95, 0xa9, 0xfa, 0xd6, 0x9a, 0xbd, 0xad, 0x13, 0x27, 0x00, 0x8e, 0xd1, 0xed, 0xaf, + 0x01, 0xd2, 0xc1, 0xf2, 0xff, 0x65, 0xfe, 0x32, 0xa0, 0x9a, 0x4c, 0x15, 0x34, 0x80, 0xa6, 0x1f, + 0xd0, 0x91, 0x33, 0x24, 0x51, 0xd2, 0x1a, 0x6a, 0x4a, 0x9e, 0xa2, 0xd7, 0x48, 0x68, 0x49, 0x5b, + 0x98, 0x51, 0x30, 0x4d, 0x44, 0x56, 0xce, 0x22, 0x02, 0x82, 0xa1, 0xf9, 0x77, 0xa0, 0x36, 0x26, + 0x2c, 0x4c, 0x04, 0x8a, 0x67, 0x11, 0x30, 0x25, 0x45, 0xbd, 0xf4, 0x2b, 0x50, 0x52, 0x5c, 0xeb, + 0x9f, 0x35, 0xa8, 0x3c, 0x9a, 0x46, 0x44, 0x2e, 0x71, 0x0f, 0x2a, 0xa2, 0x3d, 0x45, 0x3b, 0xe8, + 0xa5, 0xbd, 0xb7, 0x4c, 0x34, 0xc6, 0x77, 0x9f, 0xd3, 0x48, 0xdc, 0x1e, 0x07, 0x05, 0x5c, 0x0e, + 0xd5, 0x23, 0xfa, 0x1e, 0xd0, 0x88, 0x32, 0x2a, 0x4a, 0x14, 0x70, 0x57, 0xb7, 0x9d, 0x5e, 0xe2, + 0x47, 0x27, 0x8a, 0xdd, 0x95, 0xb4, 0x7b, 0x01, 0x77, 0x55, 0x1b, 0x8a, 0x13, 0x35, 0x5a, 0xb0, + 0x2d, 0xca, 0xab, 0x51, 0xa7, 0x0b, 0x70, 0x56, 0x79, 0x75, 0x39, 0xcf, 0xcb, 0xeb, 0x0b, 0xfb, + 0x01, 0x34, 0xb2, 0xf2, 0x01, 0x9f, 0xc9, 0xd9, 0x6d, 0xf6, 0x76, 0xce, 0xa8, 0x8d, 0xf9, 0x4c, + 0x7c, 0x42, 0x46, 0x59, 0x43, 0xfb, 0x17, 0x03, 0xca, 0xba, 0x54, 0xa7, 0x5f, 0xec, 0xae, 0x43, + 0x73, 0x71, 0x4e, 0xeb, 0x9b, 0x76, 0x63, 0x61, 0x30, 0x2f, 0xbd, 0xb4, 0x17, 0x4f, 0xb9, 0xb4, + 0xaf, 0x66, 0x2e, 0xed, 0xed, 0x5f, 0x0d, 0x68, 0x2e, 0x96, 0xfd, 0xad, 0x66, 0xb8, 0x07, 0x20, + 0x32, 0x51, 0xf3, 0x54, 0x6f, 0xd3, 0x19, 0x06, 0x3a, 0xae, 0x0a, 0x96, 0x7c, 0x6c, 0xdf, 0xc8, + 0xa6, 0xa8, 0xb7, 0xe9, 0xb4, 0x14, 0xdb, 0x0d, 0x58, 0xcf, 0xed, 0x49, 0x1f, 0xa0, 0xe2, 0xea, + 0xdd, 0xb2, 0xfe, 0x30, 0xe0, 0x3c, 0xa6, 0x64, 0xf4, 0x88, 0x8f, 0x9c, 0xf1, 0xfc, 0xbb, 0xc0, + 0x89, 0x28, 0x9e, 0x32, 0xfa, 0x56, 0x17, 0x7e, 0x15, 0x6a, 0xc4, 0xf7, 0x93, 0x5b, 0x56, 0x72, + 0xbd, 0x36, 0x95, 0x55, 0x4e, 0x4b, 0xf4, 0x01, 0x34, 0x1d, 0x6f, 
0x18, 0x50, 0x97, 0x7a, 0x91, + 0x4d, 0x5c, 0x3e, 0xf5, 0x22, 0xb9, 0x3f, 0x45, 0xf1, 0xe9, 0x4f, 0x3c, 0x7b, 0xd2, 0xd1, 0x2f, + 0xc1, 0x6a, 0x30, 0x65, 0xb4, 0xff, 0x33, 0x6c, 0x0e, 0xb9, 0xbb, 0xa4, 0x86, 0xfd, 0xea, 0x5d, + 0x12, 0x91, 0xa7, 0xe2, 0x3f, 0xf7, 0xa9, 0xf1, 0xf2, 0x0b, 0x0d, 0x98, 0x70, 0x46, 0xbc, 0x49, + 0x97, 0x07, 0x93, 0xdd, 0x09, 0xf5, 0xe4, 0x5f, 0xf0, 0xae, 0x72, 0x11, 0xdf, 0x09, 0xb3, 0xff, + 0xc9, 0xb7, 0xe3, 0xe7, 0xdf, 0x57, 0x5a, 0xf7, 0x15, 0x79, 0x9f, 0xf1, 0xe9, 0xa8, 0xdb, 0x8f, + 0x63, 0xbc, 0xe8, 0x1d, 0x96, 0xa4, 0xc2, 0x8d, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x32, 0x40, + 0x30, 0x5e, 0x68, 0x0f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/bytestream/bytestream.pb.go b/vendor/google.golang.org/genproto/googleapis/bytestream/bytestream.pb.go new file mode 100644 index 000000000..801494803 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/bytestream/bytestream.pb.go @@ -0,0 +1,555 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/bytestream/bytestream.proto + +/* +Package bytestream is a generated protocol buffer package. + +It is generated from these files: + google/bytestream/bytestream.proto + +It has these top-level messages: + ReadRequest + ReadResponse + WriteRequest + WriteResponse + QueryWriteStatusRequest + QueryWriteStatusResponse +*/ +package bytestream + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "github.com/golang/protobuf/ptypes/wrappers" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request object for ByteStream.Read. +type ReadRequest struct { + // The name of the resource to read. + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName" json:"resource_name,omitempty"` + // The offset for the first byte to return in the read, relative to the start + // of the resource. + // + // A `read_offset` that is negative or greater than the size of the resource + // will cause an `OUT_OF_RANGE` error. + ReadOffset int64 `protobuf:"varint,2,opt,name=read_offset,json=readOffset" json:"read_offset,omitempty"` + // The maximum number of `data` bytes the server is allowed to return in the + // sum of all `ReadResponse` messages. A `read_limit` of zero indicates that + // there is no limit, and a negative `read_limit` will cause an error. + // + // If the stream returns fewer bytes than allowed by the `read_limit` and no + // error occurred, the stream includes all data from the `read_offset` to the + // end of the resource. + ReadLimit int64 `protobuf:"varint,3,opt,name=read_limit,json=readLimit" json:"read_limit,omitempty"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ReadRequest) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *ReadRequest) GetReadOffset() int64 { + if m != nil { + return m.ReadOffset + } + return 0 +} + +func (m *ReadRequest) GetReadLimit() int64 { + if m != nil { + return m.ReadLimit + } + return 0 +} + +// Response object for ByteStream.Read. +type ReadResponse struct { + // A portion of the data for the resource. 
The service **may** leave `data` + // empty for any given `ReadResponse`. This enables the service to inform the + // client that the request is still live while it is running an operation to + // generate more data. + Data []byte `protobuf:"bytes,10,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ReadResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// Request object for ByteStream.Write. +type WriteRequest struct { + // The name of the resource to write. This **must** be set on the first + // `WriteRequest` of each `Write()` action. If it is set on subsequent calls, + // it **must** match the value of the first request. + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName" json:"resource_name,omitempty"` + // The offset from the beginning of the resource at which the data should be + // written. It is required on all `WriteRequest`s. + // + // In the first `WriteRequest` of a `Write()` action, it indicates + // the initial offset for the `Write()` call. The value **must** be equal to + // the `committed_size` that a call to `QueryWriteStatus()` would return. + // + // On subsequent calls, this value **must** be set and **must** be equal to + // the sum of the first `write_offset` and the sizes of all `data` bundles + // sent previously on this stream. + // + // An incorrect value will cause an error. + WriteOffset int64 `protobuf:"varint,2,opt,name=write_offset,json=writeOffset" json:"write_offset,omitempty"` + // If `true`, this indicates that the write is complete. Sending any + // `WriteRequest`s subsequent to one in which `finish_write` is `true` will + // cause an error. 
+ FinishWrite bool `protobuf:"varint,3,opt,name=finish_write,json=finishWrite" json:"finish_write,omitempty"` + // A portion of the data for the resource. The client **may** leave `data` + // empty for any given `WriteRequest`. This enables the client to inform the + // service that the request is still live while it is running an operation to + // generate more data. + Data []byte `protobuf:"bytes,10,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *WriteRequest) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *WriteRequest) GetWriteOffset() int64 { + if m != nil { + return m.WriteOffset + } + return 0 +} + +func (m *WriteRequest) GetFinishWrite() bool { + if m != nil { + return m.FinishWrite + } + return false +} + +func (m *WriteRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// Response object for ByteStream.Write. +type WriteResponse struct { + // The number of bytes that have been processed for the given resource. + CommittedSize int64 `protobuf:"varint,1,opt,name=committed_size,json=committedSize" json:"committed_size,omitempty"` +} + +func (m *WriteResponse) Reset() { *m = WriteResponse{} } +func (m *WriteResponse) String() string { return proto.CompactTextString(m) } +func (*WriteResponse) ProtoMessage() {} +func (*WriteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *WriteResponse) GetCommittedSize() int64 { + if m != nil { + return m.CommittedSize + } + return 0 +} + +// Request object for ByteStream.QueryWriteStatus. +type QueryWriteStatusRequest struct { + // The name of the resource whose write status is being requested. 
+ ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName" json:"resource_name,omitempty"` +} + +func (m *QueryWriteStatusRequest) Reset() { *m = QueryWriteStatusRequest{} } +func (m *QueryWriteStatusRequest) String() string { return proto.CompactTextString(m) } +func (*QueryWriteStatusRequest) ProtoMessage() {} +func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *QueryWriteStatusRequest) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +// Response object for ByteStream.QueryWriteStatus. +type QueryWriteStatusResponse struct { + // The number of bytes that have been processed for the given resource. + CommittedSize int64 `protobuf:"varint,1,opt,name=committed_size,json=committedSize" json:"committed_size,omitempty"` + // `complete` is `true` only if the client has sent a `WriteRequest` with + // `finish_write` set to true, and the server has processed that request. + Complete bool `protobuf:"varint,2,opt,name=complete" json:"complete,omitempty"` +} + +func (m *QueryWriteStatusResponse) Reset() { *m = QueryWriteStatusResponse{} } +func (m *QueryWriteStatusResponse) String() string { return proto.CompactTextString(m) } +func (*QueryWriteStatusResponse) ProtoMessage() {} +func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *QueryWriteStatusResponse) GetCommittedSize() int64 { + if m != nil { + return m.CommittedSize + } + return 0 +} + +func (m *QueryWriteStatusResponse) GetComplete() bool { + if m != nil { + return m.Complete + } + return false +} + +func init() { + proto.RegisterType((*ReadRequest)(nil), "google.bytestream.ReadRequest") + proto.RegisterType((*ReadResponse)(nil), "google.bytestream.ReadResponse") + proto.RegisterType((*WriteRequest)(nil), "google.bytestream.WriteRequest") + proto.RegisterType((*WriteResponse)(nil), "google.bytestream.WriteResponse") + 
proto.RegisterType((*QueryWriteStatusRequest)(nil), "google.bytestream.QueryWriteStatusRequest") + proto.RegisterType((*QueryWriteStatusResponse)(nil), "google.bytestream.QueryWriteStatusResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ByteStream service + +type ByteStreamClient interface { + // `Read()` is used to retrieve the contents of a resource as a sequence + // of bytes. The bytes are returned in a sequence of responses, and the + // responses are delivered as the results of a server-side streaming RPC. + Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (ByteStream_ReadClient, error) + // `Write()` is used to send the contents of a resource as a sequence of + // bytes. The bytes are sent in a sequence of request protos of a client-side + // streaming RPC. + // + // A `Write()` action is resumable. If there is an error or the connection is + // broken during the `Write()`, the client should check the status of the + // `Write()` by calling `QueryWriteStatus()` and continue writing from the + // returned `committed_size`. This may be less than the amount of data the + // client previously sent. + // + // Calling `Write()` on a resource name that was previously written and + // finalized could cause an error, depending on whether the underlying service + // allows over-writing of previously written resources. + // + // When the client closes the request channel, the service will respond with + // a `WriteResponse`. The service will not view the resource as `complete` + // until the client has sent a `WriteRequest` with `finish_write` set to + // `true`. 
Sending any requests on a stream after sending a request with + // `finish_write` set to `true` will cause an error. The client **should** + // check the `WriteResponse` it receives to determine how much data the + // service was able to commit and whether the service views the resource as + // `complete` or not. + Write(ctx context.Context, opts ...grpc.CallOption) (ByteStream_WriteClient, error) + // `QueryWriteStatus()` is used to find the `committed_size` for a resource + // that is being written, which can then be used as the `write_offset` for + // the next `Write()` call. + // + // If the resource does not exist (i.e., the resource has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this resource. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // resource name, the sequence of returned `committed_size` values will be + // non-decreasing. + QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) +} + +type byteStreamClient struct { + cc *grpc.ClientConn +} + +func NewByteStreamClient(cc *grpc.ClientConn) ByteStreamClient { + return &byteStreamClient{cc} +} + +func (c *byteStreamClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (ByteStream_ReadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_ByteStream_serviceDesc.Streams[0], c.cc, "/google.bytestream.ByteStream/Read", opts...) 
+ if err != nil { + return nil, err + } + x := &byteStreamReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ByteStream_ReadClient interface { + Recv() (*ReadResponse, error) + grpc.ClientStream +} + +type byteStreamReadClient struct { + grpc.ClientStream +} + +func (x *byteStreamReadClient) Recv() (*ReadResponse, error) { + m := new(ReadResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *byteStreamClient) Write(ctx context.Context, opts ...grpc.CallOption) (ByteStream_WriteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_ByteStream_serviceDesc.Streams[1], c.cc, "/google.bytestream.ByteStream/Write", opts...) + if err != nil { + return nil, err + } + x := &byteStreamWriteClient{stream} + return x, nil +} + +type ByteStream_WriteClient interface { + Send(*WriteRequest) error + CloseAndRecv() (*WriteResponse, error) + grpc.ClientStream +} + +type byteStreamWriteClient struct { + grpc.ClientStream +} + +func (x *byteStreamWriteClient) Send(m *WriteRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *byteStreamWriteClient) CloseAndRecv() (*WriteResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(WriteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *byteStreamClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) { + out := new(QueryWriteStatusResponse) + err := grpc.Invoke(ctx, "/google.bytestream.ByteStream/QueryWriteStatus", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ByteStream service + +type ByteStreamServer interface { + // `Read()` is used to retrieve the contents of a resource as a sequence + // of bytes. The bytes are returned in a sequence of responses, and the + // responses are delivered as the results of a server-side streaming RPC. + Read(*ReadRequest, ByteStream_ReadServer) error + // `Write()` is used to send the contents of a resource as a sequence of + // bytes. The bytes are sent in a sequence of request protos of a client-side + // streaming RPC. + // + // A `Write()` action is resumable. If there is an error or the connection is + // broken during the `Write()`, the client should check the status of the + // `Write()` by calling `QueryWriteStatus()` and continue writing from the + // returned `committed_size`. This may be less than the amount of data the + // client previously sent. + // + // Calling `Write()` on a resource name that was previously written and + // finalized could cause an error, depending on whether the underlying service + // allows over-writing of previously written resources. + // + // When the client closes the request channel, the service will respond with + // a `WriteResponse`. The service will not view the resource as `complete` + // until the client has sent a `WriteRequest` with `finish_write` set to + // `true`. Sending any requests on a stream after sending a request with + // `finish_write` set to `true` will cause an error. The client **should** + // check the `WriteResponse` it receives to determine how much data the + // service was able to commit and whether the service views the resource as + // `complete` or not. + Write(ByteStream_WriteServer) error + // `QueryWriteStatus()` is used to find the `committed_size` for a resource + // that is being written, which can then be used as the `write_offset` for + // the next `Write()` call. 
+ // + // If the resource does not exist (i.e., the resource has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this resource. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // resource name, the sequence of returned `committed_size` values will be + // non-decreasing. + QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) +} + +func RegisterByteStreamServer(s *grpc.Server, srv ByteStreamServer) { + s.RegisterService(&_ByteStream_serviceDesc, srv) +} + +func _ByteStream_Read_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ByteStreamServer).Read(m, &byteStreamReadServer{stream}) +} + +type ByteStream_ReadServer interface { + Send(*ReadResponse) error + grpc.ServerStream +} + +type byteStreamReadServer struct { + grpc.ServerStream +} + +func (x *byteStreamReadServer) Send(m *ReadResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _ByteStream_Write_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ByteStreamServer).Write(&byteStreamWriteServer{stream}) +} + +type ByteStream_WriteServer interface { + SendAndClose(*WriteResponse) error + Recv() (*WriteRequest, error) + grpc.ServerStream +} + +type byteStreamWriteServer struct { + grpc.ServerStream +} + +func (x *byteStreamWriteServer) SendAndClose(m *WriteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *byteStreamWriteServer) Recv() (*WriteRequest, error) { + m := new(WriteRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func 
_ByteStream_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryWriteStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ByteStreamServer).QueryWriteStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.bytestream.ByteStream/QueryWriteStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ByteStreamServer).QueryWriteStatus(ctx, req.(*QueryWriteStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ByteStream_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.bytestream.ByteStream", + HandlerType: (*ByteStreamServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "QueryWriteStatus", + Handler: _ByteStream_QueryWriteStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Read", + Handler: _ByteStream_Read_Handler, + ServerStreams: true, + }, + { + StreamName: "Write", + Handler: _ByteStream_Write_Handler, + ClientStreams: true, + }, + }, + Metadata: "google/bytestream/bytestream.proto", +} + +func init() { proto.RegisterFile("google/bytestream/bytestream.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 446 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x5b, 0x8b, 0x13, 0x31, + 0x14, 0x66, 0xb6, 0xab, 0x74, 0x4f, 0xa7, 0x5e, 0x02, 0xe2, 0x30, 0xe8, 0x6e, 0x77, 0x44, 0x28, + 0x0a, 0x33, 0xa2, 0xe0, 0xcb, 0x82, 0x0f, 0x7d, 0x13, 0x16, 0x2f, 0xd9, 0x07, 0x41, 0x90, 0x21, + 0x6d, 0x4f, 0xc7, 0x60, 0x93, 0x8c, 0x49, 0x86, 0xa5, 0xfb, 0x1f, 0x7c, 0xf1, 0x17, 0x4b, 0x92, + 0xb1, 0x1d, 0x6d, 0x0b, 0xdb, 0xb7, 0xe4, 0xbb, 0xcc, 0xf9, 0xe6, 0xe4, 0x1c, 0xc8, 0x2a, 0xa5, + 0xaa, 0x25, 0x16, 0xd3, 0x95, 0x45, 0x63, 0x35, 0x32, 0xd1, 0x39, 0xe6, 0xb5, 0x56, 0x56, 0x91, + 0x87, 0x41, 
0x93, 0x6f, 0x88, 0xf4, 0x49, 0x6b, 0x63, 0x35, 0x2f, 0x98, 0x94, 0xca, 0x32, 0xcb, + 0x95, 0x34, 0xc1, 0x90, 0x9e, 0xb6, 0xac, 0xbf, 0x4d, 0x9b, 0x45, 0x71, 0xad, 0x59, 0x5d, 0xa3, + 0x6e, 0xf9, 0x4c, 0xc3, 0x80, 0x22, 0x9b, 0x53, 0xfc, 0xd9, 0xa0, 0xb1, 0xe4, 0x19, 0x0c, 0x35, + 0x1a, 0xd5, 0xe8, 0x19, 0x96, 0x92, 0x09, 0x4c, 0xa2, 0x51, 0x34, 0x3e, 0xa1, 0xf1, 0x5f, 0xf0, + 0x03, 0x13, 0x48, 0xce, 0x60, 0xa0, 0x91, 0xcd, 0x4b, 0xb5, 0x58, 0x18, 0xb4, 0xc9, 0xd1, 0x28, + 0x1a, 0xf7, 0x28, 0x38, 0xe8, 0xa3, 0x47, 0xc8, 0x53, 0xf0, 0xb7, 0x72, 0xc9, 0x05, 0xb7, 0x49, + 0xcf, 0xf3, 0x27, 0x0e, 0xb9, 0x74, 0x40, 0x96, 0x41, 0x1c, 0x6a, 0x9a, 0x5a, 0x49, 0x83, 0x84, + 0xc0, 0xf1, 0x9c, 0x59, 0x96, 0xc0, 0x28, 0x1a, 0xc7, 0xd4, 0x9f, 0xb3, 0x5f, 0x11, 0xc4, 0x5f, + 0x34, 0xb7, 0x78, 0x50, 0xb2, 0x73, 0x88, 0xaf, 0x9d, 0xe9, 0xdf, 0x68, 0x03, 0x8f, 0xb5, 0xd9, + 0xce, 0x21, 0x5e, 0x70, 0xc9, 0xcd, 0xf7, 0xd2, 0xa3, 0x3e, 0x5d, 0x9f, 0x0e, 0x02, 0xe6, 0x2b, + 0xee, 0xcc, 0xf3, 0x16, 0x86, 0x6d, 0x9c, 0x36, 0xf4, 0x73, 0xb8, 0x37, 0x53, 0x42, 0x70, 0x6b, + 0x71, 0x5e, 0x1a, 0x7e, 0x13, 0x02, 0xf5, 0xe8, 0x70, 0x8d, 0x5e, 0xf1, 0x1b, 0xcc, 0xde, 0xc1, + 0xe3, 0xcf, 0x0d, 0xea, 0x95, 0x37, 0x5f, 0x59, 0x66, 0x1b, 0x73, 0xc8, 0x1f, 0x65, 0xdf, 0x20, + 0xd9, 0xf6, 0x1f, 0x14, 0x81, 0xa4, 0xd0, 0x9f, 0x29, 0x51, 0x2f, 0xd1, 0xa2, 0x6f, 0x48, 0x9f, + 0xae, 0xef, 0xaf, 0x7f, 0x1f, 0x01, 0x4c, 0x56, 0xee, 0xcb, 0x6e, 0x96, 0xc8, 0x7b, 0x38, 0x76, + 0x2f, 0x43, 0x4e, 0xf3, 0xad, 0x39, 0xcb, 0x3b, 0x63, 0x92, 0x9e, 0xed, 0xe5, 0x43, 0xb4, 0x57, + 0x11, 0xb9, 0x84, 0x3b, 0xa1, 0x9b, 0xbb, 0xb4, 0xdd, 0x97, 0x4d, 0x47, 0xfb, 0x05, 0xe1, 0x6b, + 0xe3, 0x88, 0xfc, 0x80, 0x07, 0xff, 0xb7, 0x81, 0xbc, 0xd8, 0xe1, 0xdb, 0xd3, 0xeb, 0xf4, 0xe5, + 0xad, 0xb4, 0xa1, 0xdc, 0x04, 0xe1, 0xd1, 0x4c, 0x89, 0x6d, 0xc7, 0xe4, 0xfe, 0xa6, 0x55, 0x9f, + 0xdc, 0xf6, 0x7c, 0xbd, 0x68, 0x35, 0x95, 0x5a, 0x32, 0x59, 0xe5, 0x4a, 0x57, 0x45, 0x85, 0xd2, + 0x6f, 0x56, 0x11, 0x28, 0x56, 0x73, 0xd3, 0x59, 
0xe3, 0x8b, 0xcd, 0x71, 0x7a, 0xd7, 0xeb, 0xde, + 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x91, 0x09, 0xd4, 0xf8, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/audit/audit_log.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/audit/audit_log.pb.go new file mode 100644 index 000000000..33c48ea63 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/audit/audit_log.pb.go @@ -0,0 +1,310 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/audit/audit_log.proto + +/* +Package audit is a generated protocol buffer package. + +It is generated from these files: + google/cloud/audit/audit_log.proto + +It has these top-level messages: + AuditLog + AuthenticationInfo + AuthorizationInfo + RequestMetadata +*/ +package audit + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" +import google_protobuf2 "github.com/golang/protobuf/ptypes/struct" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Common audit log format for Google Cloud Platform API operations. +type AuditLog struct { + // The name of the API service performing the operation. For example, + // `"datastore.googleapis.com"`. + ServiceName string `protobuf:"bytes,7,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` + // The name of the service method or operation. 
+ // For API calls, this should be the name of the API method. + // For example, + // + // "google.datastore.v1.Datastore.RunQuery" + // "google.logging.v1.LoggingService.DeleteLog" + MethodName string `protobuf:"bytes,8,opt,name=method_name,json=methodName" json:"method_name,omitempty"` + // The resource or collection that is the target of the operation. + // The name is a scheme-less URI, not including the API service name. + // For example: + // + // "shelves/SHELF_ID/books" + // "shelves/SHELF_ID/books/BOOK_ID" + ResourceName string `protobuf:"bytes,11,opt,name=resource_name,json=resourceName" json:"resource_name,omitempty"` + // The number of items returned from a List or Query API method, + // if applicable. + NumResponseItems int64 `protobuf:"varint,12,opt,name=num_response_items,json=numResponseItems" json:"num_response_items,omitempty"` + // The status of the overall operation. + Status *google_rpc.Status `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` + // Authentication information. + AuthenticationInfo *AuthenticationInfo `protobuf:"bytes,3,opt,name=authentication_info,json=authenticationInfo" json:"authentication_info,omitempty"` + // Authorization information. If there are multiple + // resources or permissions involved, then there is + // one AuthorizationInfo element for each {resource, permission} tuple. + AuthorizationInfo []*AuthorizationInfo `protobuf:"bytes,9,rep,name=authorization_info,json=authorizationInfo" json:"authorization_info,omitempty"` + // Metadata about the operation. + RequestMetadata *RequestMetadata `protobuf:"bytes,4,opt,name=request_metadata,json=requestMetadata" json:"request_metadata,omitempty"` + // The operation request. This may not include all request parameters, + // such as those that are too large, privacy-sensitive, or duplicated + // elsewhere in the log record. + // It should never include user-generated data, such as file contents. 
+ // When the JSON object represented here has a proto equivalent, the proto + // name will be indicated in the `@type` property. + Request *google_protobuf2.Struct `protobuf:"bytes,16,opt,name=request" json:"request,omitempty"` + // The operation response. This may not include all response elements, + // such as those that are too large, privacy-sensitive, or duplicated + // elsewhere in the log record. + // It should never include user-generated data, such as file contents. + // When the JSON object represented here has a proto equivalent, the proto + // name will be indicated in the `@type` property. + Response *google_protobuf2.Struct `protobuf:"bytes,17,opt,name=response" json:"response,omitempty"` + // Other service-specific data about the request, response, and other + // activities. + ServiceData *google_protobuf1.Any `protobuf:"bytes,15,opt,name=service_data,json=serviceData" json:"service_data,omitempty"` +} + +func (m *AuditLog) Reset() { *m = AuditLog{} } +func (m *AuditLog) String() string { return proto.CompactTextString(m) } +func (*AuditLog) ProtoMessage() {} +func (*AuditLog) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AuditLog) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *AuditLog) GetMethodName() string { + if m != nil { + return m.MethodName + } + return "" +} + +func (m *AuditLog) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *AuditLog) GetNumResponseItems() int64 { + if m != nil { + return m.NumResponseItems + } + return 0 +} + +func (m *AuditLog) GetStatus() *google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *AuditLog) GetAuthenticationInfo() *AuthenticationInfo { + if m != nil { + return m.AuthenticationInfo + } + return nil +} + +func (m *AuditLog) GetAuthorizationInfo() []*AuthorizationInfo { + if m != nil { + return m.AuthorizationInfo + } + return nil +} + +func (m 
*AuditLog) GetRequestMetadata() *RequestMetadata { + if m != nil { + return m.RequestMetadata + } + return nil +} + +func (m *AuditLog) GetRequest() *google_protobuf2.Struct { + if m != nil { + return m.Request + } + return nil +} + +func (m *AuditLog) GetResponse() *google_protobuf2.Struct { + if m != nil { + return m.Response + } + return nil +} + +func (m *AuditLog) GetServiceData() *google_protobuf1.Any { + if m != nil { + return m.ServiceData + } + return nil +} + +// Authentication information for the operation. +type AuthenticationInfo struct { + // The email address of the authenticated user making the request. + PrincipalEmail string `protobuf:"bytes,1,opt,name=principal_email,json=principalEmail" json:"principal_email,omitempty"` +} + +func (m *AuthenticationInfo) Reset() { *m = AuthenticationInfo{} } +func (m *AuthenticationInfo) String() string { return proto.CompactTextString(m) } +func (*AuthenticationInfo) ProtoMessage() {} +func (*AuthenticationInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *AuthenticationInfo) GetPrincipalEmail() string { + if m != nil { + return m.PrincipalEmail + } + return "" +} + +// Authorization information for the operation. +type AuthorizationInfo struct { + // The resource being accessed, as a REST-style string. For example: + // + // bigquery.googlapis.com/projects/PROJECTID/datasets/DATASETID + Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` + // The required IAM permission. + Permission string `protobuf:"bytes,2,opt,name=permission" json:"permission,omitempty"` + // Whether or not authorization for `resource` and `permission` + // was granted. 
+ Granted bool `protobuf:"varint,3,opt,name=granted" json:"granted,omitempty"` +} + +func (m *AuthorizationInfo) Reset() { *m = AuthorizationInfo{} } +func (m *AuthorizationInfo) String() string { return proto.CompactTextString(m) } +func (*AuthorizationInfo) ProtoMessage() {} +func (*AuthorizationInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *AuthorizationInfo) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +func (m *AuthorizationInfo) GetPermission() string { + if m != nil { + return m.Permission + } + return "" +} + +func (m *AuthorizationInfo) GetGranted() bool { + if m != nil { + return m.Granted + } + return false +} + +// Metadata about the request. +type RequestMetadata struct { + // The IP address of the caller. + CallerIp string `protobuf:"bytes,1,opt,name=caller_ip,json=callerIp" json:"caller_ip,omitempty"` + // The user agent of the caller. + // This information is not authenticated and should be treated accordingly. + // For example: + // + // + `google-api-python-client/1.4.0`: + // The request was made by the Google API client for Python. + // + `Cloud SDK Command Line Tool apitools-client/1.0 gcloud/0.9.62`: + // The request was made by the Google Cloud SDK CLI (gcloud). + // + `AppEngine-Google; (+http://code.google.com/appengine; appid: s~my-project`: + // The request was made from the `my-project` App Engine app. 
+ CallerSuppliedUserAgent string `protobuf:"bytes,2,opt,name=caller_supplied_user_agent,json=callerSuppliedUserAgent" json:"caller_supplied_user_agent,omitempty"` +} + +func (m *RequestMetadata) Reset() { *m = RequestMetadata{} } +func (m *RequestMetadata) String() string { return proto.CompactTextString(m) } +func (*RequestMetadata) ProtoMessage() {} +func (*RequestMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *RequestMetadata) GetCallerIp() string { + if m != nil { + return m.CallerIp + } + return "" +} + +func (m *RequestMetadata) GetCallerSuppliedUserAgent() string { + if m != nil { + return m.CallerSuppliedUserAgent + } + return "" +} + +func init() { + proto.RegisterType((*AuditLog)(nil), "google.cloud.audit.AuditLog") + proto.RegisterType((*AuthenticationInfo)(nil), "google.cloud.audit.AuthenticationInfo") + proto.RegisterType((*AuthorizationInfo)(nil), "google.cloud.audit.AuthorizationInfo") + proto.RegisterType((*RequestMetadata)(nil), "google.cloud.audit.RequestMetadata") +} + +func init() { proto.RegisterFile("google/cloud/audit/audit_log.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 576 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0x5f, 0x6f, 0xd3, 0x30, + 0x14, 0xc5, 0x55, 0x36, 0x6d, 0xad, 0xbb, 0xd1, 0xd6, 0x20, 0x1a, 0xca, 0x04, 0xa5, 0x13, 0x50, + 0x21, 0x94, 0x88, 0xed, 0x61, 0x0f, 0x13, 0x0f, 0x9d, 0xe0, 0xa1, 0x12, 0x4c, 0x53, 0x0a, 0x42, + 0xe2, 0x25, 0x72, 0x93, 0xdb, 0xcc, 0x22, 0xb1, 0x8d, 0xff, 0x20, 0x8d, 0xef, 0xcc, 0x77, 0x40, + 0xbd, 0x71, 0x4a, 0xd7, 0x0e, 0x5e, 0x2c, 0xf9, 0x9c, 0xdf, 0xbd, 0x76, 0xaf, 0x4f, 0x43, 0x46, + 0xb9, 0x94, 0x79, 0x01, 0x51, 0x5a, 0x48, 0x97, 0x45, 0xcc, 0x65, 0xdc, 0x56, 0x6b, 0x52, 0xc8, + 0x3c, 0x54, 0x5a, 0x5a, 0x49, 0x69, 0xc5, 0x84, 0xc8, 0x84, 0xe8, 0x0e, 0x8e, 0x7c, 0x1d, 0x53, + 0x3c, 0x62, 0x42, 0x48, 0xcb, 0x2c, 0x97, 0xc2, 0x54, 0x15, 0x83, 0xc7, 0xde, 0xc5, 0xdd, 0xdc, 
+ 0x2d, 0x22, 0x26, 0x6e, 0xbc, 0x75, 0xb4, 0x69, 0x19, 0xab, 0x5d, 0x6a, 0xbd, 0xdb, 0xf7, 0xae, + 0x56, 0x69, 0x64, 0x2c, 0xb3, 0xce, 0x77, 0x1c, 0xfd, 0xde, 0x25, 0xcd, 0xc9, 0xf2, 0xe4, 0x8f, + 0x32, 0xa7, 0xcf, 0xc9, 0x81, 0x01, 0xfd, 0x93, 0xa7, 0x90, 0x08, 0x56, 0x42, 0xb0, 0x3f, 0x6c, + 0x8c, 0x5b, 0x71, 0xdb, 0x6b, 0x97, 0xac, 0x04, 0xfa, 0x8c, 0xb4, 0x4b, 0xb0, 0xd7, 0x32, 0xab, + 0x88, 0x26, 0x12, 0xa4, 0x92, 0x10, 0x38, 0x26, 0x87, 0x1a, 0x8c, 0x74, 0xba, 0x6e, 0xd2, 0x46, + 0xe4, 0xa0, 0x16, 0x11, 0x7a, 0x43, 0xa8, 0x70, 0x65, 0xa2, 0xc1, 0x28, 0x29, 0x0c, 0x24, 0xdc, + 0x42, 0x69, 0x82, 0x83, 0x61, 0x63, 0xbc, 0x13, 0x77, 0x85, 0x2b, 0x63, 0x6f, 0x4c, 0x97, 0x3a, + 0x7d, 0x4d, 0xf6, 0xaa, 0x3b, 0x07, 0xf7, 0x86, 0x8d, 0x71, 0xfb, 0x84, 0x86, 0x7e, 0x70, 0x5a, + 0xa5, 0xe1, 0x0c, 0x9d, 0xd8, 0x13, 0xf4, 0x2b, 0x79, 0xc0, 0x9c, 0xbd, 0x06, 0x61, 0x79, 0x8a, + 0xa3, 0x4b, 0xb8, 0x58, 0xc8, 0x60, 0x07, 0x0b, 0x5f, 0x86, 0xdb, 0x13, 0x0f, 0x27, 0xb7, 0xf0, + 0xa9, 0x58, 0xc8, 0x98, 0xb2, 0x2d, 0x8d, 0x7e, 0x26, 0xa8, 0x4a, 0xcd, 0x7f, 0xad, 0xf5, 0x6d, + 0x0d, 0x77, 0xc6, 0xed, 0x93, 0x17, 0xff, 0xea, 0xbb, 0xa2, 0xb1, 0x6d, 0x8f, 0x6d, 0x4a, 0xf4, + 0x92, 0x74, 0x35, 0xfc, 0x70, 0x60, 0x6c, 0x52, 0x82, 0x65, 0x19, 0xb3, 0x2c, 0xd8, 0xc5, 0xbb, + 0x1e, 0xdf, 0xd5, 0x33, 0xae, 0xd8, 0x4f, 0x1e, 0x8d, 0x3b, 0xfa, 0xb6, 0x40, 0xdf, 0x92, 0x7d, + 0x2f, 0x05, 0x5d, 0x6c, 0xd3, 0xaf, 0xdb, 0xd4, 0xb9, 0x08, 0x67, 0x98, 0x8b, 0xb8, 0xe6, 0xe8, + 0x29, 0x69, 0xd6, 0xef, 0x10, 0xf4, 0xfe, 0x5f, 0xb3, 0x02, 0xe9, 0xd9, 0xdf, 0xa4, 0xe0, 0x9d, + 0x3b, 0x58, 0xf8, 0x70, 0xab, 0x70, 0x22, 0x6e, 0x56, 0xf9, 0x79, 0xcf, 0x2c, 0x1b, 0xbd, 0x23, + 0x74, 0x7b, 0xe0, 0xf4, 0x15, 0xe9, 0x28, 0xcd, 0x45, 0xca, 0x15, 0x2b, 0x12, 0x28, 0x19, 0x2f, + 0x82, 0x06, 0xc6, 0xe6, 0xfe, 0x4a, 0xfe, 0xb0, 0x54, 0x47, 0x9c, 0xf4, 0xb6, 0xe6, 0x4a, 0x07, + 0xf8, 0x0b, 0x30, 0x5d, 0xbe, 0x6c, 0xb5, 0xa7, 0x4f, 0x09, 0x51, 0xa0, 0x4b, 0x6e, 0x0c, 0x97, + 0x02, 0xf3, 0xd3, 0x8a, 0xd7, 0x14, 
0x1a, 0x90, 0xfd, 0x5c, 0x33, 0x61, 0x21, 0xc3, 0x8c, 0x34, + 0xe3, 0x7a, 0x3b, 0xfa, 0x4e, 0x3a, 0x1b, 0xe3, 0xa6, 0x4f, 0x48, 0x2b, 0x65, 0x45, 0x01, 0x3a, + 0xe1, 0xaa, 0x3e, 0xa9, 0x12, 0xa6, 0x8a, 0x9e, 0x93, 0x81, 0x37, 0x8d, 0x53, 0xaa, 0xe0, 0x90, + 0x25, 0xce, 0x80, 0x4e, 0x58, 0x0e, 0xc2, 0xfa, 0x93, 0xfb, 0x15, 0x31, 0xf3, 0xc0, 0x17, 0x03, + 0x7a, 0xb2, 0xb4, 0x2f, 0xe6, 0xe4, 0x51, 0x2a, 0xcb, 0x3b, 0x9e, 0xfc, 0xe2, 0xb0, 0xfe, 0x77, + 0x5e, 0x2d, 0x67, 0x7a, 0xd5, 0xf8, 0x76, 0xe6, 0xa1, 0x5c, 0x16, 0x4c, 0xe4, 0xa1, 0xd4, 0x79, + 0x94, 0x83, 0xc0, 0x89, 0x47, 0x95, 0xc5, 0x14, 0x37, 0xeb, 0x1f, 0x9e, 0x73, 0x5c, 0xe7, 0x7b, + 0xc8, 0x9c, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x90, 0xe4, 0x37, 0xbf, 0x9b, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/bigquery/logging/v1/audit_data.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/bigquery/logging/v1/audit_data.pb.go new file mode 100644 index 000000000..84044ac8e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/bigquery/logging/v1/audit_data.pb.go @@ -0,0 +1,2225 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/bigquery/logging/v1/audit_data.proto + +/* +Package logging is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/bigquery/logging/v1/audit_data.proto + +It has these top-level messages: + AuditData + TableInsertRequest + TableUpdateRequest + TableInsertResponse + TableUpdateResponse + DatasetListRequest + DatasetInsertRequest + DatasetInsertResponse + DatasetUpdateRequest + DatasetUpdateResponse + JobInsertRequest + JobInsertResponse + JobQueryRequest + JobQueryResponse + JobGetQueryResultsRequest + JobGetQueryResultsResponse + JobQueryDoneResponse + JobCompletedEvent + TableDataListRequest + Table + TableInfo + TableViewDefinition + Dataset + DatasetInfo + BigQueryAcl + Job + JobConfiguration + TableDefinition + JobStatus + JobStatistics + DatasetName + TableName + JobName +*/ +package logging + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// BigQuery request and response messages for audit log. +// Note: `Table.schema` has been deprecated in favor of `Table.schemaJson`. +// `Table.schema` may continue to be present in your logs during this +// transition. +type AuditData struct { + // Request data for each BigQuery method. 
+ // + // Types that are valid to be assigned to Request: + // *AuditData_TableInsertRequest + // *AuditData_TableUpdateRequest + // *AuditData_DatasetListRequest + // *AuditData_DatasetInsertRequest + // *AuditData_DatasetUpdateRequest + // *AuditData_JobInsertRequest + // *AuditData_JobQueryRequest + // *AuditData_JobGetQueryResultsRequest + // *AuditData_TableDataListRequest + Request isAuditData_Request `protobuf_oneof:"request"` + // Response data for each BigQuery method. + // + // Types that are valid to be assigned to Response: + // *AuditData_TableInsertResponse + // *AuditData_TableUpdateResponse + // *AuditData_DatasetInsertResponse + // *AuditData_DatasetUpdateResponse + // *AuditData_JobInsertResponse + // *AuditData_JobQueryResponse + // *AuditData_JobGetQueryResultsResponse + // *AuditData_JobQueryDoneResponse + Response isAuditData_Response `protobuf_oneof:"response"` + // A job completion event. + JobCompletedEvent *JobCompletedEvent `protobuf:"bytes,17,opt,name=job_completed_event,json=jobCompletedEvent" json:"job_completed_event,omitempty"` +} + +func (m *AuditData) Reset() { *m = AuditData{} } +func (m *AuditData) String() string { return proto.CompactTextString(m) } +func (*AuditData) ProtoMessage() {} +func (*AuditData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAuditData_Request interface { + isAuditData_Request() +} +type isAuditData_Response interface { + isAuditData_Response() +} + +type AuditData_TableInsertRequest struct { + TableInsertRequest *TableInsertRequest `protobuf:"bytes,1,opt,name=table_insert_request,json=tableInsertRequest,oneof"` +} +type AuditData_TableUpdateRequest struct { + TableUpdateRequest *TableUpdateRequest `protobuf:"bytes,16,opt,name=table_update_request,json=tableUpdateRequest,oneof"` +} +type AuditData_DatasetListRequest struct { + DatasetListRequest *DatasetListRequest `protobuf:"bytes,2,opt,name=dataset_list_request,json=datasetListRequest,oneof"` +} +type 
AuditData_DatasetInsertRequest struct { + DatasetInsertRequest *DatasetInsertRequest `protobuf:"bytes,3,opt,name=dataset_insert_request,json=datasetInsertRequest,oneof"` +} +type AuditData_DatasetUpdateRequest struct { + DatasetUpdateRequest *DatasetUpdateRequest `protobuf:"bytes,4,opt,name=dataset_update_request,json=datasetUpdateRequest,oneof"` +} +type AuditData_JobInsertRequest struct { + JobInsertRequest *JobInsertRequest `protobuf:"bytes,5,opt,name=job_insert_request,json=jobInsertRequest,oneof"` +} +type AuditData_JobQueryRequest struct { + JobQueryRequest *JobQueryRequest `protobuf:"bytes,6,opt,name=job_query_request,json=jobQueryRequest,oneof"` +} +type AuditData_JobGetQueryResultsRequest struct { + JobGetQueryResultsRequest *JobGetQueryResultsRequest `protobuf:"bytes,7,opt,name=job_get_query_results_request,json=jobGetQueryResultsRequest,oneof"` +} +type AuditData_TableDataListRequest struct { + TableDataListRequest *TableDataListRequest `protobuf:"bytes,8,opt,name=table_data_list_request,json=tableDataListRequest,oneof"` +} +type AuditData_TableInsertResponse struct { + TableInsertResponse *TableInsertResponse `protobuf:"bytes,9,opt,name=table_insert_response,json=tableInsertResponse,oneof"` +} +type AuditData_TableUpdateResponse struct { + TableUpdateResponse *TableUpdateResponse `protobuf:"bytes,10,opt,name=table_update_response,json=tableUpdateResponse,oneof"` +} +type AuditData_DatasetInsertResponse struct { + DatasetInsertResponse *DatasetInsertResponse `protobuf:"bytes,11,opt,name=dataset_insert_response,json=datasetInsertResponse,oneof"` +} +type AuditData_DatasetUpdateResponse struct { + DatasetUpdateResponse *DatasetUpdateResponse `protobuf:"bytes,12,opt,name=dataset_update_response,json=datasetUpdateResponse,oneof"` +} +type AuditData_JobInsertResponse struct { + JobInsertResponse *JobInsertResponse `protobuf:"bytes,18,opt,name=job_insert_response,json=jobInsertResponse,oneof"` +} +type AuditData_JobQueryResponse struct { + JobQueryResponse 
*JobQueryResponse `protobuf:"bytes,13,opt,name=job_query_response,json=jobQueryResponse,oneof"` +} +type AuditData_JobGetQueryResultsResponse struct { + JobGetQueryResultsResponse *JobGetQueryResultsResponse `protobuf:"bytes,14,opt,name=job_get_query_results_response,json=jobGetQueryResultsResponse,oneof"` +} +type AuditData_JobQueryDoneResponse struct { + JobQueryDoneResponse *JobQueryDoneResponse `protobuf:"bytes,15,opt,name=job_query_done_response,json=jobQueryDoneResponse,oneof"` +} + +func (*AuditData_TableInsertRequest) isAuditData_Request() {} +func (*AuditData_TableUpdateRequest) isAuditData_Request() {} +func (*AuditData_DatasetListRequest) isAuditData_Request() {} +func (*AuditData_DatasetInsertRequest) isAuditData_Request() {} +func (*AuditData_DatasetUpdateRequest) isAuditData_Request() {} +func (*AuditData_JobInsertRequest) isAuditData_Request() {} +func (*AuditData_JobQueryRequest) isAuditData_Request() {} +func (*AuditData_JobGetQueryResultsRequest) isAuditData_Request() {} +func (*AuditData_TableDataListRequest) isAuditData_Request() {} +func (*AuditData_TableInsertResponse) isAuditData_Response() {} +func (*AuditData_TableUpdateResponse) isAuditData_Response() {} +func (*AuditData_DatasetInsertResponse) isAuditData_Response() {} +func (*AuditData_DatasetUpdateResponse) isAuditData_Response() {} +func (*AuditData_JobInsertResponse) isAuditData_Response() {} +func (*AuditData_JobQueryResponse) isAuditData_Response() {} +func (*AuditData_JobGetQueryResultsResponse) isAuditData_Response() {} +func (*AuditData_JobQueryDoneResponse) isAuditData_Response() {} + +func (m *AuditData) GetRequest() isAuditData_Request { + if m != nil { + return m.Request + } + return nil +} +func (m *AuditData) GetResponse() isAuditData_Response { + if m != nil { + return m.Response + } + return nil +} + +func (m *AuditData) GetTableInsertRequest() *TableInsertRequest { + if x, ok := m.GetRequest().(*AuditData_TableInsertRequest); ok { + return x.TableInsertRequest + } + 
return nil +} + +func (m *AuditData) GetTableUpdateRequest() *TableUpdateRequest { + if x, ok := m.GetRequest().(*AuditData_TableUpdateRequest); ok { + return x.TableUpdateRequest + } + return nil +} + +func (m *AuditData) GetDatasetListRequest() *DatasetListRequest { + if x, ok := m.GetRequest().(*AuditData_DatasetListRequest); ok { + return x.DatasetListRequest + } + return nil +} + +func (m *AuditData) GetDatasetInsertRequest() *DatasetInsertRequest { + if x, ok := m.GetRequest().(*AuditData_DatasetInsertRequest); ok { + return x.DatasetInsertRequest + } + return nil +} + +func (m *AuditData) GetDatasetUpdateRequest() *DatasetUpdateRequest { + if x, ok := m.GetRequest().(*AuditData_DatasetUpdateRequest); ok { + return x.DatasetUpdateRequest + } + return nil +} + +func (m *AuditData) GetJobInsertRequest() *JobInsertRequest { + if x, ok := m.GetRequest().(*AuditData_JobInsertRequest); ok { + return x.JobInsertRequest + } + return nil +} + +func (m *AuditData) GetJobQueryRequest() *JobQueryRequest { + if x, ok := m.GetRequest().(*AuditData_JobQueryRequest); ok { + return x.JobQueryRequest + } + return nil +} + +func (m *AuditData) GetJobGetQueryResultsRequest() *JobGetQueryResultsRequest { + if x, ok := m.GetRequest().(*AuditData_JobGetQueryResultsRequest); ok { + return x.JobGetQueryResultsRequest + } + return nil +} + +func (m *AuditData) GetTableDataListRequest() *TableDataListRequest { + if x, ok := m.GetRequest().(*AuditData_TableDataListRequest); ok { + return x.TableDataListRequest + } + return nil +} + +func (m *AuditData) GetTableInsertResponse() *TableInsertResponse { + if x, ok := m.GetResponse().(*AuditData_TableInsertResponse); ok { + return x.TableInsertResponse + } + return nil +} + +func (m *AuditData) GetTableUpdateResponse() *TableUpdateResponse { + if x, ok := m.GetResponse().(*AuditData_TableUpdateResponse); ok { + return x.TableUpdateResponse + } + return nil +} + +func (m *AuditData) GetDatasetInsertResponse() *DatasetInsertResponse { + if x, 
ok := m.GetResponse().(*AuditData_DatasetInsertResponse); ok { + return x.DatasetInsertResponse + } + return nil +} + +func (m *AuditData) GetDatasetUpdateResponse() *DatasetUpdateResponse { + if x, ok := m.GetResponse().(*AuditData_DatasetUpdateResponse); ok { + return x.DatasetUpdateResponse + } + return nil +} + +func (m *AuditData) GetJobInsertResponse() *JobInsertResponse { + if x, ok := m.GetResponse().(*AuditData_JobInsertResponse); ok { + return x.JobInsertResponse + } + return nil +} + +func (m *AuditData) GetJobQueryResponse() *JobQueryResponse { + if x, ok := m.GetResponse().(*AuditData_JobQueryResponse); ok { + return x.JobQueryResponse + } + return nil +} + +func (m *AuditData) GetJobGetQueryResultsResponse() *JobGetQueryResultsResponse { + if x, ok := m.GetResponse().(*AuditData_JobGetQueryResultsResponse); ok { + return x.JobGetQueryResultsResponse + } + return nil +} + +func (m *AuditData) GetJobQueryDoneResponse() *JobQueryDoneResponse { + if x, ok := m.GetResponse().(*AuditData_JobQueryDoneResponse); ok { + return x.JobQueryDoneResponse + } + return nil +} + +func (m *AuditData) GetJobCompletedEvent() *JobCompletedEvent { + if m != nil { + return m.JobCompletedEvent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AuditData) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AuditData_OneofMarshaler, _AuditData_OneofUnmarshaler, _AuditData_OneofSizer, []interface{}{ + (*AuditData_TableInsertRequest)(nil), + (*AuditData_TableUpdateRequest)(nil), + (*AuditData_DatasetListRequest)(nil), + (*AuditData_DatasetInsertRequest)(nil), + (*AuditData_DatasetUpdateRequest)(nil), + (*AuditData_JobInsertRequest)(nil), + (*AuditData_JobQueryRequest)(nil), + (*AuditData_JobGetQueryResultsRequest)(nil), + (*AuditData_TableDataListRequest)(nil), + (*AuditData_TableInsertResponse)(nil), + (*AuditData_TableUpdateResponse)(nil), + (*AuditData_DatasetInsertResponse)(nil), + (*AuditData_DatasetUpdateResponse)(nil), + (*AuditData_JobInsertResponse)(nil), + (*AuditData_JobQueryResponse)(nil), + (*AuditData_JobGetQueryResultsResponse)(nil), + (*AuditData_JobQueryDoneResponse)(nil), + } +} + +func _AuditData_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AuditData) + // request + switch x := m.Request.(type) { + case *AuditData_TableInsertRequest: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TableInsertRequest); err != nil { + return err + } + case *AuditData_TableUpdateRequest: + b.EncodeVarint(16<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TableUpdateRequest); err != nil { + return err + } + case *AuditData_DatasetListRequest: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatasetListRequest); err != nil { + return err + } + case *AuditData_DatasetInsertRequest: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatasetInsertRequest); err != nil { + return err + } + case *AuditData_DatasetUpdateRequest: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatasetUpdateRequest); err != nil { + return err + } + case 
*AuditData_JobInsertRequest: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JobInsertRequest); err != nil { + return err + } + case *AuditData_JobQueryRequest: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JobQueryRequest); err != nil { + return err + } + case *AuditData_JobGetQueryResultsRequest: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JobGetQueryResultsRequest); err != nil { + return err + } + case *AuditData_TableDataListRequest: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TableDataListRequest); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("AuditData.Request has unexpected type %T", x) + } + // response + switch x := m.Response.(type) { + case *AuditData_TableInsertResponse: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TableInsertResponse); err != nil { + return err + } + case *AuditData_TableUpdateResponse: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TableUpdateResponse); err != nil { + return err + } + case *AuditData_DatasetInsertResponse: + b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatasetInsertResponse); err != nil { + return err + } + case *AuditData_DatasetUpdateResponse: + b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatasetUpdateResponse); err != nil { + return err + } + case *AuditData_JobInsertResponse: + b.EncodeVarint(18<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JobInsertResponse); err != nil { + return err + } + case *AuditData_JobQueryResponse: + b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JobQueryResponse); err != nil { + return err + } + case *AuditData_JobGetQueryResultsResponse: + b.EncodeVarint(14<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JobGetQueryResultsResponse); err != nil { + return err + } + case *AuditData_JobQueryDoneResponse: + b.EncodeVarint(15<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.JobQueryDoneResponse); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("AuditData.Response has unexpected type %T", x) + } + return nil +} + +func _AuditData_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AuditData) + switch tag { + case 1: // request.table_insert_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TableInsertRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_TableInsertRequest{msg} + return true, err + case 16: // request.table_update_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TableUpdateRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_TableUpdateRequest{msg} + return true, err + case 2: // request.dataset_list_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatasetListRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_DatasetListRequest{msg} + return true, err + case 3: // request.dataset_insert_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatasetInsertRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_DatasetInsertRequest{msg} + return true, err + case 4: // request.dataset_update_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatasetUpdateRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_DatasetUpdateRequest{msg} + return true, err + case 5: // request.job_insert_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobInsertRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_JobInsertRequest{msg} + return true, err + case 6: // request.job_query_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := 
new(JobQueryRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_JobQueryRequest{msg} + return true, err + case 7: // request.job_get_query_results_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobGetQueryResultsRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_JobGetQueryResultsRequest{msg} + return true, err + case 8: // request.table_data_list_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TableDataListRequest) + err := b.DecodeMessage(msg) + m.Request = &AuditData_TableDataListRequest{msg} + return true, err + case 9: // response.table_insert_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TableInsertResponse) + err := b.DecodeMessage(msg) + m.Response = &AuditData_TableInsertResponse{msg} + return true, err + case 10: // response.table_update_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TableUpdateResponse) + err := b.DecodeMessage(msg) + m.Response = &AuditData_TableUpdateResponse{msg} + return true, err + case 11: // response.dataset_insert_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatasetInsertResponse) + err := b.DecodeMessage(msg) + m.Response = &AuditData_DatasetInsertResponse{msg} + return true, err + case 12: // response.dataset_update_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatasetUpdateResponse) + err := b.DecodeMessage(msg) + m.Response = &AuditData_DatasetUpdateResponse{msg} + return true, err + case 18: // response.job_insert_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobInsertResponse) + err := b.DecodeMessage(msg) + m.Response = &AuditData_JobInsertResponse{msg} + return true, err + case 13: // response.job_query_response + if wire != 
proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobQueryResponse) + err := b.DecodeMessage(msg) + m.Response = &AuditData_JobQueryResponse{msg} + return true, err + case 14: // response.job_get_query_results_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobGetQueryResultsResponse) + err := b.DecodeMessage(msg) + m.Response = &AuditData_JobGetQueryResultsResponse{msg} + return true, err + case 15: // response.job_query_done_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobQueryDoneResponse) + err := b.DecodeMessage(msg) + m.Response = &AuditData_JobQueryDoneResponse{msg} + return true, err + default: + return false, nil + } +} + +func _AuditData_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AuditData) + // request + switch x := m.Request.(type) { + case *AuditData_TableInsertRequest: + s := proto.Size(x.TableInsertRequest) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_TableUpdateRequest: + s := proto.Size(x.TableUpdateRequest) + n += proto.SizeVarint(16<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_DatasetListRequest: + s := proto.Size(x.DatasetListRequest) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_DatasetInsertRequest: + s := proto.Size(x.DatasetInsertRequest) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_DatasetUpdateRequest: + s := proto.Size(x.DatasetUpdateRequest) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_JobInsertRequest: + s := proto.Size(x.JobInsertRequest) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_JobQueryRequest: + s := 
proto.Size(x.JobQueryRequest) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_JobGetQueryResultsRequest: + s := proto.Size(x.JobGetQueryResultsRequest) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_TableDataListRequest: + s := proto.Size(x.TableDataListRequest) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // response + switch x := m.Response.(type) { + case *AuditData_TableInsertResponse: + s := proto.Size(x.TableInsertResponse) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_TableUpdateResponse: + s := proto.Size(x.TableUpdateResponse) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_DatasetInsertResponse: + s := proto.Size(x.DatasetInsertResponse) + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_DatasetUpdateResponse: + s := proto.Size(x.DatasetUpdateResponse) + n += proto.SizeVarint(12<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_JobInsertResponse: + s := proto.Size(x.JobInsertResponse) + n += proto.SizeVarint(18<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_JobQueryResponse: + s := proto.Size(x.JobQueryResponse) + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_JobGetQueryResultsResponse: + s := proto.Size(x.JobGetQueryResultsResponse) + n += proto.SizeVarint(14<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditData_JobQueryDoneResponse: + s := proto.Size(x.JobQueryDoneResponse) + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Table insert request. +type TableInsertRequest struct { + // The new table. + Resource *Table `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *TableInsertRequest) Reset() { *m = TableInsertRequest{} } +func (m *TableInsertRequest) String() string { return proto.CompactTextString(m) } +func (*TableInsertRequest) ProtoMessage() {} +func (*TableInsertRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *TableInsertRequest) GetResource() *Table { + if m != nil { + return m.Resource + } + return nil +} + +// Table update request. +type TableUpdateRequest struct { + // The table to be updated. + Resource *Table `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *TableUpdateRequest) Reset() { *m = TableUpdateRequest{} } +func (m *TableUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*TableUpdateRequest) ProtoMessage() {} +func (*TableUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *TableUpdateRequest) GetResource() *Table { + if m != nil { + return m.Resource + } + return nil +} + +// Table insert response. +type TableInsertResponse struct { + // Final state of the inserted table. + Resource *Table `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *TableInsertResponse) Reset() { *m = TableInsertResponse{} } +func (m *TableInsertResponse) String() string { return proto.CompactTextString(m) } +func (*TableInsertResponse) ProtoMessage() {} +func (*TableInsertResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *TableInsertResponse) GetResource() *Table { + if m != nil { + return m.Resource + } + return nil +} + +// Table update response. +type TableUpdateResponse struct { + // Final state of the updated table. 
+ Resource *Table `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *TableUpdateResponse) Reset() { *m = TableUpdateResponse{} } +func (m *TableUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*TableUpdateResponse) ProtoMessage() {} +func (*TableUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *TableUpdateResponse) GetResource() *Table { + if m != nil { + return m.Resource + } + return nil +} + +// Dataset list request. +type DatasetListRequest struct { + // Whether to list all datasets, including hidden ones. + ListAll bool `protobuf:"varint,1,opt,name=list_all,json=listAll" json:"list_all,omitempty"` +} + +func (m *DatasetListRequest) Reset() { *m = DatasetListRequest{} } +func (m *DatasetListRequest) String() string { return proto.CompactTextString(m) } +func (*DatasetListRequest) ProtoMessage() {} +func (*DatasetListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *DatasetListRequest) GetListAll() bool { + if m != nil { + return m.ListAll + } + return false +} + +// Dataset insert request. +type DatasetInsertRequest struct { + // The dataset to be inserted. + Resource *Dataset `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *DatasetInsertRequest) Reset() { *m = DatasetInsertRequest{} } +func (m *DatasetInsertRequest) String() string { return proto.CompactTextString(m) } +func (*DatasetInsertRequest) ProtoMessage() {} +func (*DatasetInsertRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DatasetInsertRequest) GetResource() *Dataset { + if m != nil { + return m.Resource + } + return nil +} + +// Dataset insert response. +type DatasetInsertResponse struct { + // Final state of the inserted dataset. 
+ Resource *Dataset `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *DatasetInsertResponse) Reset() { *m = DatasetInsertResponse{} } +func (m *DatasetInsertResponse) String() string { return proto.CompactTextString(m) } +func (*DatasetInsertResponse) ProtoMessage() {} +func (*DatasetInsertResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *DatasetInsertResponse) GetResource() *Dataset { + if m != nil { + return m.Resource + } + return nil +} + +// Dataset update request. +type DatasetUpdateRequest struct { + // The dataset to be updated. + Resource *Dataset `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *DatasetUpdateRequest) Reset() { *m = DatasetUpdateRequest{} } +func (m *DatasetUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*DatasetUpdateRequest) ProtoMessage() {} +func (*DatasetUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *DatasetUpdateRequest) GetResource() *Dataset { + if m != nil { + return m.Resource + } + return nil +} + +// Dataset update response. +type DatasetUpdateResponse struct { + // Final state of the updated dataset. + Resource *Dataset `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *DatasetUpdateResponse) Reset() { *m = DatasetUpdateResponse{} } +func (m *DatasetUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*DatasetUpdateResponse) ProtoMessage() {} +func (*DatasetUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *DatasetUpdateResponse) GetResource() *Dataset { + if m != nil { + return m.Resource + } + return nil +} + +// Job insert request. +type JobInsertRequest struct { + // Job insert request. 
+ Resource *Job `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *JobInsertRequest) Reset() { *m = JobInsertRequest{} } +func (m *JobInsertRequest) String() string { return proto.CompactTextString(m) } +func (*JobInsertRequest) ProtoMessage() {} +func (*JobInsertRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *JobInsertRequest) GetResource() *Job { + if m != nil { + return m.Resource + } + return nil +} + +// Job insert response. +type JobInsertResponse struct { + // Job insert response. + Resource *Job `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *JobInsertResponse) Reset() { *m = JobInsertResponse{} } +func (m *JobInsertResponse) String() string { return proto.CompactTextString(m) } +func (*JobInsertResponse) ProtoMessage() {} +func (*JobInsertResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *JobInsertResponse) GetResource() *Job { + if m != nil { + return m.Resource + } + return nil +} + +// Job query request. +type JobQueryRequest struct { + // The query. + Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` + // The maximum number of results. + MaxResults uint32 `protobuf:"varint,2,opt,name=max_results,json=maxResults" json:"max_results,omitempty"` + // The default dataset for tables that do not have a dataset specified. + DefaultDataset *DatasetName `protobuf:"bytes,3,opt,name=default_dataset,json=defaultDataset" json:"default_dataset,omitempty"` + // Project that the query should be charged to. + ProjectId string `protobuf:"bytes,4,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // If true, don't actually run the job. Just check that it would run. 
+ DryRun bool `protobuf:"varint,5,opt,name=dry_run,json=dryRun" json:"dry_run,omitempty"` +} + +func (m *JobQueryRequest) Reset() { *m = JobQueryRequest{} } +func (m *JobQueryRequest) String() string { return proto.CompactTextString(m) } +func (*JobQueryRequest) ProtoMessage() {} +func (*JobQueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *JobQueryRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *JobQueryRequest) GetMaxResults() uint32 { + if m != nil { + return m.MaxResults + } + return 0 +} + +func (m *JobQueryRequest) GetDefaultDataset() *DatasetName { + if m != nil { + return m.DefaultDataset + } + return nil +} + +func (m *JobQueryRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *JobQueryRequest) GetDryRun() bool { + if m != nil { + return m.DryRun + } + return false +} + +// Job query response. +type JobQueryResponse struct { + // The total number of rows in the full query result set. + TotalResults uint64 `protobuf:"varint,1,opt,name=total_results,json=totalResults" json:"total_results,omitempty"` + // Information about the queried job. + Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` +} + +func (m *JobQueryResponse) Reset() { *m = JobQueryResponse{} } +func (m *JobQueryResponse) String() string { return proto.CompactTextString(m) } +func (*JobQueryResponse) ProtoMessage() {} +func (*JobQueryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *JobQueryResponse) GetTotalResults() uint64 { + if m != nil { + return m.TotalResults + } + return 0 +} + +func (m *JobQueryResponse) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +// Job getQueryResults request. +type JobGetQueryResultsRequest struct { + // Maximum number of results to return. 
+ MaxResults uint32 `protobuf:"varint,1,opt,name=max_results,json=maxResults" json:"max_results,omitempty"` + // Zero-based row number at which to start. + StartRow uint64 `protobuf:"varint,2,opt,name=start_row,json=startRow" json:"start_row,omitempty"` +} + +func (m *JobGetQueryResultsRequest) Reset() { *m = JobGetQueryResultsRequest{} } +func (m *JobGetQueryResultsRequest) String() string { return proto.CompactTextString(m) } +func (*JobGetQueryResultsRequest) ProtoMessage() {} +func (*JobGetQueryResultsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *JobGetQueryResultsRequest) GetMaxResults() uint32 { + if m != nil { + return m.MaxResults + } + return 0 +} + +func (m *JobGetQueryResultsRequest) GetStartRow() uint64 { + if m != nil { + return m.StartRow + } + return 0 +} + +// Job getQueryResults response. +type JobGetQueryResultsResponse struct { + // Total number of results in query results. + TotalResults uint64 `protobuf:"varint,1,opt,name=total_results,json=totalResults" json:"total_results,omitempty"` + // The job that was created to run the query. + // It completed if `job.status.state` is `DONE`. + // It failed if `job.status.errorResult` is also present. + Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` +} + +func (m *JobGetQueryResultsResponse) Reset() { *m = JobGetQueryResultsResponse{} } +func (m *JobGetQueryResultsResponse) String() string { return proto.CompactTextString(m) } +func (*JobGetQueryResultsResponse) ProtoMessage() {} +func (*JobGetQueryResultsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *JobGetQueryResultsResponse) GetTotalResults() uint64 { + if m != nil { + return m.TotalResults + } + return 0 +} + +func (m *JobGetQueryResultsResponse) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +// Job getQueryDone response. +type JobQueryDoneResponse struct { + // The job and status information. 
+ // The job completed if `job.status.state` is `DONE`. + Job *Job `protobuf:"bytes,1,opt,name=job" json:"job,omitempty"` +} + +func (m *JobQueryDoneResponse) Reset() { *m = JobQueryDoneResponse{} } +func (m *JobQueryDoneResponse) String() string { return proto.CompactTextString(m) } +func (*JobQueryDoneResponse) ProtoMessage() {} +func (*JobQueryDoneResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *JobQueryDoneResponse) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +// Query job completed event. +type JobCompletedEvent struct { + // Name of the event. + EventName string `protobuf:"bytes,1,opt,name=event_name,json=eventName" json:"event_name,omitempty"` + // Job information. + Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` +} + +func (m *JobCompletedEvent) Reset() { *m = JobCompletedEvent{} } +func (m *JobCompletedEvent) String() string { return proto.CompactTextString(m) } +func (*JobCompletedEvent) ProtoMessage() {} +func (*JobCompletedEvent) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *JobCompletedEvent) GetEventName() string { + if m != nil { + return m.EventName + } + return "" +} + +func (m *JobCompletedEvent) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +// Table data-list request. +type TableDataListRequest struct { + // Starting row offset. + StartRow uint64 `protobuf:"varint,1,opt,name=start_row,json=startRow" json:"start_row,omitempty"` + // Maximum number of results to return. 
+ MaxResults uint32 `protobuf:"varint,2,opt,name=max_results,json=maxResults" json:"max_results,omitempty"` +} + +func (m *TableDataListRequest) Reset() { *m = TableDataListRequest{} } +func (m *TableDataListRequest) String() string { return proto.CompactTextString(m) } +func (*TableDataListRequest) ProtoMessage() {} +func (*TableDataListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *TableDataListRequest) GetStartRow() uint64 { + if m != nil { + return m.StartRow + } + return 0 +} + +func (m *TableDataListRequest) GetMaxResults() uint32 { + if m != nil { + return m.MaxResults + } + return 0 +} + +// Describes a BigQuery table. +// See the [Table](/bigquery/docs/reference/v2/tables) API resource +// for more details on individual fields. +// Note: `Table.schema` has been deprecated in favor of `Table.schemaJson`. +// `Table.schema` may continue to be present in your logs during this +// transition. +type Table struct { + // The name of the table. + TableName *TableName `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + // User-provided metadata for the table. + Info *TableInfo `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // A JSON representation of the table's schema. + SchemaJson string `protobuf:"bytes,8,opt,name=schema_json,json=schemaJson" json:"schema_json,omitempty"` + // If present, this is a virtual table defined by a SQL query. + View *TableViewDefinition `protobuf:"bytes,4,opt,name=view" json:"view,omitempty"` + // The expiration date for the table, after which the table + // is deleted and the storage reclaimed. + // If not present, the table persists indefinitely. + ExpireTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=expire_time,json=expireTime" json:"expire_time,omitempty"` + // The time the table was created. 
+ CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The time the table was last truncated + // by an operation with a `writeDisposition` of `WRITE_TRUNCATE`. + TruncateTime *google_protobuf2.Timestamp `protobuf:"bytes,7,opt,name=truncate_time,json=truncateTime" json:"truncate_time,omitempty"` +} + +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} +func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *Table) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *Table) GetInfo() *TableInfo { + if m != nil { + return m.Info + } + return nil +} + +func (m *Table) GetSchemaJson() string { + if m != nil { + return m.SchemaJson + } + return "" +} + +func (m *Table) GetView() *TableViewDefinition { + if m != nil { + return m.View + } + return nil +} + +func (m *Table) GetExpireTime() *google_protobuf2.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +func (m *Table) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Table) GetTruncateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.TruncateTime + } + return nil +} + +// User-provided metadata for a table. +type TableInfo struct { + // A short name for the table, such as`"Analytics Data - Jan 2011"`. + FriendlyName string `protobuf:"bytes,1,opt,name=friendly_name,json=friendlyName" json:"friendly_name,omitempty"` + // A long description, perhaps several paragraphs, + // describing the table contents in detail. 
+ Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *TableInfo) Reset() { *m = TableInfo{} } +func (m *TableInfo) String() string { return proto.CompactTextString(m) } +func (*TableInfo) ProtoMessage() {} +func (*TableInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *TableInfo) GetFriendlyName() string { + if m != nil { + return m.FriendlyName + } + return "" +} + +func (m *TableInfo) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Describes a virtual table defined by a SQL query. +type TableViewDefinition struct { + // SQL query defining the view. + Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` +} + +func (m *TableViewDefinition) Reset() { *m = TableViewDefinition{} } +func (m *TableViewDefinition) String() string { return proto.CompactTextString(m) } +func (*TableViewDefinition) ProtoMessage() {} +func (*TableViewDefinition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *TableViewDefinition) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +// BigQuery dataset information. +// See the [Dataset](/bigquery/docs/reference/v2/datasets) API resource +// for more details on individual fields. +type Dataset struct { + // The name of the dataset. + DatasetName *DatasetName `protobuf:"bytes,1,opt,name=dataset_name,json=datasetName" json:"dataset_name,omitempty"` + // User-provided metadata for the dataset. + Info *DatasetInfo `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // The time the dataset was created. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The time the dataset was last modified. + UpdateTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // The access control list for the dataset. 
+ Acl *BigQueryAcl `protobuf:"bytes,6,opt,name=acl" json:"acl,omitempty"` + // If this field is present, each table that does not specify an + // expiration time is assigned an expiration time by adding this + // duration to the table's `createTime`. If this field is empty, + // there is no default table expiration time. + DefaultTableExpireDuration *google_protobuf1.Duration `protobuf:"bytes,8,opt,name=default_table_expire_duration,json=defaultTableExpireDuration" json:"default_table_expire_duration,omitempty"` +} + +func (m *Dataset) Reset() { *m = Dataset{} } +func (m *Dataset) String() string { return proto.CompactTextString(m) } +func (*Dataset) ProtoMessage() {} +func (*Dataset) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *Dataset) GetDatasetName() *DatasetName { + if m != nil { + return m.DatasetName + } + return nil +} + +func (m *Dataset) GetInfo() *DatasetInfo { + if m != nil { + return m.Info + } + return nil +} + +func (m *Dataset) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Dataset) GetUpdateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *Dataset) GetAcl() *BigQueryAcl { + if m != nil { + return m.Acl + } + return nil +} + +func (m *Dataset) GetDefaultTableExpireDuration() *google_protobuf1.Duration { + if m != nil { + return m.DefaultTableExpireDuration + } + return nil +} + +// User-provided metadata for a dataset. +type DatasetInfo struct { + // A short name for the dataset, such as`"Analytics Data 2011"`. + FriendlyName string `protobuf:"bytes,1,opt,name=friendly_name,json=friendlyName" json:"friendly_name,omitempty"` + // A long description, perhaps several paragraphs, + // describing the dataset contents in detail. 
+ Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *DatasetInfo) Reset() { *m = DatasetInfo{} } +func (m *DatasetInfo) String() string { return proto.CompactTextString(m) } +func (*DatasetInfo) ProtoMessage() {} +func (*DatasetInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *DatasetInfo) GetFriendlyName() string { + if m != nil { + return m.FriendlyName + } + return "" +} + +func (m *DatasetInfo) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// An access control list. +type BigQueryAcl struct { + // Access control entry list. + Entries []*BigQueryAcl_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` +} + +func (m *BigQueryAcl) Reset() { *m = BigQueryAcl{} } +func (m *BigQueryAcl) String() string { return proto.CompactTextString(m) } +func (*BigQueryAcl) ProtoMessage() {} +func (*BigQueryAcl) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *BigQueryAcl) GetEntries() []*BigQueryAcl_Entry { + if m != nil { + return m.Entries + } + return nil +} + +// Access control entry. +type BigQueryAcl_Entry struct { + // The granted role, which can be `READER`, `WRITER`, or `OWNER`. + Role string `protobuf:"bytes,1,opt,name=role" json:"role,omitempty"` + // Grants access to a group identified by an email address. + GroupEmail string `protobuf:"bytes,2,opt,name=group_email,json=groupEmail" json:"group_email,omitempty"` + // Grants access to a user identified by an email address. + UserEmail string `protobuf:"bytes,3,opt,name=user_email,json=userEmail" json:"user_email,omitempty"` + // Grants access to all members of a domain. + Domain string `protobuf:"bytes,4,opt,name=domain" json:"domain,omitempty"` + // Grants access to special groups. Valid groups are `PROJECT_OWNERS`, + // `PROJECT_READERS`, `PROJECT_WRITERS` and `ALL_AUTHENTICATED_USERS`. 
+ SpecialGroup string `protobuf:"bytes,5,opt,name=special_group,json=specialGroup" json:"special_group,omitempty"` + // Grants access to a BigQuery View. + ViewName *TableName `protobuf:"bytes,6,opt,name=view_name,json=viewName" json:"view_name,omitempty"` +} + +func (m *BigQueryAcl_Entry) Reset() { *m = BigQueryAcl_Entry{} } +func (m *BigQueryAcl_Entry) String() string { return proto.CompactTextString(m) } +func (*BigQueryAcl_Entry) ProtoMessage() {} +func (*BigQueryAcl_Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24, 0} } + +func (m *BigQueryAcl_Entry) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *BigQueryAcl_Entry) GetGroupEmail() string { + if m != nil { + return m.GroupEmail + } + return "" +} + +func (m *BigQueryAcl_Entry) GetUserEmail() string { + if m != nil { + return m.UserEmail + } + return "" +} + +func (m *BigQueryAcl_Entry) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *BigQueryAcl_Entry) GetSpecialGroup() string { + if m != nil { + return m.SpecialGroup + } + return "" +} + +func (m *BigQueryAcl_Entry) GetViewName() *TableName { + if m != nil { + return m.ViewName + } + return nil +} + +// Describes a job. +type Job struct { + // Job name. + JobName *JobName `protobuf:"bytes,1,opt,name=job_name,json=jobName" json:"job_name,omitempty"` + // Job configuration. + JobConfiguration *JobConfiguration `protobuf:"bytes,2,opt,name=job_configuration,json=jobConfiguration" json:"job_configuration,omitempty"` + // Job status. + JobStatus *JobStatus `protobuf:"bytes,3,opt,name=job_status,json=jobStatus" json:"job_status,omitempty"` + // Job statistics. 
+ JobStatistics *JobStatistics `protobuf:"bytes,4,opt,name=job_statistics,json=jobStatistics" json:"job_statistics,omitempty"` +} + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *Job) GetJobName() *JobName { + if m != nil { + return m.JobName + } + return nil +} + +func (m *Job) GetJobConfiguration() *JobConfiguration { + if m != nil { + return m.JobConfiguration + } + return nil +} + +func (m *Job) GetJobStatus() *JobStatus { + if m != nil { + return m.JobStatus + } + return nil +} + +func (m *Job) GetJobStatistics() *JobStatistics { + if m != nil { + return m.JobStatistics + } + return nil +} + +// Job configuration information. +// See the [Jobs](/bigquery/docs/reference/v2/jobs) API resource +// for more details on individual fields. +type JobConfiguration struct { + // Job configuration information. + // + // Types that are valid to be assigned to Configuration: + // *JobConfiguration_Query_ + // *JobConfiguration_Load_ + // *JobConfiguration_Extract_ + // *JobConfiguration_TableCopy_ + Configuration isJobConfiguration_Configuration `protobuf_oneof:"configuration"` + // If true, don't actually run the job. Just check that it would run. 
+ DryRun bool `protobuf:"varint,9,opt,name=dry_run,json=dryRun" json:"dry_run,omitempty"` +} + +func (m *JobConfiguration) Reset() { *m = JobConfiguration{} } +func (m *JobConfiguration) String() string { return proto.CompactTextString(m) } +func (*JobConfiguration) ProtoMessage() {} +func (*JobConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +type isJobConfiguration_Configuration interface { + isJobConfiguration_Configuration() +} + +type JobConfiguration_Query_ struct { + Query *JobConfiguration_Query `protobuf:"bytes,5,opt,name=query,oneof"` +} +type JobConfiguration_Load_ struct { + Load *JobConfiguration_Load `protobuf:"bytes,6,opt,name=load,oneof"` +} +type JobConfiguration_Extract_ struct { + Extract *JobConfiguration_Extract `protobuf:"bytes,7,opt,name=extract,oneof"` +} +type JobConfiguration_TableCopy_ struct { + TableCopy *JobConfiguration_TableCopy `protobuf:"bytes,8,opt,name=table_copy,json=tableCopy,oneof"` +} + +func (*JobConfiguration_Query_) isJobConfiguration_Configuration() {} +func (*JobConfiguration_Load_) isJobConfiguration_Configuration() {} +func (*JobConfiguration_Extract_) isJobConfiguration_Configuration() {} +func (*JobConfiguration_TableCopy_) isJobConfiguration_Configuration() {} + +func (m *JobConfiguration) GetConfiguration() isJobConfiguration_Configuration { + if m != nil { + return m.Configuration + } + return nil +} + +func (m *JobConfiguration) GetQuery() *JobConfiguration_Query { + if x, ok := m.GetConfiguration().(*JobConfiguration_Query_); ok { + return x.Query + } + return nil +} + +func (m *JobConfiguration) GetLoad() *JobConfiguration_Load { + if x, ok := m.GetConfiguration().(*JobConfiguration_Load_); ok { + return x.Load + } + return nil +} + +func (m *JobConfiguration) GetExtract() *JobConfiguration_Extract { + if x, ok := m.GetConfiguration().(*JobConfiguration_Extract_); ok { + return x.Extract + } + return nil +} + +func (m *JobConfiguration) GetTableCopy() 
*JobConfiguration_TableCopy { + if x, ok := m.GetConfiguration().(*JobConfiguration_TableCopy_); ok { + return x.TableCopy + } + return nil +} + +func (m *JobConfiguration) GetDryRun() bool { + if m != nil { + return m.DryRun + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*JobConfiguration) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _JobConfiguration_OneofMarshaler, _JobConfiguration_OneofUnmarshaler, _JobConfiguration_OneofSizer, []interface{}{ + (*JobConfiguration_Query_)(nil), + (*JobConfiguration_Load_)(nil), + (*JobConfiguration_Extract_)(nil), + (*JobConfiguration_TableCopy_)(nil), + } +} + +func _JobConfiguration_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*JobConfiguration) + // configuration + switch x := m.Configuration.(type) { + case *JobConfiguration_Query_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case *JobConfiguration_Load_: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Load); err != nil { + return err + } + case *JobConfiguration_Extract_: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extract); err != nil { + return err + } + case *JobConfiguration_TableCopy_: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TableCopy); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("JobConfiguration.Configuration has unexpected type %T", x) + } + return nil +} + +func _JobConfiguration_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*JobConfiguration) + switch tag { + case 5: // configuration.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobConfiguration_Query) + err := 
b.DecodeMessage(msg) + m.Configuration = &JobConfiguration_Query_{msg} + return true, err + case 6: // configuration.load + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobConfiguration_Load) + err := b.DecodeMessage(msg) + m.Configuration = &JobConfiguration_Load_{msg} + return true, err + case 7: // configuration.extract + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobConfiguration_Extract) + err := b.DecodeMessage(msg) + m.Configuration = &JobConfiguration_Extract_{msg} + return true, err + case 8: // configuration.table_copy + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JobConfiguration_TableCopy) + err := b.DecodeMessage(msg) + m.Configuration = &JobConfiguration_TableCopy_{msg} + return true, err + default: + return false, nil + } +} + +func _JobConfiguration_OneofSizer(msg proto.Message) (n int) { + m := msg.(*JobConfiguration) + // configuration + switch x := m.Configuration.(type) { + case *JobConfiguration_Query_: + s := proto.Size(x.Query) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *JobConfiguration_Load_: + s := proto.Size(x.Load) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *JobConfiguration_Extract_: + s := proto.Size(x.Extract) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *JobConfiguration_TableCopy_: + s := proto.Size(x.TableCopy) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Describes a query job, which executes a SQL-like query. +type JobConfiguration_Query struct { + // The SQL query to run. 
+ Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` + // The table where results are written. + DestinationTable *TableName `protobuf:"bytes,2,opt,name=destination_table,json=destinationTable" json:"destination_table,omitempty"` + // Describes when a job is allowed to create a table: + // `CREATE_IF_NEEDED`, `CREATE_NEVER`. + CreateDisposition string `protobuf:"bytes,3,opt,name=create_disposition,json=createDisposition" json:"create_disposition,omitempty"` + // Describes how writes affect existing tables: + // `WRITE_TRUNCATE`, `WRITE_APPEND`, `WRITE_EMPTY`. + WriteDisposition string `protobuf:"bytes,4,opt,name=write_disposition,json=writeDisposition" json:"write_disposition,omitempty"` + // If a table name is specified without a dataset in a query, + // this dataset will be added to table name. + DefaultDataset *DatasetName `protobuf:"bytes,5,opt,name=default_dataset,json=defaultDataset" json:"default_dataset,omitempty"` + // Describes data sources outside BigQuery, if needed. 
+ TableDefinitions []*TableDefinition `protobuf:"bytes,6,rep,name=table_definitions,json=tableDefinitions" json:"table_definitions,omitempty"` +} + +func (m *JobConfiguration_Query) Reset() { *m = JobConfiguration_Query{} } +func (m *JobConfiguration_Query) String() string { return proto.CompactTextString(m) } +func (*JobConfiguration_Query) ProtoMessage() {} +func (*JobConfiguration_Query) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} } + +func (m *JobConfiguration_Query) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *JobConfiguration_Query) GetDestinationTable() *TableName { + if m != nil { + return m.DestinationTable + } + return nil +} + +func (m *JobConfiguration_Query) GetCreateDisposition() string { + if m != nil { + return m.CreateDisposition + } + return "" +} + +func (m *JobConfiguration_Query) GetWriteDisposition() string { + if m != nil { + return m.WriteDisposition + } + return "" +} + +func (m *JobConfiguration_Query) GetDefaultDataset() *DatasetName { + if m != nil { + return m.DefaultDataset + } + return nil +} + +func (m *JobConfiguration_Query) GetTableDefinitions() []*TableDefinition { + if m != nil { + return m.TableDefinitions + } + return nil +} + +// Describes a load job, which loads data from an external source via +// the import pipeline. +type JobConfiguration_Load struct { + // URIs for the data to be imported. Only Google Cloud Storage URIs are + // supported. + SourceUris []string `protobuf:"bytes,1,rep,name=source_uris,json=sourceUris" json:"source_uris,omitempty"` + // The table schema in JSON format representation of a TableSchema. + SchemaJson string `protobuf:"bytes,6,opt,name=schema_json,json=schemaJson" json:"schema_json,omitempty"` + // The table where the imported data is written. 
+ DestinationTable *TableName `protobuf:"bytes,3,opt,name=destination_table,json=destinationTable" json:"destination_table,omitempty"` + // Describes when a job is allowed to create a table: + // `CREATE_IF_NEEDED`, `CREATE_NEVER`. + CreateDisposition string `protobuf:"bytes,4,opt,name=create_disposition,json=createDisposition" json:"create_disposition,omitempty"` + // Describes how writes affect existing tables: + // `WRITE_TRUNCATE`, `WRITE_APPEND`, `WRITE_EMPTY`. + WriteDisposition string `protobuf:"bytes,5,opt,name=write_disposition,json=writeDisposition" json:"write_disposition,omitempty"` +} + +func (m *JobConfiguration_Load) Reset() { *m = JobConfiguration_Load{} } +func (m *JobConfiguration_Load) String() string { return proto.CompactTextString(m) } +func (*JobConfiguration_Load) ProtoMessage() {} +func (*JobConfiguration_Load) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 1} } + +func (m *JobConfiguration_Load) GetSourceUris() []string { + if m != nil { + return m.SourceUris + } + return nil +} + +func (m *JobConfiguration_Load) GetSchemaJson() string { + if m != nil { + return m.SchemaJson + } + return "" +} + +func (m *JobConfiguration_Load) GetDestinationTable() *TableName { + if m != nil { + return m.DestinationTable + } + return nil +} + +func (m *JobConfiguration_Load) GetCreateDisposition() string { + if m != nil { + return m.CreateDisposition + } + return "" +} + +func (m *JobConfiguration_Load) GetWriteDisposition() string { + if m != nil { + return m.WriteDisposition + } + return "" +} + +// Describes an extract job, which exports data to an external source +// via the export pipeline. +type JobConfiguration_Extract struct { + // Google Cloud Storage URIs where extracted data should be written. + DestinationUris []string `protobuf:"bytes,1,rep,name=destination_uris,json=destinationUris" json:"destination_uris,omitempty"` + // The source table. 
+ SourceTable *TableName `protobuf:"bytes,2,opt,name=source_table,json=sourceTable" json:"source_table,omitempty"` +} + +func (m *JobConfiguration_Extract) Reset() { *m = JobConfiguration_Extract{} } +func (m *JobConfiguration_Extract) String() string { return proto.CompactTextString(m) } +func (*JobConfiguration_Extract) ProtoMessage() {} +func (*JobConfiguration_Extract) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 2} } + +func (m *JobConfiguration_Extract) GetDestinationUris() []string { + if m != nil { + return m.DestinationUris + } + return nil +} + +func (m *JobConfiguration_Extract) GetSourceTable() *TableName { + if m != nil { + return m.SourceTable + } + return nil +} + +// Describes a copy job, which copies an existing table to another table. +type JobConfiguration_TableCopy struct { + // Source tables. + SourceTables []*TableName `protobuf:"bytes,1,rep,name=source_tables,json=sourceTables" json:"source_tables,omitempty"` + // Destination table. + DestinationTable *TableName `protobuf:"bytes,2,opt,name=destination_table,json=destinationTable" json:"destination_table,omitempty"` + // Describes when a job is allowed to create a table: + // `CREATE_IF_NEEDED`, `CREATE_NEVER`. + CreateDisposition string `protobuf:"bytes,3,opt,name=create_disposition,json=createDisposition" json:"create_disposition,omitempty"` + // Describes how writes affect existing tables: + // `WRITE_TRUNCATE`, `WRITE_APPEND`, `WRITE_EMPTY`. 
+ WriteDisposition string `protobuf:"bytes,4,opt,name=write_disposition,json=writeDisposition" json:"write_disposition,omitempty"` +} + +func (m *JobConfiguration_TableCopy) Reset() { *m = JobConfiguration_TableCopy{} } +func (m *JobConfiguration_TableCopy) String() string { return proto.CompactTextString(m) } +func (*JobConfiguration_TableCopy) ProtoMessage() {} +func (*JobConfiguration_TableCopy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 3} } + +func (m *JobConfiguration_TableCopy) GetSourceTables() []*TableName { + if m != nil { + return m.SourceTables + } + return nil +} + +func (m *JobConfiguration_TableCopy) GetDestinationTable() *TableName { + if m != nil { + return m.DestinationTable + } + return nil +} + +func (m *JobConfiguration_TableCopy) GetCreateDisposition() string { + if m != nil { + return m.CreateDisposition + } + return "" +} + +func (m *JobConfiguration_TableCopy) GetWriteDisposition() string { + if m != nil { + return m.WriteDisposition + } + return "" +} + +// Describes an external data source used in a query. +type TableDefinition struct { + // Name of the table, used in queries. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Google Cloud Storage URIs for the data to be imported. + SourceUris []string `protobuf:"bytes,2,rep,name=source_uris,json=sourceUris" json:"source_uris,omitempty"` +} + +func (m *TableDefinition) Reset() { *m = TableDefinition{} } +func (m *TableDefinition) String() string { return proto.CompactTextString(m) } +func (*TableDefinition) ProtoMessage() {} +func (*TableDefinition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *TableDefinition) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TableDefinition) GetSourceUris() []string { + if m != nil { + return m.SourceUris + } + return nil +} + +// Running state of a job. +type JobStatus struct { + // State of a job: `PENDING`, `RUNNING`, or `DONE`. 
+ State string `protobuf:"bytes,1,opt,name=state" json:"state,omitempty"` + // If the job did not complete successfully, this field describes why. + Error *google_rpc.Status `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` +} + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (m *JobStatus) String() string { return proto.CompactTextString(m) } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *JobStatus) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func (m *JobStatus) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +// Job statistics that may change after a job starts. +type JobStatistics struct { + // Time when the job was created. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Time when the job started. + StartTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time when the job ended. + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Total bytes processed for a job. + TotalProcessedBytes int64 `protobuf:"varint,4,opt,name=total_processed_bytes,json=totalProcessedBytes" json:"total_processed_bytes,omitempty"` + // Processed bytes, adjusted by the job's CPU usage. + TotalBilledBytes int64 `protobuf:"varint,5,opt,name=total_billed_bytes,json=totalBilledBytes" json:"total_billed_bytes,omitempty"` + // The tier assigned by CPU-based billing. 
+ BillingTier int32 `protobuf:"varint,7,opt,name=billing_tier,json=billingTier" json:"billing_tier,omitempty"` +} + +func (m *JobStatistics) Reset() { *m = JobStatistics{} } +func (m *JobStatistics) String() string { return proto.CompactTextString(m) } +func (*JobStatistics) ProtoMessage() {} +func (*JobStatistics) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *JobStatistics) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *JobStatistics) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *JobStatistics) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *JobStatistics) GetTotalProcessedBytes() int64 { + if m != nil { + return m.TotalProcessedBytes + } + return 0 +} + +func (m *JobStatistics) GetTotalBilledBytes() int64 { + if m != nil { + return m.TotalBilledBytes + } + return 0 +} + +func (m *JobStatistics) GetBillingTier() int32 { + if m != nil { + return m.BillingTier + } + return 0 +} + +// The fully-qualified name for a dataset. +type DatasetName struct { + // The project ID. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The dataset ID within the project. + DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` +} + +func (m *DatasetName) Reset() { *m = DatasetName{} } +func (m *DatasetName) String() string { return proto.CompactTextString(m) } +func (*DatasetName) ProtoMessage() {} +func (*DatasetName) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +func (m *DatasetName) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DatasetName) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +// The fully-qualified name for a table. 
+type TableName struct { + // The project ID. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The dataset ID within the project. + DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // The table ID of the table within the dataset. + TableId string `protobuf:"bytes,3,opt,name=table_id,json=tableId" json:"table_id,omitempty"` +} + +func (m *TableName) Reset() { *m = TableName{} } +func (m *TableName) String() string { return proto.CompactTextString(m) } +func (*TableName) ProtoMessage() {} +func (*TableName) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *TableName) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *TableName) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *TableName) GetTableId() string { + if m != nil { + return m.TableId + } + return "" +} + +// The fully-qualified name for a job. +type JobName struct { + // The project ID. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The job ID within the project. 
+ JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *JobName) Reset() { *m = JobName{} } +func (m *JobName) String() string { return proto.CompactTextString(m) } +func (*JobName) ProtoMessage() {} +func (*JobName) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *JobName) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *JobName) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func init() { + proto.RegisterType((*AuditData)(nil), "google.cloud.bigquery.logging.v1.AuditData") + proto.RegisterType((*TableInsertRequest)(nil), "google.cloud.bigquery.logging.v1.TableInsertRequest") + proto.RegisterType((*TableUpdateRequest)(nil), "google.cloud.bigquery.logging.v1.TableUpdateRequest") + proto.RegisterType((*TableInsertResponse)(nil), "google.cloud.bigquery.logging.v1.TableInsertResponse") + proto.RegisterType((*TableUpdateResponse)(nil), "google.cloud.bigquery.logging.v1.TableUpdateResponse") + proto.RegisterType((*DatasetListRequest)(nil), "google.cloud.bigquery.logging.v1.DatasetListRequest") + proto.RegisterType((*DatasetInsertRequest)(nil), "google.cloud.bigquery.logging.v1.DatasetInsertRequest") + proto.RegisterType((*DatasetInsertResponse)(nil), "google.cloud.bigquery.logging.v1.DatasetInsertResponse") + proto.RegisterType((*DatasetUpdateRequest)(nil), "google.cloud.bigquery.logging.v1.DatasetUpdateRequest") + proto.RegisterType((*DatasetUpdateResponse)(nil), "google.cloud.bigquery.logging.v1.DatasetUpdateResponse") + proto.RegisterType((*JobInsertRequest)(nil), "google.cloud.bigquery.logging.v1.JobInsertRequest") + proto.RegisterType((*JobInsertResponse)(nil), "google.cloud.bigquery.logging.v1.JobInsertResponse") + proto.RegisterType((*JobQueryRequest)(nil), "google.cloud.bigquery.logging.v1.JobQueryRequest") + proto.RegisterType((*JobQueryResponse)(nil), "google.cloud.bigquery.logging.v1.JobQueryResponse") + 
proto.RegisterType((*JobGetQueryResultsRequest)(nil), "google.cloud.bigquery.logging.v1.JobGetQueryResultsRequest") + proto.RegisterType((*JobGetQueryResultsResponse)(nil), "google.cloud.bigquery.logging.v1.JobGetQueryResultsResponse") + proto.RegisterType((*JobQueryDoneResponse)(nil), "google.cloud.bigquery.logging.v1.JobQueryDoneResponse") + proto.RegisterType((*JobCompletedEvent)(nil), "google.cloud.bigquery.logging.v1.JobCompletedEvent") + proto.RegisterType((*TableDataListRequest)(nil), "google.cloud.bigquery.logging.v1.TableDataListRequest") + proto.RegisterType((*Table)(nil), "google.cloud.bigquery.logging.v1.Table") + proto.RegisterType((*TableInfo)(nil), "google.cloud.bigquery.logging.v1.TableInfo") + proto.RegisterType((*TableViewDefinition)(nil), "google.cloud.bigquery.logging.v1.TableViewDefinition") + proto.RegisterType((*Dataset)(nil), "google.cloud.bigquery.logging.v1.Dataset") + proto.RegisterType((*DatasetInfo)(nil), "google.cloud.bigquery.logging.v1.DatasetInfo") + proto.RegisterType((*BigQueryAcl)(nil), "google.cloud.bigquery.logging.v1.BigQueryAcl") + proto.RegisterType((*BigQueryAcl_Entry)(nil), "google.cloud.bigquery.logging.v1.BigQueryAcl.Entry") + proto.RegisterType((*Job)(nil), "google.cloud.bigquery.logging.v1.Job") + proto.RegisterType((*JobConfiguration)(nil), "google.cloud.bigquery.logging.v1.JobConfiguration") + proto.RegisterType((*JobConfiguration_Query)(nil), "google.cloud.bigquery.logging.v1.JobConfiguration.Query") + proto.RegisterType((*JobConfiguration_Load)(nil), "google.cloud.bigquery.logging.v1.JobConfiguration.Load") + proto.RegisterType((*JobConfiguration_Extract)(nil), "google.cloud.bigquery.logging.v1.JobConfiguration.Extract") + proto.RegisterType((*JobConfiguration_TableCopy)(nil), "google.cloud.bigquery.logging.v1.JobConfiguration.TableCopy") + proto.RegisterType((*TableDefinition)(nil), "google.cloud.bigquery.logging.v1.TableDefinition") + proto.RegisterType((*JobStatus)(nil), 
"google.cloud.bigquery.logging.v1.JobStatus") + proto.RegisterType((*JobStatistics)(nil), "google.cloud.bigquery.logging.v1.JobStatistics") + proto.RegisterType((*DatasetName)(nil), "google.cloud.bigquery.logging.v1.DatasetName") + proto.RegisterType((*TableName)(nil), "google.cloud.bigquery.logging.v1.TableName") + proto.RegisterType((*JobName)(nil), "google.cloud.bigquery.logging.v1.JobName") +} + +func init() { proto.RegisterFile("google/cloud/bigquery/logging/v1/audit_data.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2036 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x4f, 0x73, 0x1b, 0x49, + 0x15, 0x8f, 0x2c, 0xc9, 0x92, 0x9e, 0xec, 0xd8, 0xee, 0xd8, 0x9b, 0x58, 0x90, 0xdd, 0x30, 0x40, + 0xb1, 0x29, 0x13, 0xa9, 0x9c, 0x65, 0x09, 0x90, 0xad, 0x4a, 0xd9, 0xb1, 0x89, 0x93, 0xfd, 0x83, + 0x19, 0x1c, 0x17, 0xbb, 0xc5, 0xae, 0x6a, 0x34, 0xd3, 0xd6, 0xb6, 0x76, 0x34, 0x3d, 0x99, 0x69, + 0xc5, 0x31, 0x17, 0x0a, 0x8a, 0x1b, 0x47, 0x3e, 0x0c, 0x07, 0x2e, 0x50, 0x7c, 0x01, 0x8e, 0x5c, + 0xb8, 0xf0, 0x41, 0x28, 0xaa, 0x5f, 0x77, 0x8f, 0x66, 0x46, 0x72, 0x79, 0xc6, 0x68, 0x0f, 0x7b, + 0x9b, 0x79, 0xdd, 0xbf, 0xf7, 0xeb, 0x7e, 0xfd, 0xfe, 0xf5, 0x0c, 0xec, 0x0e, 0x39, 0x1f, 0xfa, + 0xb4, 0xe7, 0xfa, 0x7c, 0xe2, 0xf5, 0x06, 0x6c, 0xf8, 0x6a, 0x42, 0xa3, 0x8b, 0x9e, 0xcf, 0x87, + 0x43, 0x16, 0x0c, 0x7b, 0xaf, 0x77, 0x7b, 0xce, 0xc4, 0x63, 0xa2, 0xef, 0x39, 0xc2, 0xe9, 0x86, + 0x11, 0x17, 0x9c, 0xdc, 0x53, 0x90, 0x2e, 0x42, 0xba, 0x06, 0xd2, 0xd5, 0x90, 0xee, 0xeb, 0xdd, + 0xce, 0xb7, 0xb5, 0x52, 0x27, 0x64, 0x3d, 0x27, 0x08, 0xb8, 0x70, 0x04, 0xe3, 0x41, 0xac, 0xf0, + 0x9d, 0xb7, 0xf5, 0x28, 0xbe, 0x0d, 0x26, 0x67, 0x3d, 0x6f, 0x12, 0xe1, 0x04, 0x3d, 0xfe, 0x4e, + 0x7e, 0x5c, 0xb0, 0x31, 0x8d, 0x85, 0x33, 0x0e, 0xf5, 0x84, 0xdb, 0x7a, 0x42, 0x14, 0xba, 0xbd, + 0x58, 0x38, 0x62, 0xa2, 0x35, 0x5b, 0xff, 0x5e, 0x83, 0xd6, 0x9e, 0x5c, 0xee, 0x81, 0x23, 0x1c, + 0xf2, 0x25, 0x6c, 0x0a, 0x67, 
0xe0, 0xd3, 0x3e, 0x0b, 0x62, 0x1a, 0x89, 0x7e, 0x44, 0x5f, 0x4d, + 0x68, 0x2c, 0xee, 0x54, 0xee, 0x55, 0xde, 0x6d, 0x3f, 0xfc, 0x51, 0xf7, 0xaa, 0x6d, 0x74, 0x4f, + 0x24, 0xfa, 0x39, 0x82, 0x6d, 0x85, 0x3d, 0xba, 0x61, 0x13, 0x31, 0x23, 0x9d, 0x32, 0x4d, 0x42, + 0xcf, 0x11, 0x34, 0x61, 0x5a, 0x2f, 0xc5, 0xf4, 0x12, 0xc1, 0x79, 0xa6, 0x8c, 0x54, 0x32, 0xc9, + 0x93, 0x88, 0xa9, 0xe8, 0xfb, 0x2c, 0x9e, 0xee, 0x69, 0xa9, 0x28, 0xd3, 0x81, 0x42, 0x7f, 0xc4, + 0xe2, 0xf4, 0x9e, 0xbc, 0x19, 0x29, 0x09, 0xe0, 0x2d, 0xc3, 0x94, 0xb3, 0x5f, 0x15, 0xb9, 0x7e, + 0x5c, 0x98, 0x2b, 0x6f, 0x41, 0xb3, 0x83, 0xac, 0x0d, 0x53, 0x7c, 0x39, 0x2b, 0xd6, 0x4a, 0xf2, + 0xe5, 0xed, 0x68, 0xf8, 0xb2, 0x96, 0x1c, 0x00, 0x19, 0xf1, 0x41, 0x7e, 0x6f, 0x75, 0xe4, 0x7a, + 0x78, 0x35, 0xd7, 0x0b, 0x3e, 0xc8, 0xef, 0x6b, 0x7d, 0x94, 0x93, 0x91, 0x3e, 0x6c, 0x48, 0x0e, + 0x04, 0x27, 0x14, 0xcb, 0x48, 0xb1, 0x5b, 0x88, 0xe2, 0x97, 0x52, 0x36, 0x65, 0x58, 0x1b, 0x65, + 0x45, 0xe4, 0x77, 0x70, 0x57, 0x12, 0x0c, 0xa9, 0x48, 0x48, 0xe2, 0x89, 0x2f, 0xe2, 0x84, 0xac, + 0x81, 0x64, 0x8f, 0x0b, 0x91, 0x3d, 0xa3, 0x42, 0x2b, 0x47, 0x1d, 0x53, 0xda, 0xed, 0xd1, 0x65, + 0x83, 0x84, 0xc3, 0x6d, 0xe5, 0xf9, 0xd2, 0xc6, 0x59, 0x97, 0x6c, 0x16, 0x3d, 0x36, 0x74, 0x7e, + 0x79, 0x76, 0x59, 0xa7, 0x54, 0x21, 0x95, 0x93, 0x93, 0xaf, 0x60, 0x2b, 0x17, 0xd4, 0x71, 0xc8, + 0x83, 0x98, 0xde, 0x69, 0x21, 0xdd, 0xfb, 0x25, 0xa3, 0x5a, 0x81, 0x8f, 0x2a, 0xf6, 0x2d, 0x31, + 0x2b, 0x9e, 0x92, 0x25, 0x1e, 0xa9, 0xc9, 0xa0, 0x14, 0x99, 0x71, 0xbc, 0x1c, 0x59, 0x56, 0x4c, + 0x5e, 0xc1, 0xed, 0x99, 0x80, 0xd3, 0x74, 0x6d, 0xa4, 0x7b, 0x54, 0x3a, 0xe2, 0x12, 0xc2, 0x2d, + 0x6f, 0xde, 0x40, 0x9a, 0x32, 0xbf, 0xc3, 0x95, 0x92, 0x94, 0x33, 0x7b, 0xdc, 0xf2, 0xe6, 0x0d, + 0x10, 0x0a, 0xb7, 0x32, 0x61, 0xa7, 0xe9, 0x08, 0xd2, 0xbd, 0x57, 0x2a, 0xee, 0x12, 0xaa, 0x8d, + 0x51, 0x5e, 0x68, 0xa2, 0x3b, 0x09, 0x0a, 0xc5, 0xb2, 0x5a, 0x22, 0xba, 0x8d, 0xb7, 0x1b, 0x92, + 0xf5, 0x51, 0x4e, 0x46, 0xfe, 0x50, 0x81, 0xb7, 0x2f, 0x8b, 0x3e, 
0x4d, 0x78, 0x13, 0x09, 0x3f, + 0xb8, 0x5e, 0xf8, 0x25, 0xd4, 0x9d, 0xd1, 0xa5, 0xa3, 0x32, 0x00, 0xa7, 0x1b, 0xf5, 0x78, 0x90, + 0x3a, 0xc2, 0xb5, 0xa2, 0x01, 0x68, 0x76, 0x7b, 0xc0, 0x83, 0xf4, 0x09, 0x6e, 0x8e, 0xe6, 0xc8, + 0x89, 0xab, 0x0e, 0xd0, 0xe5, 0xe3, 0xd0, 0xa7, 0x82, 0x7a, 0x7d, 0xfa, 0x9a, 0x06, 0xe2, 0xce, + 0x46, 0x89, 0x03, 0x7c, 0x6a, 0xb0, 0x87, 0x12, 0x8a, 0xc7, 0x97, 0x15, 0xed, 0xb7, 0xa0, 0xa1, + 0xd3, 0xc8, 0x3e, 0x40, 0xd3, 0xec, 0xc8, 0xfa, 0x14, 0xc8, 0x6c, 0x4d, 0x26, 0x4f, 0x71, 0x06, + 0x9f, 0x44, 0x2e, 0xd5, 0xb5, 0xfd, 0x07, 0x05, 0x03, 0xd3, 0x4e, 0x80, 0x89, 0xea, 0x6c, 0x91, + 0x58, 0x88, 0xea, 0xcf, 0xe0, 0xd6, 0x9c, 0x9c, 0xb3, 0x58, 0xdd, 0xb9, 0x28, 0x5b, 0x88, 0xee, + 0x1e, 0x90, 0xd9, 0x6e, 0x81, 0x6c, 0x43, 0x13, 0xd3, 0xbc, 0xe3, 0xfb, 0xa8, 0xba, 0x69, 0x37, + 0xe4, 0xfb, 0x9e, 0xef, 0x5b, 0x9f, 0xc3, 0xe6, 0xbc, 0x92, 0x4f, 0x0e, 0x67, 0x56, 0x73, 0xbf, + 0x70, 0x5e, 0x49, 0xad, 0xe7, 0x0b, 0xd8, 0x9a, 0x9b, 0xdf, 0x16, 0xa5, 0x7f, 0xba, 0xfc, 0xac, + 0x13, 0x2c, 0x7c, 0xf9, 0xb9, 0xc3, 0x5a, 0x90, 0xfe, 0x97, 0xb0, 0x9e, 0x6f, 0x4a, 0xc8, 0xde, + 0x8c, 0xea, 0xef, 0x17, 0x8a, 0xd0, 0x94, 0xda, 0x53, 0xd8, 0x98, 0xc9, 0xb9, 0x8b, 0xd0, 0xfb, + 0xaf, 0x0a, 0xac, 0xe5, 0x3a, 0x1c, 0xb2, 0x09, 0x75, 0x44, 0xa1, 0xce, 0x96, 0xad, 0x5e, 0xc8, + 0x3b, 0xd0, 0x1e, 0x3b, 0x6f, 0x4c, 0x72, 0xc5, 0x56, 0x77, 0xd5, 0x86, 0xb1, 0xf3, 0x46, 0xe7, + 0x42, 0x72, 0x0a, 0x6b, 0x1e, 0x3d, 0x73, 0x26, 0xbe, 0xba, 0xa6, 0xc4, 0xd4, 0xf4, 0xa8, 0x0f, + 0x0a, 0xdb, 0xf1, 0x13, 0x67, 0x4c, 0xed, 0x9b, 0x5a, 0x8b, 0x96, 0x91, 0xbb, 0x00, 0x61, 0xc4, + 0x47, 0xd4, 0x15, 0x7d, 0xe6, 0x61, 0x1b, 0xda, 0xb2, 0x5b, 0x5a, 0xf2, 0xdc, 0x23, 0xb7, 0xa1, + 0xe1, 0xc9, 0xa4, 0x3f, 0x09, 0xb0, 0x6d, 0x6c, 0xda, 0xcb, 0x5e, 0x74, 0x61, 0x4f, 0x02, 0x2b, + 0xc4, 0x93, 0xc8, 0x16, 0x8b, 0xef, 0xc2, 0xaa, 0xe0, 0xc2, 0xf1, 0x93, 0x6d, 0xc8, 0x2d, 0xd6, + 0xec, 0x15, 0x14, 0x9a, 0x8d, 0x3c, 0x82, 0xea, 0x88, 0x0f, 0x74, 0x33, 0x5f, 0xd0, 0xa2, 0x12, + 0x61, 
0x7d, 0x0a, 0xdb, 0x97, 0x36, 0x70, 0x79, 0xfb, 0x55, 0x66, 0xec, 0xf7, 0x2d, 0x68, 0xc5, + 0xc2, 0x91, 0xe5, 0x98, 0x9f, 0x23, 0x79, 0xcd, 0x6e, 0xa2, 0xc0, 0xe6, 0xe7, 0xd6, 0x6f, 0xa1, + 0x73, 0x79, 0x71, 0xfa, 0x9a, 0xb7, 0xf5, 0x0b, 0xd8, 0x9c, 0x57, 0x9b, 0x8c, 0xc2, 0x4a, 0x69, + 0x85, 0x5f, 0xa1, 0x33, 0x67, 0x8b, 0x8d, 0x3c, 0x66, 0xac, 0x61, 0xfd, 0xc0, 0x19, 0x53, 0xed, + 0x7a, 0x2d, 0x94, 0x48, 0xaf, 0xb8, 0xfe, 0xea, 0x4f, 0x60, 0x73, 0x5e, 0x6b, 0x9b, 0x35, 0x77, + 0x25, 0x6b, 0xee, 0x2b, 0x9d, 0xdd, 0xfa, 0x6b, 0x15, 0xea, 0xa8, 0x96, 0xbc, 0x00, 0x50, 0xdd, + 0x69, 0xb2, 0xee, 0xf6, 0xc3, 0x9d, 0x82, 0x69, 0x1e, 0xfd, 0xbd, 0x25, 0xcc, 0x23, 0x79, 0x02, + 0x35, 0x16, 0x9c, 0x71, 0xbd, 0xcb, 0x9d, 0xc2, 0x5d, 0xf4, 0x19, 0xb7, 0x11, 0x28, 0xd7, 0x1d, + 0xbb, 0x5f, 0xd2, 0xb1, 0xd3, 0x1f, 0xc5, 0x3c, 0xc0, 0xe6, 0xbf, 0x65, 0x83, 0x12, 0xbd, 0x88, + 0x79, 0x40, 0x9e, 0x43, 0xed, 0x35, 0xa3, 0xe7, 0xfa, 0x36, 0x57, 0xb4, 0x75, 0x3e, 0x65, 0xf4, + 0xfc, 0x80, 0x9e, 0xb1, 0x80, 0x09, 0xc6, 0x03, 0x1b, 0x55, 0x90, 0xc7, 0xd0, 0xa6, 0x6f, 0x42, + 0x16, 0xd1, 0xbe, 0x60, 0x63, 0xaa, 0xef, 0x6c, 0x1d, 0xa3, 0xd1, 0x7c, 0x36, 0xe8, 0x9e, 0x98, + 0xcf, 0x06, 0x36, 0xa8, 0xe9, 0x52, 0x20, 0xc1, 0x6e, 0x44, 0x65, 0xaf, 0x8b, 0xe0, 0xe5, 0xab, + 0xc1, 0x6a, 0x3a, 0x82, 0x9f, 0xc0, 0xaa, 0x88, 0x26, 0x81, 0x9b, 0xc0, 0x1b, 0x57, 0xc2, 0x57, + 0x0c, 0x40, 0x8a, 0x2c, 0x1b, 0x5a, 0x89, 0xe5, 0x64, 0xf0, 0x9c, 0x45, 0x8c, 0x06, 0x9e, 0x7f, + 0x91, 0xf6, 0xbd, 0x15, 0x23, 0xc4, 0x93, 0xb9, 0x07, 0x6d, 0x8f, 0xc6, 0x6e, 0xc4, 0x42, 0x69, + 0x01, 0x3c, 0xa0, 0x96, 0x9d, 0x16, 0x59, 0x3b, 0xba, 0x07, 0xc8, 0xda, 0x6a, 0x7e, 0x32, 0xb5, + 0xfe, 0x52, 0x85, 0x86, 0xc9, 0x6f, 0xc7, 0xb0, 0x62, 0xda, 0xff, 0x94, 0x0b, 0x95, 0x4c, 0x9a, + 0x6d, 0x6f, 0xfa, 0x42, 0xf6, 0x32, 0x6e, 0xf4, 0xa0, 0xc4, 0x85, 0x25, 0x71, 0xa4, 0xdc, 0xf9, + 0xd4, 0x4a, 0x9d, 0xcf, 0x63, 0x68, 0xeb, 0x8b, 0x4c, 0x51, 0xcf, 0x50, 0xd3, 0xf5, 0xe1, 0x56, + 0x1d, 0xd7, 0xd7, 0x1e, 0x51, 0x60, 0xed, 
0xfb, 0x6c, 0x88, 0xa9, 0x69, 0xcf, 0xf5, 0x6d, 0x89, + 0x24, 0xbf, 0x81, 0xbb, 0xa6, 0x0e, 0xa9, 0xc0, 0xd4, 0x5e, 0x6a, 0xbe, 0x6f, 0xe9, 0x2b, 0xf1, + 0xf6, 0xcc, 0x7a, 0x0e, 0xf4, 0x04, 0xbb, 0xa3, 0xf1, 0x78, 0x9e, 0x87, 0x88, 0x36, 0x63, 0xd6, + 0x09, 0xb4, 0x53, 0xd6, 0x5a, 0x94, 0xf3, 0xfc, 0x7d, 0x09, 0xda, 0xa9, 0x8d, 0x90, 0x8f, 0xa1, + 0x41, 0x03, 0x11, 0x31, 0x2a, 0x53, 0x79, 0xb5, 0x58, 0x4b, 0x9f, 0xc2, 0x77, 0x0f, 0x03, 0x11, + 0x5d, 0xd8, 0x46, 0x47, 0xe7, 0x3f, 0x15, 0xa8, 0xa3, 0x88, 0x10, 0xa8, 0x45, 0xdc, 0x37, 0xcb, + 0xc4, 0x67, 0x99, 0x34, 0x86, 0x11, 0x9f, 0x84, 0x7d, 0x3a, 0x76, 0x98, 0xaf, 0x97, 0x07, 0x28, + 0x3a, 0x94, 0x12, 0x99, 0x9a, 0x27, 0x31, 0x8d, 0xf4, 0x78, 0x55, 0xa5, 0x66, 0x29, 0x51, 0xc3, + 0x6f, 0xc1, 0xb2, 0xc7, 0xc7, 0x0e, 0x0b, 0x74, 0x71, 0xd6, 0x6f, 0xd2, 0x36, 0x71, 0x48, 0x5d, + 0xe6, 0xf8, 0x7d, 0x54, 0x86, 0x8e, 0xd0, 0xb2, 0x57, 0xb4, 0xf0, 0x99, 0x94, 0x91, 0x23, 0x68, + 0xc9, 0x6c, 0xa2, 0x8c, 0xb7, 0x5c, 0x3e, 0x7b, 0x36, 0x25, 0x5a, 0x3e, 0x59, 0xff, 0x5c, 0x82, + 0xea, 0x0b, 0x3e, 0x20, 0x07, 0xd0, 0x94, 0x57, 0xa3, 0x54, 0x2c, 0xdd, 0x2f, 0x54, 0x2e, 0x50, + 0x5d, 0x63, 0xa4, 0x1e, 0xcc, 0x47, 0x23, 0x97, 0x07, 0x67, 0x6c, 0x68, 0x3c, 0x67, 0xa9, 0xc4, + 0xcd, 0xf5, 0x69, 0x1a, 0x89, 0xf7, 0xd6, 0x8c, 0x44, 0xd6, 0x0d, 0x49, 0xa0, 0xbe, 0x9c, 0xea, + 0x4e, 0x69, 0xa7, 0x90, 0xe6, 0x5f, 0x21, 0xc4, 0x6e, 0x8d, 0xcc, 0x23, 0x39, 0x85, 0x9b, 0x46, + 0x17, 0x8b, 0x05, 0x73, 0x63, 0x1d, 0xb0, 0xbd, 0xc2, 0xfa, 0x14, 0xcc, 0x5e, 0x1d, 0xa5, 0x5f, + 0xad, 0xbf, 0xb5, 0xb1, 0x87, 0xca, 0x2e, 0xfc, 0xd8, 0x64, 0x34, 0x15, 0xd7, 0x3f, 0x29, 0x6f, + 0x8d, 0x2e, 0xfa, 0xe9, 0xd1, 0x0d, 0xd3, 0x5a, 0x7e, 0x0c, 0x35, 0x9f, 0x3b, 0x9e, 0x3e, 0xfe, + 0x47, 0xd7, 0x50, 0xf8, 0x11, 0x77, 0xbc, 0xa3, 0x1b, 0x36, 0xaa, 0x21, 0xa7, 0xd0, 0xa0, 0x6f, + 0x44, 0xe4, 0xb8, 0xe6, 0xc3, 0xdb, 0xcf, 0xae, 0xa1, 0xf1, 0x50, 0x69, 0x38, 0xba, 0x61, 0x1b, + 0x65, 0xe4, 0x73, 0x53, 0xe9, 0x5d, 0x1e, 0x5e, 0xe8, 0x2c, 0xf2, 0xc1, 0x35, 
0x54, 0xa3, 0xf3, + 0x3e, 0xe5, 0xa1, 0xb4, 0x80, 0x2a, 0xfe, 0xf2, 0x25, 0xdd, 0xc8, 0xb6, 0xd2, 0x8d, 0x6c, 0xe7, + 0x4f, 0x55, 0xa8, 0xa3, 0xc5, 0x2e, 0xe9, 0xcc, 0x7f, 0x0d, 0x1b, 0x1e, 0x8d, 0x05, 0x0b, 0x50, + 0xbd, 0x4a, 0x7a, 0x25, 0x5b, 0x08, 0xf4, 0xfd, 0xf5, 0x94, 0x16, 0xd5, 0xdb, 0x3c, 0x00, 0xa2, + 0xab, 0x80, 0xc7, 0xe2, 0x90, 0xc7, 0x58, 0xd2, 0x74, 0x02, 0xd8, 0x50, 0x23, 0x07, 0xd3, 0x01, + 0xb2, 0x03, 0x1b, 0xe7, 0x11, 0xcb, 0xcd, 0x56, 0x39, 0x61, 0x1d, 0x07, 0xd2, 0x93, 0xe7, 0x5c, + 0x17, 0xea, 0x8b, 0xb8, 0x2e, 0x7c, 0x01, 0x1b, 0xfa, 0x5b, 0x68, 0x52, 0x84, 0xe3, 0x3b, 0xcb, + 0x98, 0x44, 0x77, 0x8b, 0x7e, 0x05, 0x9d, 0xb6, 0x3a, 0xeb, 0x22, 0x2b, 0x88, 0x3b, 0xff, 0xad, + 0x40, 0x4d, 0xba, 0x1b, 0xf6, 0x5a, 0x78, 0x89, 0xea, 0x4f, 0x22, 0xa6, 0xf2, 0xb4, 0xec, 0xb5, + 0x50, 0xf4, 0x32, 0x62, 0x71, 0xbe, 0x19, 0x5b, 0x9e, 0x69, 0xc6, 0xe6, 0x1e, 0x5c, 0xf5, 0xeb, + 0x3b, 0xb8, 0x5a, 0xa9, 0x83, 0xab, 0xcf, 0x3f, 0xb8, 0xce, 0x1f, 0x2b, 0xd0, 0xd0, 0xd1, 0x41, + 0xee, 0x43, 0x9a, 0x3b, 0x6d, 0x88, 0xb5, 0x94, 0x1c, 0xad, 0xf1, 0x09, 0xac, 0x68, 0x73, 0x5d, + 0xdb, 0x41, 0xb5, 0xbd, 0x51, 0xd0, 0xf9, 0xf3, 0x92, 0x6e, 0xe2, 0x30, 0x78, 0x8e, 0x61, 0x35, + 0xad, 0xdd, 0x94, 0xcd, 0x52, 0xea, 0x57, 0x52, 0xea, 0xe3, 0x6f, 0x66, 0x54, 0xed, 0xaf, 0xc1, + 0x6a, 0xa6, 0x64, 0x59, 0x3f, 0x87, 0xb5, 0x9c, 0x4f, 0xcb, 0x1e, 0x20, 0xd5, 0xaa, 0xe0, 0x73, + 0xde, 0x99, 0x97, 0xf2, 0xce, 0x6c, 0x7d, 0x08, 0xad, 0xa4, 0xf4, 0xc8, 0x3c, 0x24, 0x6b, 0x8d, + 0x51, 0xa1, 0x5e, 0xc8, 0xbb, 0x50, 0xa7, 0x51, 0xc4, 0x23, 0x6d, 0x25, 0x62, 0xac, 0x14, 0x85, + 0x6e, 0x57, 0xd7, 0x2c, 0x35, 0xc1, 0xfa, 0xc7, 0x12, 0xac, 0x66, 0x0a, 0x4f, 0xbe, 0xdf, 0xac, + 0x94, 0xea, 0x37, 0x7f, 0x0a, 0xa0, 0xae, 0x72, 0x88, 0x5d, 0xba, 0x12, 0xab, 0x2e, 0x7e, 0x08, + 0x7d, 0x1f, 0x9a, 0x34, 0xf0, 0x14, 0xb0, 0x7a, 0x25, 0xb0, 0x41, 0x03, 0x0f, 0x61, 0x0f, 0x61, + 0x4b, 0x5d, 0xb8, 0xc3, 0x88, 0xbb, 0x34, 0x8e, 0xa9, 0xd7, 0x1f, 0x5c, 0x08, 0xaa, 0xea, 0x6e, + 0xd5, 0xbe, 0x85, 
0x83, 0xc7, 0x66, 0x6c, 0x5f, 0x0e, 0x91, 0x1f, 0x02, 0x51, 0x98, 0x01, 0xf3, + 0xfd, 0x04, 0x50, 0x47, 0xc0, 0x3a, 0x8e, 0xec, 0xe3, 0x80, 0x9a, 0xfd, 0x1d, 0x58, 0x91, 0xf3, + 0x58, 0x30, 0xec, 0x0b, 0x46, 0x23, 0xac, 0x64, 0x75, 0xbb, 0xad, 0x65, 0x27, 0x8c, 0x46, 0xd6, + 0x87, 0x49, 0x2b, 0x8a, 0x1d, 0x4b, 0xf6, 0x3b, 0x49, 0x25, 0xff, 0x9d, 0xe4, 0x2e, 0x40, 0xf2, + 0x63, 0xc3, 0xd3, 0x4d, 0x5e, 0xcb, 0xfc, 0x90, 0xf0, 0x2c, 0x4f, 0x47, 0xd3, 0xff, 0xaf, 0x8a, + 0x6c, 0x43, 0x53, 0xff, 0x1c, 0xf2, 0xb4, 0x57, 0x37, 0xd4, 0x6f, 0x1d, 0xcf, 0x7a, 0x02, 0x0d, + 0xdd, 0x69, 0x5d, 0xc5, 0xb1, 0x05, 0xcb, 0xf8, 0x87, 0xc2, 0xe8, 0xaf, 0x8f, 0xf8, 0xe0, 0xb9, + 0xb7, 0xff, 0xfb, 0x0a, 0x7c, 0xcf, 0xe5, 0xe3, 0x2b, 0x03, 0x70, 0xff, 0x66, 0xf2, 0x07, 0xfa, + 0x58, 0x9e, 0xe3, 0x71, 0xe5, 0xb3, 0x67, 0x1a, 0x33, 0xe4, 0xbe, 0x13, 0x0c, 0xbb, 0x3c, 0x1a, + 0xf6, 0x86, 0x34, 0xc0, 0x53, 0xee, 0xa9, 0x21, 0x27, 0x64, 0xf1, 0xe5, 0xbf, 0xe0, 0x1f, 0xeb, + 0xc7, 0xc1, 0x32, 0x62, 0xde, 0xfb, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x76, 0x60, 0x19, 0x87, + 0xb5, 0x1f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/billing/v1/cloud_billing.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/billing/v1/cloud_billing.pb.go new file mode 100644 index 000000000..238193b8c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/billing/v1/cloud_billing.pb.go @@ -0,0 +1,688 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/billing/v1/cloud_billing.proto + +/* +Package billing is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/billing/v1/cloud_billing.proto + +It has these top-level messages: + BillingAccount + ProjectBillingInfo + GetBillingAccountRequest + ListBillingAccountsRequest + ListBillingAccountsResponse + ListProjectBillingInfoRequest + ListProjectBillingInfoResponse + GetProjectBillingInfoRequest + UpdateProjectBillingInfoRequest +*/ +package billing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A billing account in [Google Cloud +// Console](https://console.cloud.google.com/). You can assign a billing account +// to one or more projects. +type BillingAccount struct { + // The resource name of the billing account. The resource name has the form + // `billingAccounts/{billing_account_id}`. For example, + // `billingAccounts/012345-567890-ABCDEF` would be the resource name for + // billing account `012345-567890-ABCDEF`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // True if the billing account is open, and will therefore be charged for any + // usage on associated projects. False if the billing account is closed, and + // therefore projects associated with it will be unable to use paid services. 
+ Open bool `protobuf:"varint,2,opt,name=open" json:"open,omitempty"` + // The display name given to the billing account, such as `My Billing + // Account`. This name is displayed in the Google Cloud Console. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"` +} + +func (m *BillingAccount) Reset() { *m = BillingAccount{} } +func (m *BillingAccount) String() string { return proto.CompactTextString(m) } +func (*BillingAccount) ProtoMessage() {} +func (*BillingAccount) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *BillingAccount) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BillingAccount) GetOpen() bool { + if m != nil { + return m.Open + } + return false +} + +func (m *BillingAccount) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// Encapsulation of billing information for a Cloud Console project. A project +// has at most one associated billing account at a time (but a billing account +// can be assigned to multiple projects). +type ProjectBillingInfo struct { + // The resource name for the `ProjectBillingInfo`; has the form + // `projects/{project_id}/billingInfo`. For example, the resource name for the + // billing information for project `tokyo-rain-123` would be + // `projects/tokyo-rain-123/billingInfo`. This field is read-only. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The ID of the project that this `ProjectBillingInfo` represents, such as + // `tokyo-rain-123`. This is a convenience field so that you don't need to + // parse the `name` field to obtain a project ID. This field is read-only. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The resource name of the billing account associated with the project, if + // any. For example, `billingAccounts/012345-567890-ABCDEF`. 
+ BillingAccountName string `protobuf:"bytes,3,opt,name=billing_account_name,json=billingAccountName" json:"billing_account_name,omitempty"` + // True if the project is associated with an open billing account, to which + // usage on the project is charged. False if the project is associated with a + // closed billing account, or no billing account at all, and therefore cannot + // use paid services. This field is read-only. + BillingEnabled bool `protobuf:"varint,4,opt,name=billing_enabled,json=billingEnabled" json:"billing_enabled,omitempty"` +} + +func (m *ProjectBillingInfo) Reset() { *m = ProjectBillingInfo{} } +func (m *ProjectBillingInfo) String() string { return proto.CompactTextString(m) } +func (*ProjectBillingInfo) ProtoMessage() {} +func (*ProjectBillingInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ProjectBillingInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ProjectBillingInfo) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ProjectBillingInfo) GetBillingAccountName() string { + if m != nil { + return m.BillingAccountName + } + return "" +} + +func (m *ProjectBillingInfo) GetBillingEnabled() bool { + if m != nil { + return m.BillingEnabled + } + return false +} + +// Request message for `GetBillingAccount`. +type GetBillingAccountRequest struct { + // The resource name of the billing account to retrieve. For example, + // `billingAccounts/012345-567890-ABCDEF`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetBillingAccountRequest) Reset() { *m = GetBillingAccountRequest{} } +func (m *GetBillingAccountRequest) String() string { return proto.CompactTextString(m) } +func (*GetBillingAccountRequest) ProtoMessage() {} +func (*GetBillingAccountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GetBillingAccountRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `ListBillingAccounts`. +type ListBillingAccountsRequest struct { + // Requested page size. The maximum page size is 100; this is also the + // default. + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // A token identifying a page of results to return. This should be a + // `next_page_token` value returned from a previous `ListBillingAccounts` + // call. If unspecified, the first page of results is returned. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListBillingAccountsRequest) Reset() { *m = ListBillingAccountsRequest{} } +func (m *ListBillingAccountsRequest) String() string { return proto.CompactTextString(m) } +func (*ListBillingAccountsRequest) ProtoMessage() {} +func (*ListBillingAccountsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListBillingAccountsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListBillingAccountsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for `ListBillingAccounts`. +type ListBillingAccountsResponse struct { + // A list of billing accounts. + BillingAccounts []*BillingAccount `protobuf:"bytes,1,rep,name=billing_accounts,json=billingAccounts" json:"billing_accounts,omitempty"` + // A token to retrieve the next page of results. 
To retrieve the next page, + // call `ListBillingAccounts` again with the `page_token` field set to this + // value. This field is empty if there are no more results to retrieve. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListBillingAccountsResponse) Reset() { *m = ListBillingAccountsResponse{} } +func (m *ListBillingAccountsResponse) String() string { return proto.CompactTextString(m) } +func (*ListBillingAccountsResponse) ProtoMessage() {} +func (*ListBillingAccountsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ListBillingAccountsResponse) GetBillingAccounts() []*BillingAccount { + if m != nil { + return m.BillingAccounts + } + return nil +} + +func (m *ListBillingAccountsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for `ListProjectBillingInfo`. +type ListProjectBillingInfoRequest struct { + // The resource name of the billing account associated with the projects that + // you want to list. For example, `billingAccounts/012345-567890-ABCDEF`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Requested page size. The maximum page size is 100; this is also the + // default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // A token identifying a page of results to be returned. This should be a + // `next_page_token` value returned from a previous `ListProjectBillingInfo` + // call. If unspecified, the first page of results is returned. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListProjectBillingInfoRequest) Reset() { *m = ListProjectBillingInfoRequest{} } +func (m *ListProjectBillingInfoRequest) String() string { return proto.CompactTextString(m) } +func (*ListProjectBillingInfoRequest) ProtoMessage() {} +func (*ListProjectBillingInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListProjectBillingInfoRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListProjectBillingInfoRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListProjectBillingInfoRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Request message for `ListProjectBillingInfoResponse`. +type ListProjectBillingInfoResponse struct { + // A list of `ProjectBillingInfo` resources representing the projects + // associated with the billing account. + ProjectBillingInfo []*ProjectBillingInfo `protobuf:"bytes,1,rep,name=project_billing_info,json=projectBillingInfo" json:"project_billing_info,omitempty"` + // A token to retrieve the next page of results. To retrieve the next page, + // call `ListProjectBillingInfo` again with the `page_token` field set to this + // value. This field is empty if there are no more results to retrieve. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListProjectBillingInfoResponse) Reset() { *m = ListProjectBillingInfoResponse{} } +func (m *ListProjectBillingInfoResponse) String() string { return proto.CompactTextString(m) } +func (*ListProjectBillingInfoResponse) ProtoMessage() {} +func (*ListProjectBillingInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ListProjectBillingInfoResponse) GetProjectBillingInfo() []*ProjectBillingInfo { + if m != nil { + return m.ProjectBillingInfo + } + return nil +} + +func (m *ListProjectBillingInfoResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for `GetProjectBillingInfo`. +type GetProjectBillingInfoRequest struct { + // The resource name of the project for which billing information is + // retrieved. For example, `projects/tokyo-rain-123`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetProjectBillingInfoRequest) Reset() { *m = GetProjectBillingInfoRequest{} } +func (m *GetProjectBillingInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetProjectBillingInfoRequest) ProtoMessage() {} +func (*GetProjectBillingInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *GetProjectBillingInfoRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `UpdateProjectBillingInfo`. +type UpdateProjectBillingInfoRequest struct { + // The resource name of the project associated with the billing information + // that you want to update. For example, `projects/tokyo-rain-123`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The new billing information for the project. Read-only fields are ignored; + // thus, you may leave empty all fields except `billing_account_name`. 
+ ProjectBillingInfo *ProjectBillingInfo `protobuf:"bytes,2,opt,name=project_billing_info,json=projectBillingInfo" json:"project_billing_info,omitempty"` +} + +func (m *UpdateProjectBillingInfoRequest) Reset() { *m = UpdateProjectBillingInfoRequest{} } +func (m *UpdateProjectBillingInfoRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateProjectBillingInfoRequest) ProtoMessage() {} +func (*UpdateProjectBillingInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *UpdateProjectBillingInfoRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateProjectBillingInfoRequest) GetProjectBillingInfo() *ProjectBillingInfo { + if m != nil { + return m.ProjectBillingInfo + } + return nil +} + +func init() { + proto.RegisterType((*BillingAccount)(nil), "google.cloud.billing.v1.BillingAccount") + proto.RegisterType((*ProjectBillingInfo)(nil), "google.cloud.billing.v1.ProjectBillingInfo") + proto.RegisterType((*GetBillingAccountRequest)(nil), "google.cloud.billing.v1.GetBillingAccountRequest") + proto.RegisterType((*ListBillingAccountsRequest)(nil), "google.cloud.billing.v1.ListBillingAccountsRequest") + proto.RegisterType((*ListBillingAccountsResponse)(nil), "google.cloud.billing.v1.ListBillingAccountsResponse") + proto.RegisterType((*ListProjectBillingInfoRequest)(nil), "google.cloud.billing.v1.ListProjectBillingInfoRequest") + proto.RegisterType((*ListProjectBillingInfoResponse)(nil), "google.cloud.billing.v1.ListProjectBillingInfoResponse") + proto.RegisterType((*GetProjectBillingInfoRequest)(nil), "google.cloud.billing.v1.GetProjectBillingInfoRequest") + proto.RegisterType((*UpdateProjectBillingInfoRequest)(nil), "google.cloud.billing.v1.UpdateProjectBillingInfoRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for CloudBilling service + +type CloudBillingClient interface { + // Gets information about a billing account. The current authenticated user + // must be an [owner of the billing + // account](https://support.google.com/cloud/answer/4430947). + GetBillingAccount(ctx context.Context, in *GetBillingAccountRequest, opts ...grpc.CallOption) (*BillingAccount, error) + // Lists the billing accounts that the current authenticated user + // [owns](https://support.google.com/cloud/answer/4430947). + ListBillingAccounts(ctx context.Context, in *ListBillingAccountsRequest, opts ...grpc.CallOption) (*ListBillingAccountsResponse, error) + // Lists the projects associated with a billing account. The current + // authenticated user must be an [owner of the billing + // account](https://support.google.com/cloud/answer/4430947). + ListProjectBillingInfo(ctx context.Context, in *ListProjectBillingInfoRequest, opts ...grpc.CallOption) (*ListProjectBillingInfoResponse, error) + // Gets the billing information for a project. The current authenticated user + // must have [permission to view the + // project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo + // ). + GetProjectBillingInfo(ctx context.Context, in *GetProjectBillingInfoRequest, opts ...grpc.CallOption) (*ProjectBillingInfo, error) + // Sets or updates the billing account associated with a project. You specify + // the new billing account by setting the `billing_account_name` in the + // `ProjectBillingInfo` resource to the resource name of a billing account. + // Associating a project with an open billing account enables billing on the + // project and allows charges for resource usage. 
If the project already had a + // billing account, this method changes the billing account used for resource + // usage charges. + // + // *Note:* Incurred charges that have not yet been reported in the transaction + // history of the Google Cloud Console may be billed to the new billing + // account, even if the charge occurred before the new billing account was + // assigned to the project. + // + // The current authenticated user must have ownership privileges for both the + // [project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo + // ) and the [billing + // account](https://support.google.com/cloud/answer/4430947). + // + // You can disable billing on the project by setting the + // `billing_account_name` field to empty. This action disassociates the + // current billing account from the project. Any billable activity of your + // in-use services will stop, and your application could stop functioning as + // expected. Any unbilled charges to date will be billed to the previously + // associated account. The current authenticated user must be either an owner + // of the project or an owner of the billing account for the project. + // + // Note that associating a project with a *closed* billing account will have + // much the same effect as disabling billing on the project: any paid + // resources used by the project will be shut down. Thus, unless you wish to + // disable billing, you should always call this method with the name of an + // *open* billing account. 
+ UpdateProjectBillingInfo(ctx context.Context, in *UpdateProjectBillingInfoRequest, opts ...grpc.CallOption) (*ProjectBillingInfo, error) +} + +type cloudBillingClient struct { + cc *grpc.ClientConn +} + +func NewCloudBillingClient(cc *grpc.ClientConn) CloudBillingClient { + return &cloudBillingClient{cc} +} + +func (c *cloudBillingClient) GetBillingAccount(ctx context.Context, in *GetBillingAccountRequest, opts ...grpc.CallOption) (*BillingAccount, error) { + out := new(BillingAccount) + err := grpc.Invoke(ctx, "/google.cloud.billing.v1.CloudBilling/GetBillingAccount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBillingClient) ListBillingAccounts(ctx context.Context, in *ListBillingAccountsRequest, opts ...grpc.CallOption) (*ListBillingAccountsResponse, error) { + out := new(ListBillingAccountsResponse) + err := grpc.Invoke(ctx, "/google.cloud.billing.v1.CloudBilling/ListBillingAccounts", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBillingClient) ListProjectBillingInfo(ctx context.Context, in *ListProjectBillingInfoRequest, opts ...grpc.CallOption) (*ListProjectBillingInfoResponse, error) { + out := new(ListProjectBillingInfoResponse) + err := grpc.Invoke(ctx, "/google.cloud.billing.v1.CloudBilling/ListProjectBillingInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBillingClient) GetProjectBillingInfo(ctx context.Context, in *GetProjectBillingInfoRequest, opts ...grpc.CallOption) (*ProjectBillingInfo, error) { + out := new(ProjectBillingInfo) + err := grpc.Invoke(ctx, "/google.cloud.billing.v1.CloudBilling/GetProjectBillingInfo", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBillingClient) UpdateProjectBillingInfo(ctx context.Context, in *UpdateProjectBillingInfoRequest, opts ...grpc.CallOption) (*ProjectBillingInfo, error) { + out := new(ProjectBillingInfo) + err := grpc.Invoke(ctx, "/google.cloud.billing.v1.CloudBilling/UpdateProjectBillingInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for CloudBilling service + +type CloudBillingServer interface { + // Gets information about a billing account. The current authenticated user + // must be an [owner of the billing + // account](https://support.google.com/cloud/answer/4430947). + GetBillingAccount(context.Context, *GetBillingAccountRequest) (*BillingAccount, error) + // Lists the billing accounts that the current authenticated user + // [owns](https://support.google.com/cloud/answer/4430947). + ListBillingAccounts(context.Context, *ListBillingAccountsRequest) (*ListBillingAccountsResponse, error) + // Lists the projects associated with a billing account. The current + // authenticated user must be an [owner of the billing + // account](https://support.google.com/cloud/answer/4430947). + ListProjectBillingInfo(context.Context, *ListProjectBillingInfoRequest) (*ListProjectBillingInfoResponse, error) + // Gets the billing information for a project. The current authenticated user + // must have [permission to view the + // project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo + // ). + GetProjectBillingInfo(context.Context, *GetProjectBillingInfoRequest) (*ProjectBillingInfo, error) + // Sets or updates the billing account associated with a project. You specify + // the new billing account by setting the `billing_account_name` in the + // `ProjectBillingInfo` resource to the resource name of a billing account. 
+ // Associating a project with an open billing account enables billing on the + // project and allows charges for resource usage. If the project already had a + // billing account, this method changes the billing account used for resource + // usage charges. + // + // *Note:* Incurred charges that have not yet been reported in the transaction + // history of the Google Cloud Console may be billed to the new billing + // account, even if the charge occurred before the new billing account was + // assigned to the project. + // + // The current authenticated user must have ownership privileges for both the + // [project](https://cloud.google.com/docs/permissions-overview#h.bgs0oxofvnoo + // ) and the [billing + // account](https://support.google.com/cloud/answer/4430947). + // + // You can disable billing on the project by setting the + // `billing_account_name` field to empty. This action disassociates the + // current billing account from the project. Any billable activity of your + // in-use services will stop, and your application could stop functioning as + // expected. Any unbilled charges to date will be billed to the previously + // associated account. The current authenticated user must be either an owner + // of the project or an owner of the billing account for the project. + // + // Note that associating a project with a *closed* billing account will have + // much the same effect as disabling billing on the project: any paid + // resources used by the project will be shut down. Thus, unless you wish to + // disable billing, you should always call this method with the name of an + // *open* billing account. 
+ UpdateProjectBillingInfo(context.Context, *UpdateProjectBillingInfoRequest) (*ProjectBillingInfo, error) +} + +func RegisterCloudBillingServer(s *grpc.Server, srv CloudBillingServer) { + s.RegisterService(&_CloudBilling_serviceDesc, srv) +} + +func _CloudBilling_GetBillingAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBillingAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBillingServer).GetBillingAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.billing.v1.CloudBilling/GetBillingAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBillingServer).GetBillingAccount(ctx, req.(*GetBillingAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBilling_ListBillingAccounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBillingAccountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBillingServer).ListBillingAccounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.billing.v1.CloudBilling/ListBillingAccounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBillingServer).ListBillingAccounts(ctx, req.(*ListBillingAccountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBilling_ListProjectBillingInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListProjectBillingInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(CloudBillingServer).ListProjectBillingInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.billing.v1.CloudBilling/ListProjectBillingInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBillingServer).ListProjectBillingInfo(ctx, req.(*ListProjectBillingInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBilling_GetProjectBillingInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProjectBillingInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBillingServer).GetProjectBillingInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.billing.v1.CloudBilling/GetProjectBillingInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBillingServer).GetProjectBillingInfo(ctx, req.(*GetProjectBillingInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBilling_UpdateProjectBillingInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateProjectBillingInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBillingServer).UpdateProjectBillingInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.billing.v1.CloudBilling/UpdateProjectBillingInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBillingServer).UpdateProjectBillingInfo(ctx, req.(*UpdateProjectBillingInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CloudBilling_serviceDesc = grpc.ServiceDesc{ + ServiceName: 
"google.cloud.billing.v1.CloudBilling", + HandlerType: (*CloudBillingServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetBillingAccount", + Handler: _CloudBilling_GetBillingAccount_Handler, + }, + { + MethodName: "ListBillingAccounts", + Handler: _CloudBilling_ListBillingAccounts_Handler, + }, + { + MethodName: "ListProjectBillingInfo", + Handler: _CloudBilling_ListProjectBillingInfo_Handler, + }, + { + MethodName: "GetProjectBillingInfo", + Handler: _CloudBilling_GetProjectBillingInfo_Handler, + }, + { + MethodName: "UpdateProjectBillingInfo", + Handler: _CloudBilling_UpdateProjectBillingInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/billing/v1/cloud_billing.proto", +} + +func init() { proto.RegisterFile("google/cloud/billing/v1/cloud_billing.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 667 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x4e, 0xd4, 0x40, + 0x14, 0xce, 0x00, 0x12, 0xf6, 0x80, 0x20, 0x03, 0xe8, 0x66, 0x17, 0x10, 0xea, 0x0f, 0x28, 0xb1, + 0x15, 0xf0, 0xdf, 0xa8, 0x11, 0x63, 0x08, 0x89, 0x31, 0x9b, 0xaa, 0x89, 0xd1, 0x98, 0x66, 0x76, + 0x3b, 0x34, 0xd5, 0x32, 0x53, 0x77, 0x0a, 0x51, 0x8c, 0x37, 0xbe, 0x82, 0x7a, 0xe1, 0x85, 0x37, + 0x5e, 0xe8, 0x2b, 0x78, 0xed, 0x2b, 0xf8, 0x0a, 0xde, 0xfb, 0x0a, 0x66, 0xa6, 0x53, 0x65, 0xbb, + 0x9d, 0x85, 0xc6, 0xbb, 0xe6, 0x9b, 0x73, 0xfa, 0x7d, 0xe7, 0x9b, 0xef, 0x6c, 0x17, 0x96, 0x02, + 0xce, 0x83, 0x88, 0x3a, 0xad, 0x88, 0x6f, 0xfb, 0x4e, 0x33, 0x8c, 0xa2, 0x90, 0x05, 0xce, 0xce, + 0x72, 0x0a, 0x78, 0x1a, 0xb0, 0xe3, 0x36, 0x4f, 0x38, 0x3e, 0x96, 0x16, 0xdb, 0xea, 0xcc, 0xce, + 0xce, 0x76, 0x96, 0x6b, 0xd3, 0xfa, 0x2d, 0x24, 0x0e, 0x1d, 0xc2, 0x18, 0x4f, 0x48, 0x12, 0x72, + 0x26, 0xd2, 0x36, 0xeb, 0x29, 0x8c, 0xae, 0xa5, 0xb5, 0xb7, 0x5b, 0x2d, 0xbe, 0xcd, 0x12, 0x8c, + 0x61, 0x80, 0x91, 0x2d, 0x5a, 0x45, 0x73, 0x68, 0xb1, 0xe2, 0xaa, 0x67, 0x89, 0xf1, 0x98, 0xb2, + 0x6a, 
0xdf, 0x1c, 0x5a, 0x1c, 0x72, 0xd5, 0x33, 0x9e, 0x87, 0x11, 0x3f, 0x14, 0x71, 0x44, 0x5e, + 0x7b, 0xaa, 0xbe, 0x5f, 0xd5, 0x0f, 0x6b, 0xec, 0x3e, 0xd9, 0xa2, 0xd6, 0x17, 0x04, 0xb8, 0xd1, + 0xe6, 0xcf, 0x69, 0x2b, 0xd1, 0x24, 0x1b, 0x6c, 0x93, 0x17, 0x32, 0xcc, 0x00, 0xc4, 0x69, 0xa5, + 0x17, 0xfa, 0x8a, 0xa7, 0xe2, 0x56, 0x34, 0xb2, 0xe1, 0xe3, 0xf3, 0x30, 0xa9, 0x47, 0xf2, 0x48, + 0xaa, 0x73, 0x2f, 0x29, 0x6e, 0x76, 0x8c, 0x20, 0xb9, 0xf1, 0x02, 0x8c, 0x65, 0x1d, 0x94, 0x91, + 0x66, 0x44, 0xfd, 0xea, 0x80, 0x52, 0x3f, 0xaa, 0xe1, 0xbb, 0x29, 0x6a, 0xd9, 0x50, 0x5d, 0xa7, + 0x49, 0xa7, 0x09, 0x2e, 0x7d, 0xb9, 0x4d, 0x45, 0xa1, 0x17, 0xd6, 0x63, 0xa8, 0xdd, 0x0b, 0x45, + 0xae, 0x41, 0x64, 0x1d, 0x75, 0xa8, 0xc4, 0x24, 0xa0, 0x9e, 0x08, 0x77, 0xd3, 0xb6, 0x43, 0xee, + 0x90, 0x04, 0x1e, 0x84, 0xbb, 0xe9, 0x90, 0xf2, 0x30, 0xe1, 0x2f, 0xb4, 0x99, 0x72, 0x48, 0x12, + 0xd0, 0x87, 0x12, 0xb0, 0x3e, 0x21, 0xa8, 0x17, 0xbe, 0x5a, 0xc4, 0x9c, 0x09, 0x8a, 0x5d, 0x38, + 0x92, 0x33, 0x41, 0x54, 0xd1, 0x5c, 0xff, 0xe2, 0xf0, 0xca, 0x82, 0x6d, 0xb8, 0x7d, 0x3b, 0x37, + 0xd7, 0x58, 0xa7, 0x53, 0x02, 0x9f, 0x86, 0x31, 0x46, 0x5f, 0x25, 0x5e, 0x97, 0xae, 0xc3, 0x12, + 0x6e, 0xfc, 0xd5, 0xc6, 0x61, 0x46, 0x4a, 0xeb, 0xbe, 0xcd, 0x1e, 0x56, 0x75, 0x9a, 0xd1, 0xd7, + 0xd3, 0x8c, 0xfe, 0xbc, 0x19, 0xdf, 0x10, 0xcc, 0x9a, 0x18, 0xb5, 0x1f, 0xcf, 0x60, 0x32, 0xcb, + 0x4c, 0xe6, 0x4b, 0xc8, 0x36, 0xb9, 0xf6, 0x64, 0xc9, 0xe8, 0x49, 0xc1, 0x2b, 0x71, 0xdc, 0x1d, + 0xd3, 0x83, 0x5a, 0xb3, 0x02, 0xd3, 0xeb, 0xb4, 0x9c, 0x33, 0xd6, 0x07, 0x04, 0xc7, 0x1f, 0xc5, + 0x3e, 0x49, 0x68, 0x39, 0x47, 0x4d, 0x23, 0x4b, 0x61, 0xff, 0x3f, 0xf2, 0xca, 0xef, 0x41, 0x18, + 0xb9, 0x23, 0x7b, 0x35, 0x88, 0x3f, 0x22, 0x18, 0xef, 0xda, 0x0e, 0xbc, 0x6c, 0xe4, 0x31, 0x6d, + 0x52, 0xed, 0xa0, 0x09, 0xb5, 0x4e, 0xbe, 0xfb, 0xf9, 0xeb, 0x7d, 0xdf, 0x2c, 0x9e, 0x96, 0x3f, + 0x74, 0x6f, 0xe4, 0xd0, 0x37, 0x72, 0x99, 0x75, 0xce, 0xbe, 0xc5, 0x9f, 0x11, 0x4c, 0x14, 0xac, + 0x0a, 0x5e, 0x35, 0xd2, 0x98, 0x77, 0xb6, 
0x76, 0xa1, 0x5c, 0x53, 0x9a, 0x3e, 0xab, 0xae, 0x84, + 0x4e, 0xe1, 0x09, 0x29, 0x34, 0xbf, 0x56, 0xdf, 0x11, 0x1c, 0x2d, 0x4e, 0x2f, 0xbe, 0xd4, 0x93, + 0xcd, 0x18, 0x87, 0xda, 0xe5, 0xd2, 0x7d, 0x5a, 0xe8, 0x39, 0x25, 0x74, 0x01, 0x9f, 0xea, 0xe5, + 0xa8, 0xa3, 0xd3, 0x20, 0xf0, 0x57, 0x04, 0x53, 0x85, 0x79, 0xc6, 0x17, 0x7b, 0x5d, 0xbb, 0x59, + 0x78, 0x99, 0x54, 0x5a, 0x67, 0x94, 0xd8, 0x13, 0x78, 0xfe, 0x9f, 0xd8, 0x4c, 0x99, 0x54, 0xd9, + 0xdc, 0x23, 0xe7, 0x07, 0x82, 0xaa, 0x69, 0x87, 0xf0, 0x15, 0x23, 0xe9, 0x3e, 0x6b, 0x57, 0x4e, + 0xee, 0x2d, 0x25, 0xf7, 0x6a, 0x6d, 0x7f, 0xb9, 0xd7, 0x0a, 0x17, 0x77, 0xad, 0x0d, 0xf5, 0x16, + 0xdf, 0x32, 0x51, 0xae, 0x8d, 0xef, 0xdd, 0xc6, 0x86, 0xfc, 0x62, 0x37, 0xd0, 0x93, 0x9b, 0xba, + 0x3a, 0xe0, 0x11, 0x61, 0x81, 0xcd, 0xdb, 0x81, 0x13, 0x50, 0xa6, 0xbe, 0xe7, 0x4e, 0x7a, 0x44, + 0xe2, 0x50, 0x74, 0xfd, 0x6d, 0xb8, 0xae, 0x1f, 0x9b, 0x83, 0xaa, 0x74, 0xf5, 0x4f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xc0, 0x01, 0x24, 0x32, 0x60, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go new file mode 100644 index 000000000..a8f3d5899 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go @@ -0,0 +1,1332 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/dataproc/v1/clusters.proto + +/* +Package dataproc is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/dataproc/v1/clusters.proto + google/cloud/dataproc/v1/jobs.proto + google/cloud/dataproc/v1/operations.proto + +It has these top-level messages: + Cluster + ClusterConfig + GceClusterConfig + InstanceGroupConfig + ManagedGroupConfig + DiskConfig + NodeInitializationAction + ClusterStatus + SoftwareConfig + CreateClusterRequest + UpdateClusterRequest + DeleteClusterRequest + GetClusterRequest + ListClustersRequest + ListClustersResponse + DiagnoseClusterRequest + DiagnoseClusterResults + LoggingConfig + HadoopJob + SparkJob + PySparkJob + QueryList + HiveJob + SparkSqlJob + PigJob + JobPlacement + JobStatus + JobReference + Job + SubmitJobRequest + GetJobRequest + ListJobsRequest + ListJobsResponse + CancelJobRequest + DeleteJobRequest + ClusterOperationStatus + ClusterOperationMetadata +*/ +package dataproc + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf4 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf5 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The cluster state. +type ClusterStatus_State int32 + +const ( + // The cluster state is unknown. 
+ ClusterStatus_UNKNOWN ClusterStatus_State = 0 + // The cluster is being created and set up. It is not ready for use. + ClusterStatus_CREATING ClusterStatus_State = 1 + // The cluster is currently running and healthy. It is ready for use. + ClusterStatus_RUNNING ClusterStatus_State = 2 + // The cluster encountered an error. It is not ready for use. + ClusterStatus_ERROR ClusterStatus_State = 3 + // The cluster is being deleted. It cannot be used. + ClusterStatus_DELETING ClusterStatus_State = 4 + // The cluster is being updated. It continues to accept and process jobs. + ClusterStatus_UPDATING ClusterStatus_State = 5 +) + +var ClusterStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATING", + 2: "RUNNING", + 3: "ERROR", + 4: "DELETING", + 5: "UPDATING", +} +var ClusterStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "CREATING": 1, + "RUNNING": 2, + "ERROR": 3, + "DELETING": 4, + "UPDATING": 5, +} + +func (x ClusterStatus_State) String() string { + return proto.EnumName(ClusterStatus_State_name, int32(x)) +} +func (ClusterStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// Describes the identifying information, config, and status of +// a cluster of Google Compute Engine instances. +type Cluster struct { + // [Required] The Google Cloud Platform project ID that the cluster belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The cluster name. Cluster names within a project must be + // unique. Names of deleted clusters can be reused. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // [Required] The cluster config. Note that Cloud Dataproc may set + // default values, and values may change when clusters are updated. + Config *ClusterConfig `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"` + // [Output-only] Cluster status. 
+ Status *ClusterStatus `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` + // [Output-only] The previous cluster status. + StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` + // [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc + // generates this value when it creates the cluster. + ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Cluster) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *Cluster) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *Cluster) GetConfig() *ClusterConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *Cluster) GetStatus() *ClusterStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *Cluster) GetStatusHistory() []*ClusterStatus { + if m != nil { + return m.StatusHistory + } + return nil +} + +func (m *Cluster) GetClusterUuid() string { + if m != nil { + return m.ClusterUuid + } + return "" +} + +// The cluster config. +type ClusterConfig struct { + // [Optional] A Google Cloud Storage staging bucket used for sharing generated + // SSH keys and config. If you do not specify a staging bucket, Cloud + // Dataproc will determine an appropriate Cloud Storage location (US, + // ASIA, or EU) for your cluster's staging bucket according to the Google + // Compute Engine zone where your cluster is deployed, and then it will create + // and manage this project-level, per-location bucket for you. 
+ ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket" json:"config_bucket,omitempty"` + // [Required] The shared Google Compute Engine config settings for + // all instances in a cluster. + GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig" json:"gce_cluster_config,omitempty"` + // [Optional] The Google Compute Engine config settings for + // the master instance in a cluster. + MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig" json:"master_config,omitempty"` + // [Optional] The Google Compute Engine config settings for + // worker instances in a cluster. + WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig" json:"worker_config,omitempty"` + // [Optional] The Google Compute Engine config settings for + // additional worker instances in a cluster. + SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig" json:"secondary_worker_config,omitempty"` + // [Optional] The config settings for software inside the cluster. + SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig" json:"software_config,omitempty"` + // [Optional] Commands to execute on each node after config is + // completed. By default, executables are run on master and all worker nodes. + // You can test a node's role metadata to run an executable on + // a master or worker node, as shown below using `curl` (you can also use `wget`): + // + // ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + // if [[ "${ROLE}" == 'Master' ]]; then + // ... master specific actions ... + // else + // ... worker specific actions ... 
+ // fi + InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions" json:"initialization_actions,omitempty"` +} + +func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } +func (m *ClusterConfig) String() string { return proto.CompactTextString(m) } +func (*ClusterConfig) ProtoMessage() {} +func (*ClusterConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ClusterConfig) GetConfigBucket() string { + if m != nil { + return m.ConfigBucket + } + return "" +} + +func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig { + if m != nil { + return m.GceClusterConfig + } + return nil +} + +func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig { + if m != nil { + return m.MasterConfig + } + return nil +} + +func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig { + if m != nil { + return m.WorkerConfig + } + return nil +} + +func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig { + if m != nil { + return m.SecondaryWorkerConfig + } + return nil +} + +func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig { + if m != nil { + return m.SoftwareConfig + } + return nil +} + +func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction { + if m != nil { + return m.InitializationActions + } + return nil +} + +// Common config settings for resources of Google Compute Engine cluster +// instances, applicable to all instances in the cluster. +type GceClusterConfig struct { + // [Required] The zone where the Google Compute Engine cluster will be located. + // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`. + ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri" json:"zone_uri,omitempty"` + // [Optional] The Google Compute Engine network to be used for machine + // communications. Cannot be specified with subnetwork_uri. 
If neither + // `network_uri` nor `subnetwork_uri` is specified, the "default" network of + // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see + // [Using Subnetworks](/compute/docs/subnetworks) for more information). + // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`. + NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri" json:"network_uri,omitempty"` + // [Optional] The Google Compute Engine subnetwork to be used for machine + // communications. Cannot be specified with network_uri. + // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`. + SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri" json:"subnetwork_uri,omitempty"` + // [Optional] If true, all instances in the cluster will only have internal IP + // addresses. By default, clusters are not restricted to internal IP addresses, + // and will have ephemeral external IP addresses assigned to each instance. + // This `internal_ip_only` restriction can only be enabled for subnetwork + // enabled networks, and all off-cluster dependencies must be configured to be + // accessible without external IP addresses. + InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly" json:"internal_ip_only,omitempty"` + // [Optional] The URIs of service account scopes to be included in Google + // Compute Engine instances. 
The following base set of scopes is always + // included: + // + // * https://www.googleapis.com/auth/cloud.useraccounts.readonly + // * https://www.googleapis.com/auth/devstorage.read_write + // * https://www.googleapis.com/auth/logging.write + // + // If no scopes are specified, the following defaults are also provided: + // + // * https://www.googleapis.com/auth/bigquery + // * https://www.googleapis.com/auth/bigtable.admin.table + // * https://www.googleapis.com/auth/bigtable.data + // * https://www.googleapis.com/auth/devstorage.full_control + ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes" json:"service_account_scopes,omitempty"` + // The Google Compute Engine tags to add to all instances (see + // [Labeling instances](/compute/docs/label-or-tag-resources#labeling_instances)). + Tags []string `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"` + // The Google Compute Engine metadata entries to add to all instances (see + // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). 
+ Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *GceClusterConfig) Reset() { *m = GceClusterConfig{} } +func (m *GceClusterConfig) String() string { return proto.CompactTextString(m) } +func (*GceClusterConfig) ProtoMessage() {} +func (*GceClusterConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GceClusterConfig) GetZoneUri() string { + if m != nil { + return m.ZoneUri + } + return "" +} + +func (m *GceClusterConfig) GetNetworkUri() string { + if m != nil { + return m.NetworkUri + } + return "" +} + +func (m *GceClusterConfig) GetSubnetworkUri() string { + if m != nil { + return m.SubnetworkUri + } + return "" +} + +func (m *GceClusterConfig) GetInternalIpOnly() bool { + if m != nil { + return m.InternalIpOnly + } + return false +} + +func (m *GceClusterConfig) GetServiceAccountScopes() []string { + if m != nil { + return m.ServiceAccountScopes + } + return nil +} + +func (m *GceClusterConfig) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *GceClusterConfig) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +// [Optional] The config settings for Google Compute Engine resources in +// an instance group, such as a master or worker group. +type InstanceGroupConfig struct { + // [Required] The number of VM instances in the instance group. + // For master instance groups, must be set to 1. + NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances" json:"num_instances,omitempty"` + // [Optional] The list of instance names. Cloud Dataproc derives the names from + // `cluster_name`, `num_instances`, and the instance group if not set by user + // (recommended practice is to let Cloud Dataproc derive the name). 
+ InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames" json:"instance_names,omitempty"` + // [Output-only] The Google Compute Engine image resource used for cluster + // instances. Inferred from `SoftwareConfig.image_version`. + ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"` + // [Required] The Google Compute Engine machine type used for cluster instances. + // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`. + MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri" json:"machine_type_uri,omitempty"` + // [Optional] Disk option config settings. + DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig" json:"disk_config,omitempty"` + // [Optional] Specifies that this instance group contains preemptible instances. + IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible" json:"is_preemptible,omitempty"` + // [Output-only] The config for Google Compute Engine Instance Group + // Manager that manages this group. + // This is only used for preemptible instance groups. 
+ ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig" json:"managed_group_config,omitempty"` +} + +func (m *InstanceGroupConfig) Reset() { *m = InstanceGroupConfig{} } +func (m *InstanceGroupConfig) String() string { return proto.CompactTextString(m) } +func (*InstanceGroupConfig) ProtoMessage() {} +func (*InstanceGroupConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *InstanceGroupConfig) GetNumInstances() int32 { + if m != nil { + return m.NumInstances + } + return 0 +} + +func (m *InstanceGroupConfig) GetInstanceNames() []string { + if m != nil { + return m.InstanceNames + } + return nil +} + +func (m *InstanceGroupConfig) GetImageUri() string { + if m != nil { + return m.ImageUri + } + return "" +} + +func (m *InstanceGroupConfig) GetMachineTypeUri() string { + if m != nil { + return m.MachineTypeUri + } + return "" +} + +func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig { + if m != nil { + return m.DiskConfig + } + return nil +} + +func (m *InstanceGroupConfig) GetIsPreemptible() bool { + if m != nil { + return m.IsPreemptible + } + return false +} + +func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig { + if m != nil { + return m.ManagedGroupConfig + } + return nil +} + +// Specifies the resources used to actively manage an instance group. +type ManagedGroupConfig struct { + // [Output-only] The name of the Instance Template used for the Managed + // Instance Group. + InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName" json:"instance_template_name,omitempty"` + // [Output-only] The name of the Instance Group Manager for this group. 
+ InstanceGroupManagerName string `protobuf:"bytes,2,opt,name=instance_group_manager_name,json=instanceGroupManagerName" json:"instance_group_manager_name,omitempty"` +} + +func (m *ManagedGroupConfig) Reset() { *m = ManagedGroupConfig{} } +func (m *ManagedGroupConfig) String() string { return proto.CompactTextString(m) } +func (*ManagedGroupConfig) ProtoMessage() {} +func (*ManagedGroupConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ManagedGroupConfig) GetInstanceTemplateName() string { + if m != nil { + return m.InstanceTemplateName + } + return "" +} + +func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string { + if m != nil { + return m.InstanceGroupManagerName + } + return "" +} + +// Specifies the config of disk options for a group of VM instances. +type DiskConfig struct { + // [Optional] Size in GB of the boot disk (default is 500GB). + BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb" json:"boot_disk_size_gb,omitempty"` + // [Optional] Number of attached SSDs, from 0 to 4 (default is 0). + // If SSDs are not attached, the boot disk is used to store runtime logs and + // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. + // If one or more SSDs are attached, this runtime bulk + // data is spread across them, and the boot disk contains only basic + // config and installed binaries. 
+ NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds" json:"num_local_ssds,omitempty"` +} + +func (m *DiskConfig) Reset() { *m = DiskConfig{} } +func (m *DiskConfig) String() string { return proto.CompactTextString(m) } +func (*DiskConfig) ProtoMessage() {} +func (*DiskConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *DiskConfig) GetBootDiskSizeGb() int32 { + if m != nil { + return m.BootDiskSizeGb + } + return 0 +} + +func (m *DiskConfig) GetNumLocalSsds() int32 { + if m != nil { + return m.NumLocalSsds + } + return 0 +} + +// Specifies an executable to run on a fully configured node and a +// timeout period for executable completion. +type NodeInitializationAction struct { + // [Required] Google Cloud Storage URI of executable file. + ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile" json:"executable_file,omitempty"` + // [Optional] Amount of time executable has to complete. Default is + // 10 minutes. Cluster creation fails with an explanatory error message (the + // name of the executable that caused the error and the exceeded timeout + // period) if the executable is not completed at end of the timeout period. 
+ ExecutionTimeout *google_protobuf4.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout" json:"execution_timeout,omitempty"` +} + +func (m *NodeInitializationAction) Reset() { *m = NodeInitializationAction{} } +func (m *NodeInitializationAction) String() string { return proto.CompactTextString(m) } +func (*NodeInitializationAction) ProtoMessage() {} +func (*NodeInitializationAction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *NodeInitializationAction) GetExecutableFile() string { + if m != nil { + return m.ExecutableFile + } + return "" +} + +func (m *NodeInitializationAction) GetExecutionTimeout() *google_protobuf4.Duration { + if m != nil { + return m.ExecutionTimeout + } + return nil +} + +// The status of a cluster and its instances. +type ClusterStatus struct { + // [Output-only] The cluster's state. + State ClusterStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.ClusterStatus_State" json:"state,omitempty"` + // [Output-only] Optional details of cluster's state. + Detail string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + // [Output-only] Time when this state was entered. 
+ StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` +} + +func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } +func (m *ClusterStatus) String() string { return proto.CompactTextString(m) } +func (*ClusterStatus) ProtoMessage() {} +func (*ClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ClusterStatus) GetState() ClusterStatus_State { + if m != nil { + return m.State + } + return ClusterStatus_UNKNOWN +} + +func (m *ClusterStatus) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *ClusterStatus) GetStateStartTime() *google_protobuf3.Timestamp { + if m != nil { + return m.StateStartTime + } + return nil +} + +// Specifies the selection and config of software inside the cluster. +type SoftwareConfig struct { + // [Optional] The version of software inside the cluster. It must match the + // regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the + // latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)). + ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion" json:"image_version,omitempty"` + // [Optional] The properties to set on daemon config files. + // + // Property keys are specified in `prefix:property` format, such as + // `core:fs.defaultFS`. 
The following are supported prefixes + // and their mappings: + // + // * core: `core-site.xml` + // * hdfs: `hdfs-site.xml` + // * mapred: `mapred-site.xml` + // * yarn: `yarn-site.xml` + // * hive: `hive-site.xml` + // * pig: `pig.properties` + // * spark: `spark-defaults.conf` + Properties map[string]string `protobuf:"bytes,2,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *SoftwareConfig) Reset() { *m = SoftwareConfig{} } +func (m *SoftwareConfig) String() string { return proto.CompactTextString(m) } +func (*SoftwareConfig) ProtoMessage() {} +func (*SoftwareConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *SoftwareConfig) GetImageVersion() string { + if m != nil { + return m.ImageVersion + } + return "" +} + +func (m *SoftwareConfig) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +// A request to create a cluster. +type CreateClusterRequest struct { + // [Required] The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // [Required] The cluster to create. 
+ Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *CreateClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CreateClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *CreateClusterRequest) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +// A request to update a cluster. +type UpdateClusterRequest struct { + // [Required] The ID of the Google Cloud Platform project the + // cluster belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,5,opt,name=region" json:"region,omitempty"` + // [Required] The cluster name. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // [Required] The changes to the cluster. + Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"` + // [Required] Specifies the path, relative to Cluster, of + // the field to update. 
For example, to change the number of workers + // in a cluster to 5, the update_mask parameter would be + // specified as config.worker_config.num_instances, + // and the `PATCH` request body would specify the new value, as follows: + // + // { + // "config":{ + // "workerConfig":{ + // "numInstances":"5" + // } + // } + // } + // Similarly, to change the number of preemptible workers in a cluster to 5, the + // update_mask parameter would be config.secondary_worker_config.num_instances, + // and the `PATCH` request body would be set as follows: + // + // { + // "config":{ + // "secondaryWorkerConfig":{ + // "numInstances":"5" + // } + // } + // } + // Note: Currently, config.worker_config.num_instances + // and config.secondary_worker_config.num_instances are the only + // fields that can be updated. + UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *UpdateClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *UpdateClusterRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *UpdateClusterRequest) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +func (m *UpdateClusterRequest) GetUpdateMask() *google_protobuf5.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// A request to delete a cluster. 
+type DeleteClusterRequest struct { + // [Required] The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // [Required] The cluster name. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *DeleteClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DeleteClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *DeleteClusterRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +// Request to get the resource representation for a cluster in a project. +type GetClusterRequest struct { + // [Required] The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // [Required] The cluster name. 
+ ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *GetClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *GetClusterRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +// A request to list the clusters in a project. +type ListClustersRequest struct { + // [Required] The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,4,opt,name=region" json:"region,omitempty"` + // [Optional] The standard List page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // [Optional] The standard List page token. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *ListClustersRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListClustersRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *ListClustersRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClustersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The list of all clusters in a project. +type ListClustersResponse struct { + // [Output-only] The clusters in the project. + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` + // [Output-only] This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // `page_token` in a subsequent ListClustersRequest. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// A request to collect cluster diagnostic information. 
+type DiagnoseClusterRequest struct { + // [Required] The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // [Required] The cluster name. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` +} + +func (m *DiagnoseClusterRequest) Reset() { *m = DiagnoseClusterRequest{} } +func (m *DiagnoseClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DiagnoseClusterRequest) ProtoMessage() {} +func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *DiagnoseClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DiagnoseClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *DiagnoseClusterRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +// The location of diagnostic output. +type DiagnoseClusterResults struct { + // [Output-only] The Google Cloud Storage URI of the diagnostic output. + // The output report is a plain text file with a summary of collected + // diagnostics. 
+ OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"` +} + +func (m *DiagnoseClusterResults) Reset() { *m = DiagnoseClusterResults{} } +func (m *DiagnoseClusterResults) String() string { return proto.CompactTextString(m) } +func (*DiagnoseClusterResults) ProtoMessage() {} +func (*DiagnoseClusterResults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *DiagnoseClusterResults) GetOutputUri() string { + if m != nil { + return m.OutputUri + } + return "" +} + +func init() { + proto.RegisterType((*Cluster)(nil), "google.cloud.dataproc.v1.Cluster") + proto.RegisterType((*ClusterConfig)(nil), "google.cloud.dataproc.v1.ClusterConfig") + proto.RegisterType((*GceClusterConfig)(nil), "google.cloud.dataproc.v1.GceClusterConfig") + proto.RegisterType((*InstanceGroupConfig)(nil), "google.cloud.dataproc.v1.InstanceGroupConfig") + proto.RegisterType((*ManagedGroupConfig)(nil), "google.cloud.dataproc.v1.ManagedGroupConfig") + proto.RegisterType((*DiskConfig)(nil), "google.cloud.dataproc.v1.DiskConfig") + proto.RegisterType((*NodeInitializationAction)(nil), "google.cloud.dataproc.v1.NodeInitializationAction") + proto.RegisterType((*ClusterStatus)(nil), "google.cloud.dataproc.v1.ClusterStatus") + proto.RegisterType((*SoftwareConfig)(nil), "google.cloud.dataproc.v1.SoftwareConfig") + proto.RegisterType((*CreateClusterRequest)(nil), "google.cloud.dataproc.v1.CreateClusterRequest") + proto.RegisterType((*UpdateClusterRequest)(nil), "google.cloud.dataproc.v1.UpdateClusterRequest") + proto.RegisterType((*DeleteClusterRequest)(nil), "google.cloud.dataproc.v1.DeleteClusterRequest") + proto.RegisterType((*GetClusterRequest)(nil), "google.cloud.dataproc.v1.GetClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "google.cloud.dataproc.v1.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "google.cloud.dataproc.v1.ListClustersResponse") + 
proto.RegisterType((*DiagnoseClusterRequest)(nil), "google.cloud.dataproc.v1.DiagnoseClusterRequest") + proto.RegisterType((*DiagnoseClusterResults)(nil), "google.cloud.dataproc.v1.DiagnoseClusterResults") + proto.RegisterEnum("google.cloud.dataproc.v1.ClusterStatus_State", ClusterStatus_State_name, ClusterStatus_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ClusterController service + +type ClusterControllerClient interface { + // Creates a cluster in a project. + CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Updates a cluster in a project. + UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes a cluster in a project. + DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets the resource representation for a cluster in a project. + GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Lists all regions/{region}/clusters in a project. + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Gets cluster diagnostic information. + // After the operation completes, the Operation.response field + // contains `DiagnoseClusterOutputLocation`. 
+ DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type clusterControllerClient struct { + cc *grpc.ClientConn +} + +func NewClusterControllerClient(cc *grpc.ClientConn) ClusterControllerClient { + return &clusterControllerClient{cc} +} + +func (c *clusterControllerClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/CreateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/DeleteCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/GetCluster", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ClusterController service + +type ClusterControllerServer interface { + // Creates a cluster in a project. + CreateCluster(context.Context, *CreateClusterRequest) (*google_longrunning.Operation, error) + // Updates a cluster in a project. + UpdateCluster(context.Context, *UpdateClusterRequest) (*google_longrunning.Operation, error) + // Deletes a cluster in a project. + DeleteCluster(context.Context, *DeleteClusterRequest) (*google_longrunning.Operation, error) + // Gets the resource representation for a cluster in a project. + GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) + // Lists all regions/{region}/clusters in a project. + ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Gets cluster diagnostic information. + // After the operation completes, the Operation.response field + // contains `DiagnoseClusterOutputLocation`. 
+ DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*google_longrunning.Operation, error) +} + +func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer) { + s.RegisterService(&_ClusterController_serviceDesc, srv) +} + +func _ClusterController_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).CreateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.ClusterController/CreateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).CreateCluster(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.ClusterController/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ClusterControllerServer).DeleteCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.ClusterController/DeleteCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).DeleteCluster(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.ClusterController/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.ClusterController/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_DiagnoseCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(DiagnoseClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).DiagnoseCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).DiagnoseCluster(ctx, req.(*DiagnoseClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.dataproc.v1.ClusterController", + HandlerType: (*ClusterControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateCluster", + Handler: _ClusterController_CreateCluster_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _ClusterController_UpdateCluster_Handler, + }, + { + MethodName: "DeleteCluster", + Handler: _ClusterController_DeleteCluster_Handler, + }, + { + MethodName: "GetCluster", + Handler: _ClusterController_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _ClusterController_ListClusters_Handler, + }, + { + MethodName: "DiagnoseCluster", + Handler: _ClusterController_DiagnoseCluster_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/dataproc/v1/clusters.proto", +} + +func init() { proto.RegisterFile("google/cloud/dataproc/v1/clusters.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1667 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x73, 0x23, 0x47, + 0x15, 0x67, 0x64, 0xcb, 0x96, 0x9f, 0x2c, 0x59, 0xdb, 0x51, 0x8c, 0xa2, 0x4d, 0x88, 0x33, 0x09, + 0xac, 0xb3, 0x80, 0x44, 0x1c, 0x28, 0x52, 0xeb, 0x0a, 0xb0, 0x6b, 0x79, 0x8d, 0xc9, 0xae, 0xd6, + 0x8c, 0xed, 0x4d, 0x8a, 0x2a, 0x98, 0x6a, 0xcd, 0xb4, 0x27, 0x8d, 0x66, 0xa6, 0x27, 0xd3, 0x3d, + 0x4e, 0xbc, 0x5b, 0x7b, 0xe1, 0x04, 0xe1, 0xc8, 
0x57, 0xe0, 0x40, 0xe5, 0x08, 0x37, 0x4e, 0x7c, + 0x02, 0x2e, 0x1c, 0xb9, 0x72, 0xe2, 0x03, 0x70, 0xe2, 0x40, 0xf5, 0x9f, 0x91, 0x34, 0xb6, 0x24, + 0x7b, 0x17, 0xd7, 0x9e, 0xd4, 0xfd, 0xde, 0xef, 0xfd, 0xe9, 0xd7, 0xef, 0xbd, 0x7e, 0x23, 0xb8, + 0x15, 0x30, 0x16, 0x84, 0xa4, 0xeb, 0x85, 0x2c, 0xf3, 0xbb, 0x3e, 0x16, 0x38, 0x49, 0x99, 0xd7, + 0x3d, 0x7d, 0xaf, 0xeb, 0x85, 0x19, 0x17, 0x24, 0xe5, 0x9d, 0x24, 0x65, 0x82, 0xa1, 0x96, 0x06, + 0x76, 0x14, 0xb0, 0x93, 0x03, 0x3b, 0xa7, 0xef, 0xb5, 0x5f, 0x37, 0x2a, 0x70, 0x42, 0xbb, 0x38, + 0x8e, 0x99, 0xc0, 0x82, 0xb2, 0xd8, 0xc8, 0xb5, 0xdf, 0x9d, 0x69, 0x80, 0x25, 0x24, 0x2d, 0x40, + 0xdf, 0x36, 0xd0, 0x90, 0xc5, 0x41, 0x9a, 0xc5, 0x31, 0x8d, 0x83, 0x8b, 0xa0, 0x6f, 0x18, 0x90, + 0xda, 0x0d, 0xb2, 0x93, 0xae, 0x9f, 0x69, 0x80, 0xe1, 0x6f, 0x9c, 0xe7, 0x9f, 0x50, 0x12, 0xfa, + 0x6e, 0x84, 0xf9, 0xd0, 0x20, 0xde, 0x3c, 0x8f, 0x10, 0x34, 0x22, 0x5c, 0xe0, 0x28, 0xd1, 0x00, + 0xfb, 0x6f, 0x25, 0x58, 0xde, 0xd1, 0xa7, 0x47, 0x6f, 0x00, 0x24, 0x29, 0xfb, 0x35, 0xf1, 0x84, + 0x4b, 0xfd, 0x96, 0xb5, 0x61, 0x6d, 0xae, 0x38, 0x2b, 0x86, 0xb2, 0xef, 0xa3, 0xb7, 0x60, 0xd5, + 0xc4, 0xc9, 0x8d, 0x71, 0x44, 0x5a, 0x25, 0x05, 0xa8, 0x1a, 0x5a, 0x1f, 0x47, 0x04, 0xfd, 0x18, + 0x96, 0x3c, 0x16, 0x9f, 0xd0, 0xa0, 0xb5, 0xb0, 0x61, 0x6d, 0x56, 0xb7, 0x6e, 0x75, 0x66, 0x45, + 0xb2, 0x63, 0x8c, 0xee, 0x28, 0xb8, 0x63, 0xc4, 0xa4, 0x02, 0x2e, 0xb0, 0xc8, 0x78, 0x6b, 0xf1, + 0x8a, 0x0a, 0x0e, 0x15, 0xdc, 0x31, 0x62, 0xa8, 0x0f, 0x75, 0xbd, 0x72, 0x3f, 0xa5, 0x5c, 0xb0, + 0xf4, 0xac, 0xb5, 0xbc, 0xb1, 0xf0, 0x3c, 0x8a, 0x6a, 0x5a, 0xfc, 0xa7, 0x5a, 0x7a, 0xf2, 0xd0, + 0x59, 0x46, 0xfd, 0xd6, 0x52, 0xe1, 0xd0, 0xc7, 0x19, 0xf5, 0xed, 0x7f, 0x2e, 0x42, 0xad, 0x70, + 0x1a, 0xf4, 0x36, 0xd4, 0xf4, 0x79, 0xdc, 0x41, 0xe6, 0x0d, 0x89, 0x30, 0xb1, 0x5c, 0xd5, 0xc4, + 0x7b, 0x8a, 0x86, 0x3e, 0x01, 0x14, 0x78, 0xc4, 0xcd, 0xb5, 0x9b, 0xb8, 0x55, 0xd4, 0xb1, 0x6f, + 0xcf, 0xf6, 0x76, 0xcf, 0x23, 0xc5, 0xd0, 0x35, 0x82, 0x73, 0x14, 0xe4, 0x40, 0x2d, 
0xc2, 0x93, + 0x4a, 0x57, 0x94, 0xd2, 0xef, 0xce, 0x56, 0xba, 0x1f, 0x73, 0x81, 0x63, 0x8f, 0xec, 0xa5, 0x2c, + 0x4b, 0x8c, 0xde, 0x55, 0xad, 0x63, 0xac, 0xf3, 0x73, 0x96, 0x0e, 0xc7, 0x3a, 0xe1, 0x85, 0x74, + 0x6a, 0x1d, 0x46, 0x27, 0x81, 0xaf, 0x73, 0xe2, 0xb1, 0xd8, 0xc7, 0xe9, 0x99, 0x5b, 0xd4, 0xbe, + 0xfa, 0x22, 0xda, 0x5f, 0x1d, 0x69, 0xfb, 0x78, 0xd2, 0xcc, 0xcf, 0x61, 0x8d, 0xb3, 0x13, 0xf1, + 0x39, 0x4e, 0x49, 0xae, 0xbe, 0xa6, 0xd4, 0x6f, 0xce, 0x56, 0x7f, 0x68, 0x04, 0x8c, 0xe6, 0x3a, + 0x2f, 0xec, 0x11, 0x85, 0x75, 0x1a, 0x53, 0x41, 0x71, 0x48, 0x9f, 0xa8, 0x82, 0x74, 0xb1, 0xa7, + 0x0a, 0xb7, 0x55, 0x55, 0xd9, 0xb6, 0x35, 0x5b, 0x73, 0x9f, 0xf9, 0x64, 0xbf, 0x20, 0x7b, 0x57, + 0x89, 0x3a, 0xaf, 0xd2, 0x29, 0x54, 0x6e, 0xff, 0xb7, 0x04, 0x8d, 0xf3, 0x77, 0x8e, 0x5e, 0x83, + 0xca, 0x13, 0x16, 0x13, 0x37, 0x4b, 0xa9, 0xc9, 0xad, 0x65, 0xb9, 0x3f, 0x4e, 0x29, 0x7a, 0x13, + 0xaa, 0x31, 0x11, 0x32, 0x9a, 0x8a, 0xab, 0x8b, 0x14, 0x0c, 0x49, 0x02, 0xbe, 0x09, 0x75, 0x9e, + 0x0d, 0x26, 0x31, 0x3a, 0xa7, 0x6b, 0x63, 0xaa, 0x84, 0x6d, 0x42, 0x83, 0xc6, 0x82, 0xa4, 0x31, + 0x0e, 0x5d, 0x9a, 0xb8, 0x2c, 0x0e, 0x65, 0x29, 0x59, 0x9b, 0x15, 0xa7, 0x9e, 0xd3, 0xf7, 0x93, + 0x47, 0x71, 0x78, 0x86, 0xbe, 0x0f, 0xeb, 0x9c, 0xa4, 0xa7, 0xd4, 0x23, 0x2e, 0xf6, 0x3c, 0x96, + 0xc5, 0xc2, 0xe5, 0x1e, 0x4b, 0x08, 0x6f, 0x2d, 0x6c, 0x2c, 0x6c, 0xae, 0x38, 0x4d, 0xc3, 0xbd, + 0xab, 0x99, 0x87, 0x8a, 0x87, 0x10, 0x2c, 0x0a, 0x1c, 0xc8, 0x3a, 0x97, 0x18, 0xb5, 0x46, 0x47, + 0x50, 0x89, 0x88, 0xc0, 0x32, 0x5c, 0xad, 0xb2, 0x0a, 0xe4, 0x07, 0x57, 0x2f, 0x84, 0xce, 0x43, + 0x23, 0xba, 0x1b, 0x8b, 0xf4, 0xcc, 0x19, 0x69, 0x6a, 0x6f, 0x43, 0xad, 0xc0, 0x42, 0x0d, 0x58, + 0x18, 0x92, 0x33, 0x13, 0x38, 0xb9, 0x44, 0x4d, 0x28, 0x9f, 0xe2, 0x30, 0xcb, 0x7b, 0x9a, 0xde, + 0xdc, 0x29, 0x7d, 0x60, 0xd9, 0xff, 0x29, 0xc1, 0x2b, 0x53, 0x72, 0x4d, 0x96, 0x78, 0x9c, 0x45, + 0x2e, 0x35, 0x2c, 0xae, 0xb4, 0x95, 0x9d, 0xd5, 0x38, 0x8b, 0x72, 0x38, 0x97, 0xa1, 0xce, 0x01, + 0xaa, 0x65, 0xf2, 0x56, 
0x49, 0x9d, 0xb6, 0x96, 0x53, 0x65, 0xd3, 0xe4, 0xe8, 0x26, 0xac, 0xd0, + 0x08, 0x07, 0xfa, 0x3a, 0x17, 0x94, 0x07, 0x15, 0x45, 0x30, 0xf7, 0x10, 0x61, 0xef, 0x53, 0x1a, + 0x13, 0x57, 0x9c, 0x25, 0x1a, 0xb3, 0xa8, 0x30, 0x75, 0x43, 0x3f, 0x3a, 0x4b, 0x14, 0x72, 0x17, + 0xaa, 0x3e, 0xe5, 0xc3, 0x3c, 0xc7, 0xcb, 0x2a, 0xc7, 0xdf, 0x99, 0x1d, 0xc0, 0x1e, 0xe5, 0x43, + 0x93, 0xdf, 0xe0, 0x8f, 0xd6, 0xca, 0x69, 0xee, 0x26, 0x29, 0x21, 0x51, 0x22, 0xe8, 0x20, 0x24, + 0x2a, 0x3f, 0x2a, 0x4e, 0x8d, 0xf2, 0x83, 0x31, 0x11, 0xfd, 0x0a, 0x9a, 0x11, 0x8e, 0x71, 0x40, + 0x7c, 0x37, 0x90, 0x71, 0xc9, 0xcd, 0x2e, 0x2b, 0xb3, 0xdf, 0x99, 0x6d, 0xf6, 0xa1, 0x96, 0x9a, + 0x2c, 0x5c, 0x14, 0x5d, 0xa0, 0xd9, 0xbf, 0xb3, 0x00, 0x5d, 0x84, 0xca, 0x64, 0x1b, 0x85, 0x54, + 0x90, 0x28, 0x09, 0xb1, 0xd0, 0xb1, 0x35, 0xd7, 0xd9, 0xcc, 0xb9, 0x47, 0x86, 0xa9, 0xde, 0xa5, + 0x0f, 0xe1, 0xe6, 0x48, 0x4a, 0x7b, 0xab, 0x2d, 0x16, 0x5e, 0xb2, 0x16, 0x9d, 0xbc, 0x67, 0x6d, + 0x5b, 0x3d, 0x6b, 0xf6, 0x2f, 0x01, 0xc6, 0xc1, 0x42, 0xef, 0xc2, 0x8d, 0x01, 0x63, 0xc2, 0x55, + 0xc1, 0xe6, 0xf4, 0x09, 0x71, 0x83, 0x81, 0xb9, 0xfe, 0xba, 0x64, 0x48, 0xe8, 0x21, 0x7d, 0x42, + 0xf6, 0x06, 0xe8, 0x1d, 0xa8, 0xcb, 0x2c, 0x09, 0x99, 0x87, 0x43, 0x97, 0x73, 0x9f, 0x2b, 0x53, + 0x3a, 0x4d, 0x1e, 0x48, 0xe2, 0x21, 0xf7, 0xb9, 0xfd, 0x7b, 0x0b, 0x5a, 0xb3, 0xda, 0x02, 0xba, + 0x05, 0x6b, 0xe4, 0x0b, 0xe2, 0x65, 0x02, 0x0f, 0x42, 0xe2, 0x9e, 0xd0, 0x30, 0x3f, 0x69, 0x7d, + 0x4c, 0xbe, 0x4f, 0x43, 0x82, 0xee, 0xc3, 0x0d, 0x4d, 0x91, 0xed, 0x48, 0x3e, 0xf3, 0x2c, 0x13, + 0xca, 0x5c, 0x75, 0xeb, 0xb5, 0xfc, 0x36, 0xf2, 0x31, 0xa0, 0xd3, 0x33, 0x83, 0x84, 0xd3, 0x18, + 0xc9, 0x1c, 0x69, 0x11, 0xfb, 0xcb, 0xd2, 0xe8, 0x39, 0xd3, 0x4f, 0x22, 0xda, 0x81, 0xb2, 0x7c, + 0x14, 0xb5, 0xe1, 0xfa, 0xbc, 0xae, 0x5c, 0x90, 0xeb, 0xc8, 0x1f, 0xe2, 0x68, 0x59, 0xb4, 0x0e, + 0x4b, 0x3e, 0x11, 0x98, 0x86, 0x26, 0xda, 0x66, 0x87, 0x7a, 0xd0, 0x50, 0x00, 0x97, 0x0b, 0x9c, + 0x0a, 0xe5, 0xb8, 0x19, 0x1e, 0xda, 0x17, 0xbc, 0x3e, 0xca, 
0x87, 0x17, 0x47, 0x3d, 0xf2, 0xe4, + 0x50, 0x8a, 0x48, 0xa2, 0xfd, 0x18, 0xca, 0xca, 0x1a, 0xaa, 0xc2, 0xf2, 0x71, 0xff, 0xa3, 0xfe, + 0xa3, 0x8f, 0xfb, 0x8d, 0xaf, 0xa1, 0x55, 0xa8, 0xec, 0x38, 0xbb, 0x77, 0x8f, 0xf6, 0xfb, 0x7b, + 0x0d, 0x4b, 0xb2, 0x9c, 0xe3, 0x7e, 0x5f, 0x6e, 0x4a, 0x68, 0x05, 0xca, 0xbb, 0x8e, 0xf3, 0xc8, + 0x69, 0x2c, 0x48, 0x54, 0x6f, 0xf7, 0xc1, 0xae, 0x42, 0x2d, 0xca, 0xdd, 0xf1, 0x41, 0x4f, 0xcb, + 0x94, 0xed, 0xbf, 0x5b, 0x50, 0x2f, 0xbe, 0x05, 0xb2, 0xf2, 0x75, 0xb5, 0x9e, 0x92, 0x94, 0x53, + 0x16, 0xe7, 0x8f, 0xbb, 0x22, 0x3e, 0xd6, 0x34, 0xf4, 0x89, 0x1a, 0xa5, 0x12, 0x92, 0x0a, 0x6a, + 0xaa, 0x7e, 0x6e, 0x2f, 0x2b, 0x9a, 0xe8, 0x1c, 0x8c, 0x44, 0x75, 0x2f, 0x9b, 0xd0, 0xd5, 0xfe, + 0x10, 0xd6, 0xce, 0xb1, 0x9f, 0xab, 0x9f, 0x7d, 0x69, 0x41, 0x73, 0x27, 0x25, 0x58, 0xe4, 0xcd, + 0xd3, 0x21, 0x9f, 0x65, 0x84, 0x8b, 0xcb, 0x86, 0xbf, 0x75, 0x58, 0x4a, 0x49, 0x20, 0x8f, 0xab, + 0x1b, 0x94, 0xd9, 0xa1, 0x6d, 0x58, 0x36, 0x13, 0x8c, 0xc9, 0xb5, 0xb7, 0x2e, 0xcd, 0x0e, 0x27, + 0x97, 0xb0, 0xff, 0x6d, 0x41, 0xf3, 0x38, 0xf1, 0xff, 0x0f, 0x67, 0xca, 0x05, 0x67, 0xae, 0x30, + 0xa1, 0x4e, 0xf8, 0xbb, 0xf0, 0xbc, 0xfe, 0xa2, 0x6d, 0xa8, 0x66, 0xca, 0x5d, 0x35, 0x62, 0x9b, + 0x11, 0xf5, 0x62, 0x9a, 0xde, 0x97, 0x53, 0xf8, 0x43, 0xcc, 0x87, 0x0e, 0x68, 0xb8, 0x5c, 0xdb, + 0x09, 0x34, 0x7b, 0x24, 0x24, 0xd7, 0x15, 0xf8, 0xcb, 0xcf, 0x6a, 0x47, 0x70, 0x63, 0x8f, 0x88, + 0x97, 0x66, 0xee, 0xb7, 0x16, 0xbc, 0xf2, 0x80, 0xf2, 0xdc, 0x20, 0x7f, 0x6e, 0x8b, 0x8b, 0x05, + 0x8b, 0x37, 0x61, 0x25, 0x91, 0x65, 0x26, 0x3b, 0xac, 0x69, 0x9b, 0x15, 0x49, 0x90, 0xad, 0x55, + 0xe9, 0x94, 0x4c, 0xc1, 0x86, 0x24, 0x77, 0x55, 0xc1, 0x8f, 0x24, 0xc1, 0x7e, 0x06, 0xcd, 0xa2, + 0x27, 0x3c, 0x61, 0x31, 0x97, 0xef, 0x40, 0x25, 0xff, 0xd4, 0x6b, 0x59, 0xaa, 0x28, 0xaf, 0x70, + 0xfd, 0x23, 0x11, 0xf4, 0x2d, 0x58, 0x8b, 0xc9, 0x17, 0xc2, 0x9d, 0x30, 0xad, 0xe3, 0x50, 0x93, + 0xe4, 0x83, 0x91, 0xf9, 0x14, 0xd6, 0x7b, 0x14, 0x07, 0x31, 0xe3, 0x2f, 0xef, 0xb2, 0x7f, 0x38, + 
0xc5, 0x26, 0xcf, 0x42, 0xc1, 0xa5, 0x4d, 0x96, 0x89, 0x24, 0x13, 0x13, 0xe3, 0xe2, 0x8a, 0xa6, + 0x1c, 0xa7, 0x74, 0xeb, 0xcf, 0x15, 0xb8, 0x31, 0x1e, 0xa4, 0x44, 0xca, 0xc2, 0x90, 0xa4, 0xe8, + 0x8f, 0x16, 0xd4, 0x0a, 0x7d, 0x02, 0x75, 0xe6, 0x44, 0x6a, 0x4a, 0x43, 0x69, 0xbf, 0x91, 0xe3, + 0x27, 0x3e, 0x71, 0x3b, 0x8f, 0xf2, 0x4f, 0x5c, 0xbb, 0xf7, 0x9b, 0x7f, 0xfc, 0xeb, 0x0f, 0xa5, + 0x1f, 0xd9, 0xef, 0xcb, 0xcf, 0x63, 0x13, 0x01, 0xde, 0x7d, 0x3a, 0x8e, 0xce, 0xb3, 0xae, 0x3e, + 0x3c, 0xef, 0x3e, 0xd5, 0x8b, 0x67, 0xa3, 0xcf, 0xf4, 0x3b, 0xa3, 0x8a, 0xfc, 0x8b, 0x05, 0xb5, + 0x42, 0x07, 0x99, 0xe7, 0xe6, 0xb4, 0x56, 0x73, 0x99, 0x9b, 0x87, 0xca, 0xcd, 0x87, 0x5b, 0xf7, + 0x5e, 0xc0, 0xcd, 0xee, 0xd3, 0xc9, 0x4b, 0x7b, 0x36, 0xf6, 0xfa, 0x2b, 0x0b, 0x6a, 0x85, 0x5e, + 0x30, 0xcf, 0xeb, 0x69, 0x4d, 0xe3, 0x32, 0xaf, 0x7f, 0xa6, 0xbc, 0xee, 0xdd, 0xbe, 0x06, 0xaf, + 0xd1, 0x9f, 0x2c, 0x80, 0x71, 0x1b, 0x41, 0xdf, 0x9e, 0x33, 0x91, 0x9f, 0x6f, 0x36, 0xed, 0xcb, + 0xab, 0x2b, 0x77, 0x15, 0x5d, 0x87, 0xab, 0x5f, 0x59, 0xb0, 0x3a, 0x59, 0xf7, 0x68, 0xce, 0xa8, + 0x32, 0xa5, 0x53, 0xb5, 0x3b, 0x57, 0x85, 0xeb, 0x76, 0x62, 0x6f, 0x2b, 0xdf, 0x7f, 0x80, 0x5e, + 0x24, 0x87, 0xd1, 0x5f, 0x2d, 0x58, 0x3b, 0x57, 0xb1, 0xe8, 0x7b, 0xf3, 0xa6, 0xf5, 0x69, 0x0d, + 0xe5, 0xb2, 0x44, 0x78, 0xac, 0x3c, 0x3c, 0xb0, 0x3f, 0xba, 0x86, 0xf4, 0xf5, 0x8d, 0x07, 0x77, + 0xac, 0xdb, 0xf7, 0x3e, 0x83, 0xd7, 0x3d, 0x16, 0xcd, 0xf4, 0xf6, 0x5e, 0x3e, 0x41, 0xf2, 0x03, + 0xf9, 0x28, 0x1e, 0x58, 0xbf, 0xf8, 0x89, 0x81, 0x06, 0x2c, 0xc4, 0x71, 0xd0, 0x61, 0x69, 0xd0, + 0x0d, 0x48, 0xac, 0x9e, 0xcc, 0xae, 0x66, 0xe1, 0x84, 0xf2, 0x8b, 0xff, 0x9c, 0x6d, 0xe7, 0xeb, + 0xc1, 0x92, 0x02, 0xbf, 0xff, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x58, 0xab, 0x60, 0x26, 0xc6, + 0x13, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/jobs.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/jobs.pb.go new file mode 100644 index 000000000..e84b749f1 --- 
/dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/jobs.pb.go @@ -0,0 +1,2196 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/dataproc/v1/jobs.proto + +package dataproc + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The Log4j level for job execution. When running an +// [Apache Hive](http://hive.apache.org/) job, Cloud +// Dataproc configures the Hive client to an equivalent verbosity level. +type LoggingConfig_Level int32 + +const ( + // Level is unspecified. Use default level for log4j. + LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0 + // Use ALL level for log4j. + LoggingConfig_ALL LoggingConfig_Level = 1 + // Use TRACE level for log4j. + LoggingConfig_TRACE LoggingConfig_Level = 2 + // Use DEBUG level for log4j. + LoggingConfig_DEBUG LoggingConfig_Level = 3 + // Use INFO level for log4j. + LoggingConfig_INFO LoggingConfig_Level = 4 + // Use WARN level for log4j. + LoggingConfig_WARN LoggingConfig_Level = 5 + // Use ERROR level for log4j. + LoggingConfig_ERROR LoggingConfig_Level = 6 + // Use FATAL level for log4j. + LoggingConfig_FATAL LoggingConfig_Level = 7 + // Turn off log4j. 
+ LoggingConfig_OFF LoggingConfig_Level = 8 +) + +var LoggingConfig_Level_name = map[int32]string{ + 0: "LEVEL_UNSPECIFIED", + 1: "ALL", + 2: "TRACE", + 3: "DEBUG", + 4: "INFO", + 5: "WARN", + 6: "ERROR", + 7: "FATAL", + 8: "OFF", +} +var LoggingConfig_Level_value = map[string]int32{ + "LEVEL_UNSPECIFIED": 0, + "ALL": 1, + "TRACE": 2, + "DEBUG": 3, + "INFO": 4, + "WARN": 5, + "ERROR": 6, + "FATAL": 7, + "OFF": 8, +} + +func (x LoggingConfig_Level) String() string { + return proto.EnumName(LoggingConfig_Level_name, int32(x)) +} +func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +// The job state. +type JobStatus_State int32 + +const ( + // The job state is unknown. + JobStatus_STATE_UNSPECIFIED JobStatus_State = 0 + // The job is pending; it has been submitted, but is not yet running. + JobStatus_PENDING JobStatus_State = 1 + // Job has been received by the service and completed initial setup; + // it will soon be submitted to the cluster. + JobStatus_SETUP_DONE JobStatus_State = 8 + // The job is running on the cluster. + JobStatus_RUNNING JobStatus_State = 2 + // A CancelJob request has been received, but is pending. + JobStatus_CANCEL_PENDING JobStatus_State = 3 + // Transient in-flight resources have been canceled, and the request to + // cancel the running job has been issued to the cluster. + JobStatus_CANCEL_STARTED JobStatus_State = 7 + // The job cancellation was successful. + JobStatus_CANCELLED JobStatus_State = 4 + // The job has completed successfully. + JobStatus_DONE JobStatus_State = 5 + // The job has completed, but encountered an error. 
+ JobStatus_ERROR JobStatus_State = 6 +) + +var JobStatus_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "PENDING", + 8: "SETUP_DONE", + 2: "RUNNING", + 3: "CANCEL_PENDING", + 7: "CANCEL_STARTED", + 4: "CANCELLED", + 5: "DONE", + 6: "ERROR", +} +var JobStatus_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "PENDING": 1, + "SETUP_DONE": 8, + "RUNNING": 2, + "CANCEL_PENDING": 3, + "CANCEL_STARTED": 7, + "CANCELLED": 4, + "DONE": 5, + "ERROR": 6, +} + +func (x JobStatus_State) String() string { + return proto.EnumName(JobStatus_State_name, int32(x)) +} +func (JobStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 0} } + +// A matcher that specifies categories of job states. +type ListJobsRequest_JobStateMatcher int32 + +const ( + // Match all jobs, regardless of state. + ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0 + // Only match jobs in non-terminal states: PENDING, RUNNING, or + // CANCEL_PENDING. + ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1 + // Only match jobs in terminal states: CANCELLED, DONE, or ERROR. + ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2 +) + +var ListJobsRequest_JobStateMatcher_name = map[int32]string{ + 0: "ALL", + 1: "ACTIVE", + 2: "NON_ACTIVE", +} +var ListJobsRequest_JobStateMatcher_value = map[string]int32{ + "ALL": 0, + "ACTIVE": 1, + "NON_ACTIVE": 2, +} + +func (x ListJobsRequest_JobStateMatcher) String() string { + return proto.EnumName(ListJobsRequest_JobStateMatcher_name, int32(x)) +} +func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int) { + return fileDescriptor1, []int{14, 0} +} + +// The runtime logging config of the job. +type LoggingConfig struct { + // The per-package log levels for the driver. This may include + // "root" package name to configure rootLogger. 
+ // Examples: + // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + DriverLogLevels map[string]LoggingConfig_Level `protobuf:"bytes,2,rep,name=driver_log_levels,json=driverLogLevels" json:"driver_log_levels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=google.cloud.dataproc.v1.LoggingConfig_Level"` +} + +func (m *LoggingConfig) Reset() { *m = LoggingConfig{} } +func (m *LoggingConfig) String() string { return proto.CompactTextString(m) } +func (*LoggingConfig) ProtoMessage() {} +func (*LoggingConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level { + if m != nil { + return m.DriverLogLevels + } + return nil +} + +// A Cloud Dataproc job for running +// [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) +// jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). +type HadoopJob struct { + // [Required] Indicates the location of the driver's main class. Specify + // either the jar file that contains the main class or the main class name. + // To specify both, add the jar file to `jar_file_uris`, and then specify + // the main class name in this property. + // + // Types that are valid to be assigned to Driver: + // *HadoopJob_MainJarFileUri + // *HadoopJob_MainClass + Driver isHadoopJob_Driver `protobuf_oneof:"driver"` + // [Optional] The arguments to pass to the driver. Do not + // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job + // properties, since a collision may occur that causes an incorrect job + // submission. + Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` + // [Optional] Jar file URIs to add to the CLASSPATHs of the + // Hadoop driver and tasks. 
+ JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied + // to the working directory of Hadoop drivers and distributed tasks. Useful + // for naively parallel tasks. + FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` + // [Optional] HCFS URIs of archives to be extracted in the working directory of + // Hadoop drivers and tasks. Supported file types: + // .jar, .tar, .tar.gz, .tgz, or .zip. + ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` + // [Optional] A mapping of property names to values, used to configure Hadoop. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. Can include properties set in /etc/hadoop/conf/*-site and + // classes in user code. + Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] The runtime log config for job execution. 
+ LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *HadoopJob) Reset() { *m = HadoopJob{} } +func (m *HadoopJob) String() string { return proto.CompactTextString(m) } +func (*HadoopJob) ProtoMessage() {} +func (*HadoopJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type isHadoopJob_Driver interface { + isHadoopJob_Driver() +} + +type HadoopJob_MainJarFileUri struct { + MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"` +} +type HadoopJob_MainClass struct { + MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"` +} + +func (*HadoopJob_MainJarFileUri) isHadoopJob_Driver() {} +func (*HadoopJob_MainClass) isHadoopJob_Driver() {} + +func (m *HadoopJob) GetDriver() isHadoopJob_Driver { + if m != nil { + return m.Driver + } + return nil +} + +func (m *HadoopJob) GetMainJarFileUri() string { + if x, ok := m.GetDriver().(*HadoopJob_MainJarFileUri); ok { + return x.MainJarFileUri + } + return "" +} + +func (m *HadoopJob) GetMainClass() string { + if x, ok := m.GetDriver().(*HadoopJob_MainClass); ok { + return x.MainClass + } + return "" +} + +func (m *HadoopJob) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *HadoopJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *HadoopJob) GetFileUris() []string { + if m != nil { + return m.FileUris + } + return nil +} + +func (m *HadoopJob) GetArchiveUris() []string { + if m != nil { + return m.ArchiveUris + } + return nil +} + +func (m *HadoopJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *HadoopJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*HadoopJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HadoopJob_OneofMarshaler, _HadoopJob_OneofUnmarshaler, _HadoopJob_OneofSizer, []interface{}{ + (*HadoopJob_MainJarFileUri)(nil), + (*HadoopJob_MainClass)(nil), + } +} + +func _HadoopJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HadoopJob) + // driver + switch x := m.Driver.(type) { + case *HadoopJob_MainJarFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.MainJarFileUri) + case *HadoopJob_MainClass: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.MainClass) + case nil: + default: + return fmt.Errorf("HadoopJob.Driver has unexpected type %T", x) + } + return nil +} + +func _HadoopJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HadoopJob) + switch tag { + case 1: // driver.main_jar_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Driver = &HadoopJob_MainJarFileUri{x} + return true, err + case 2: // driver.main_class + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Driver = &HadoopJob_MainClass{x} + return true, err + default: + return false, nil + } +} + +func _HadoopJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HadoopJob) + // driver + switch x := m.Driver.(type) { + case *HadoopJob_MainJarFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.MainJarFileUri))) + n += len(x.MainJarFileUri) + case *HadoopJob_MainClass: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.MainClass))) + n += len(x.MainClass) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + 
+// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) +// applications on YARN. +type SparkJob struct { + // [Required] The specification of the main method to call to drive the job. + // Specify either the jar file that contains the main class or the main class + // name. To pass both a main jar and a main class in that jar, add the jar to + // `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`. + // + // Types that are valid to be assigned to Driver: + // *SparkJob_MainJarFileUri + // *SparkJob_MainClass + Driver isSparkJob_Driver `protobuf_oneof:"driver"` + // [Optional] The arguments to pass to the driver. Do not include arguments, + // such as `--conf`, that can be set as job properties, since a collision may + // occur that causes an incorrect job submission. + Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` + // [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the + // Spark driver and tasks. + JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // [Optional] HCFS URIs of files to be copied to the working directory of + // Spark drivers and distributed tasks. Useful for naively parallel tasks. + FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` + // [Optional] HCFS URIs of archives to be extracted in the working directory + // of Spark drivers and tasks. Supported file types: + // .jar, .tar, .tar.gz, .tgz, and .zip. + ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` + // [Optional] A mapping of property names to values, used to configure Spark. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. 
+ Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] The runtime log config for job execution. + LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *SparkJob) Reset() { *m = SparkJob{} } +func (m *SparkJob) String() string { return proto.CompactTextString(m) } +func (*SparkJob) ProtoMessage() {} +func (*SparkJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +type isSparkJob_Driver interface { + isSparkJob_Driver() +} + +type SparkJob_MainJarFileUri struct { + MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"` +} +type SparkJob_MainClass struct { + MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"` +} + +func (*SparkJob_MainJarFileUri) isSparkJob_Driver() {} +func (*SparkJob_MainClass) isSparkJob_Driver() {} + +func (m *SparkJob) GetDriver() isSparkJob_Driver { + if m != nil { + return m.Driver + } + return nil +} + +func (m *SparkJob) GetMainJarFileUri() string { + if x, ok := m.GetDriver().(*SparkJob_MainJarFileUri); ok { + return x.MainJarFileUri + } + return "" +} + +func (m *SparkJob) GetMainClass() string { + if x, ok := m.GetDriver().(*SparkJob_MainClass); ok { + return x.MainClass + } + return "" +} + +func (m *SparkJob) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *SparkJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *SparkJob) GetFileUris() []string { + if m != nil { + return m.FileUris + } + return nil +} + +func (m *SparkJob) GetArchiveUris() []string { + if m != nil { + return m.ArchiveUris + } + return nil +} + +func (m *SparkJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *SparkJob) 
GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SparkJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SparkJob_OneofMarshaler, _SparkJob_OneofUnmarshaler, _SparkJob_OneofSizer, []interface{}{ + (*SparkJob_MainJarFileUri)(nil), + (*SparkJob_MainClass)(nil), + } +} + +func _SparkJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SparkJob) + // driver + switch x := m.Driver.(type) { + case *SparkJob_MainJarFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.MainJarFileUri) + case *SparkJob_MainClass: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.MainClass) + case nil: + default: + return fmt.Errorf("SparkJob.Driver has unexpected type %T", x) + } + return nil +} + +func _SparkJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SparkJob) + switch tag { + case 1: // driver.main_jar_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Driver = &SparkJob_MainJarFileUri{x} + return true, err + case 2: // driver.main_class + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Driver = &SparkJob_MainClass{x} + return true, err + default: + return false, nil + } +} + +func _SparkJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SparkJob) + // driver + switch x := m.Driver.(type) { + case *SparkJob_MainJarFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.MainJarFileUri))) + n += len(x.MainJarFileUri) + case *SparkJob_MainClass: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.MainClass))) + n += len(x.MainClass) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cloud Dataproc job for running +// [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) +// applications on YARN. +type PySparkJob struct { + // [Required] The HCFS URI of the main Python file to use as the driver. Must + // be a .py file. + MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri" json:"main_python_file_uri,omitempty"` + // [Optional] The arguments to pass to the driver. Do not include arguments, + // such as `--conf`, that can be set as job properties, since a collision may + // occur that causes an incorrect job submission. + Args []string `protobuf:"bytes,2,rep,name=args" json:"args,omitempty"` + // [Optional] HCFS file URIs of Python files to pass to the PySpark + // framework. Supported file types: .py, .egg, and .zip. + PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris" json:"python_file_uris,omitempty"` + // [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the + // Python driver and tasks. + JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // [Optional] HCFS URIs of files to be copied to the working directory of + // Python drivers and distributed tasks. Useful for naively parallel tasks. + FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` + // [Optional] HCFS URIs of archives to be extracted in the working directory of + // .jar, .tar, .tar.gz, .tgz, and .zip. + ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` + // [Optional] A mapping of property names to values, used to configure PySpark. 
+ // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. + Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] The runtime log config for job execution. + LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *PySparkJob) Reset() { *m = PySparkJob{} } +func (m *PySparkJob) String() string { return proto.CompactTextString(m) } +func (*PySparkJob) ProtoMessage() {} +func (*PySparkJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *PySparkJob) GetMainPythonFileUri() string { + if m != nil { + return m.MainPythonFileUri + } + return "" +} + +func (m *PySparkJob) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *PySparkJob) GetPythonFileUris() []string { + if m != nil { + return m.PythonFileUris + } + return nil +} + +func (m *PySparkJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *PySparkJob) GetFileUris() []string { + if m != nil { + return m.FileUris + } + return nil +} + +func (m *PySparkJob) GetArchiveUris() []string { + if m != nil { + return m.ArchiveUris + } + return nil +} + +func (m *PySparkJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *PySparkJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// A list of queries to run on a cluster. +type QueryList struct { + // [Required] The queries to execute. You do not need to terminate a query + // with a semicolon. Multiple queries can be specified in one string + // by separating each with a semicolon. 
Here is an example of an Cloud + // Dataproc API snippet that uses a QueryList to specify a HiveJob: + // + // "hiveJob": { + // "queryList": { + // "queries": [ + // "query1", + // "query2", + // "query3;query4", + // ] + // } + // } + Queries []string `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"` +} + +func (m *QueryList) Reset() { *m = QueryList{} } +func (m *QueryList) String() string { return proto.CompactTextString(m) } +func (*QueryList) ProtoMessage() {} +func (*QueryList) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *QueryList) GetQueries() []string { + if m != nil { + return m.Queries + } + return nil +} + +// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) +// queries on YARN. +type HiveJob struct { + // [Required] The sequence of Hive queries to execute, specified as either + // an HCFS file URI or a list of queries. + // + // Types that are valid to be assigned to Queries: + // *HiveJob_QueryFileUri + // *HiveJob_QueryList + Queries isHiveJob_Queries `protobuf_oneof:"queries"` + // [Optional] Whether to continue executing queries if a query fails. + // The default value is `false`. Setting to `true` can be useful when executing + // independent parallel queries. + ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"` + // [Optional] Mapping of query variable names to values (equivalent to the + // Hive command: `SET name="value";`). + ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] A mapping of property names and values, used to configure Hive. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. 
Can include properties set in /etc/hadoop/conf/*-site.xml, + // /etc/hive/conf/hive-site.xml, and classes in user code. + Properties map[string]string `protobuf:"bytes,5,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] HCFS URIs of jar files to add to the CLASSPATH of the + // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes + // and UDFs. + JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` +} + +func (m *HiveJob) Reset() { *m = HiveJob{} } +func (m *HiveJob) String() string { return proto.CompactTextString(m) } +func (*HiveJob) ProtoMessage() {} +func (*HiveJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +type isHiveJob_Queries interface { + isHiveJob_Queries() +} + +type HiveJob_QueryFileUri struct { + QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"` +} +type HiveJob_QueryList struct { + QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"` +} + +func (*HiveJob_QueryFileUri) isHiveJob_Queries() {} +func (*HiveJob_QueryList) isHiveJob_Queries() {} + +func (m *HiveJob) GetQueries() isHiveJob_Queries { + if m != nil { + return m.Queries + } + return nil +} + +func (m *HiveJob) GetQueryFileUri() string { + if x, ok := m.GetQueries().(*HiveJob_QueryFileUri); ok { + return x.QueryFileUri + } + return "" +} + +func (m *HiveJob) GetQueryList() *QueryList { + if x, ok := m.GetQueries().(*HiveJob_QueryList); ok { + return x.QueryList + } + return nil +} + +func (m *HiveJob) GetContinueOnFailure() bool { + if m != nil { + return m.ContinueOnFailure + } + return false +} + +func (m *HiveJob) GetScriptVariables() map[string]string { + if m != nil { + return m.ScriptVariables + } + return nil +} + +func (m *HiveJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + 
+func (m *HiveJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*HiveJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HiveJob_OneofMarshaler, _HiveJob_OneofUnmarshaler, _HiveJob_OneofSizer, []interface{}{ + (*HiveJob_QueryFileUri)(nil), + (*HiveJob_QueryList)(nil), + } +} + +func _HiveJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HiveJob) + // queries + switch x := m.Queries.(type) { + case *HiveJob_QueryFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.QueryFileUri) + case *HiveJob_QueryList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HiveJob.Queries has unexpected type %T", x) + } + return nil +} + +func _HiveJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HiveJob) + switch tag { + case 1: // queries.query_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Queries = &HiveJob_QueryFileUri{x} + return true, err + case 2: // queries.query_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryList) + err := b.DecodeMessage(msg) + m.Queries = &HiveJob_QueryList{msg} + return true, err + default: + return false, nil + } +} + +func _HiveJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HiveJob) + // queries + switch x := m.Queries.(type) { + case *HiveJob_QueryFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.QueryFileUri))) + n += len(x.QueryFileUri) + case *HiveJob_QueryList: + s := proto.Size(x.QueryList) + n += 
proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) +// queries. +type SparkSqlJob struct { + // [Required] The sequence of Spark SQL queries to execute, specified as + // either an HCFS file URI or as a list of queries. + // + // Types that are valid to be assigned to Queries: + // *SparkSqlJob_QueryFileUri + // *SparkSqlJob_QueryList + Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"` + // [Optional] Mapping of query variable names to values (equivalent to the + // Spark SQL command: SET `name="value";`). + ScriptVariables map[string]string `protobuf:"bytes,3,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] A mapping of property names to values, used to configure + // Spark SQL's SparkConf. Properties that conflict with values set by the + // Cloud Dataproc API may be overwritten. + Properties map[string]string `protobuf:"bytes,4,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH. + JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // [Optional] The runtime log config for job execution. 
+ LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *SparkSqlJob) Reset() { *m = SparkSqlJob{} } +func (m *SparkSqlJob) String() string { return proto.CompactTextString(m) } +func (*SparkSqlJob) ProtoMessage() {} +func (*SparkSqlJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +type isSparkSqlJob_Queries interface { + isSparkSqlJob_Queries() +} + +type SparkSqlJob_QueryFileUri struct { + QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"` +} +type SparkSqlJob_QueryList struct { + QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"` +} + +func (*SparkSqlJob_QueryFileUri) isSparkSqlJob_Queries() {} +func (*SparkSqlJob_QueryList) isSparkSqlJob_Queries() {} + +func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries { + if m != nil { + return m.Queries + } + return nil +} + +func (m *SparkSqlJob) GetQueryFileUri() string { + if x, ok := m.GetQueries().(*SparkSqlJob_QueryFileUri); ok { + return x.QueryFileUri + } + return "" +} + +func (m *SparkSqlJob) GetQueryList() *QueryList { + if x, ok := m.GetQueries().(*SparkSqlJob_QueryList); ok { + return x.QueryList + } + return nil +} + +func (m *SparkSqlJob) GetScriptVariables() map[string]string { + if m != nil { + return m.ScriptVariables + } + return nil +} + +func (m *SparkSqlJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *SparkSqlJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*SparkSqlJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SparkSqlJob_OneofMarshaler, _SparkSqlJob_OneofUnmarshaler, _SparkSqlJob_OneofSizer, []interface{}{ + (*SparkSqlJob_QueryFileUri)(nil), + (*SparkSqlJob_QueryList)(nil), + } +} + +func _SparkSqlJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SparkSqlJob) + // queries + switch x := m.Queries.(type) { + case *SparkSqlJob_QueryFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.QueryFileUri) + case *SparkSqlJob_QueryList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SparkSqlJob.Queries has unexpected type %T", x) + } + return nil +} + +func _SparkSqlJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SparkSqlJob) + switch tag { + case 1: // queries.query_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Queries = &SparkSqlJob_QueryFileUri{x} + return true, err + case 2: // queries.query_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryList) + err := b.DecodeMessage(msg) + m.Queries = &SparkSqlJob_QueryList{msg} + return true, err + default: + return false, nil + } +} + +func _SparkSqlJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SparkSqlJob) + // queries + switch x := m.Queries.(type) { + case *SparkSqlJob_QueryFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.QueryFileUri))) + n += len(x.QueryFileUri) + case *SparkSqlJob_QueryList: + s := proto.Size(x.QueryList) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + 
default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) +// queries on YARN. +type PigJob struct { + // [Required] The sequence of Pig queries to execute, specified as an HCFS + // file URI or a list of queries. + // + // Types that are valid to be assigned to Queries: + // *PigJob_QueryFileUri + // *PigJob_QueryList + Queries isPigJob_Queries `protobuf_oneof:"queries"` + // [Optional] Whether to continue executing queries if a query fails. + // The default value is `false`. Setting to `true` can be useful when executing + // independent parallel queries. + ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"` + // [Optional] Mapping of query variable names to values (equivalent to the Pig + // command: `name=[value]`). + ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] A mapping of property names to values, used to configure Pig. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, + // /etc/pig/conf/pig.properties, and classes in user code. + Properties map[string]string `protobuf:"bytes,5,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // [Optional] HCFS URIs of jar files to add to the CLASSPATH of + // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. + JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // [Optional] The runtime log config for job execution. 
+ LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *PigJob) Reset() { *m = PigJob{} } +func (m *PigJob) String() string { return proto.CompactTextString(m) } +func (*PigJob) ProtoMessage() {} +func (*PigJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +type isPigJob_Queries interface { + isPigJob_Queries() +} + +type PigJob_QueryFileUri struct { + QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"` +} +type PigJob_QueryList struct { + QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"` +} + +func (*PigJob_QueryFileUri) isPigJob_Queries() {} +func (*PigJob_QueryList) isPigJob_Queries() {} + +func (m *PigJob) GetQueries() isPigJob_Queries { + if m != nil { + return m.Queries + } + return nil +} + +func (m *PigJob) GetQueryFileUri() string { + if x, ok := m.GetQueries().(*PigJob_QueryFileUri); ok { + return x.QueryFileUri + } + return "" +} + +func (m *PigJob) GetQueryList() *QueryList { + if x, ok := m.GetQueries().(*PigJob_QueryList); ok { + return x.QueryList + } + return nil +} + +func (m *PigJob) GetContinueOnFailure() bool { + if m != nil { + return m.ContinueOnFailure + } + return false +} + +func (m *PigJob) GetScriptVariables() map[string]string { + if m != nil { + return m.ScriptVariables + } + return nil +} + +func (m *PigJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *PigJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *PigJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PigJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PigJob_OneofMarshaler, _PigJob_OneofUnmarshaler, _PigJob_OneofSizer, []interface{}{ + (*PigJob_QueryFileUri)(nil), + (*PigJob_QueryList)(nil), + } +} + +func _PigJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PigJob) + // queries + switch x := m.Queries.(type) { + case *PigJob_QueryFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.QueryFileUri) + case *PigJob_QueryList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PigJob.Queries has unexpected type %T", x) + } + return nil +} + +func _PigJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PigJob) + switch tag { + case 1: // queries.query_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Queries = &PigJob_QueryFileUri{x} + return true, err + case 2: // queries.query_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryList) + err := b.DecodeMessage(msg) + m.Queries = &PigJob_QueryList{msg} + return true, err + default: + return false, nil + } +} + +func _PigJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PigJob) + // queries + switch x := m.Queries.(type) { + case *PigJob_QueryFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.QueryFileUri))) + n += len(x.QueryFileUri) + case *PigJob_QueryList: + s := proto.Size(x.QueryList) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// 
Cloud Dataproc job config. +type JobPlacement struct { + // [Required] The name of the cluster where the job will be submitted. + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // [Output-only] A cluster UUID generated by the Cloud Dataproc service when + // the job is submitted. + ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` +} + +func (m *JobPlacement) Reset() { *m = JobPlacement{} } +func (m *JobPlacement) String() string { return proto.CompactTextString(m) } +func (*JobPlacement) ProtoMessage() {} +func (*JobPlacement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *JobPlacement) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *JobPlacement) GetClusterUuid() string { + if m != nil { + return m.ClusterUuid + } + return "" +} + +// Cloud Dataproc job status. +type JobStatus struct { + // [Output-only] A state message specifying the overall job state. + State JobStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.JobStatus_State" json:"state,omitempty"` + // [Output-only] Optional job state details, such as an error + // description if the state is ERROR. + Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"` + // [Output-only] The time when this state was entered. 
+ StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` +} + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (m *JobStatus) String() string { return proto.CompactTextString(m) } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *JobStatus) GetState() JobStatus_State { + if m != nil { + return m.State + } + return JobStatus_STATE_UNSPECIFIED +} + +func (m *JobStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +func (m *JobStatus) GetStateStartTime() *google_protobuf3.Timestamp { + if m != nil { + return m.StateStartTime + } + return nil +} + +// Encapsulates the full scoping used to reference a job. +type JobReference struct { + // [Required] The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Optional] The job ID, which must be unique within the project. The job ID + // is generated by the server upon job submission or provided by the user as a + // means to perform retries without creating duplicate jobs. The ID must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or + // hyphens (-). The maximum length is 512 characters. 
+ JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *JobReference) Reset() { *m = JobReference{} } +func (m *JobReference) String() string { return proto.CompactTextString(m) } +func (*JobReference) ProtoMessage() {} +func (*JobReference) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *JobReference) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *JobReference) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +// A Cloud Dataproc job resource. +type Job struct { + // [Optional] The fully qualified reference to the job, which can be used to + // obtain the equivalent REST path of the job resource. If this property + // is not specified when a job is created, the server generates a + // job_id. + Reference *JobReference `protobuf:"bytes,1,opt,name=reference" json:"reference,omitempty"` + // [Required] Job information, including how, when, and where to + // run the job. + Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement" json:"placement,omitempty"` + // [Required] The application/framework-specific portion of the job. + // + // Types that are valid to be assigned to TypeJob: + // *Job_HadoopJob + // *Job_SparkJob + // *Job_PysparkJob + // *Job_HiveJob + // *Job_PigJob + // *Job_SparkSqlJob + TypeJob isJob_TypeJob `protobuf_oneof:"type_job"` + // [Output-only] The job status. Additional application-specific + // status information may be contained in the type_job + // and yarn_applications fields. + Status *JobStatus `protobuf:"bytes,8,opt,name=status" json:"status,omitempty"` + // [Output-only] The previous job status. + StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` + // [Output-only] A URI pointing to the location of the stdout of the job's + // driver program. 
+ DriverOutputResourceUri string `protobuf:"bytes,17,opt,name=driver_output_resource_uri,json=driverOutputResourceUri" json:"driver_output_resource_uri,omitempty"` + // [Output-only] If present, the location of miscellaneous control files + // which may be used as part of job setup and handling. If not present, + // control files may be placed in the same location as `driver_output_uri`. + DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri" json:"driver_control_files_uri,omitempty"` +} + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +type isJob_TypeJob interface { + isJob_TypeJob() +} + +type Job_HadoopJob struct { + HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,oneof"` +} +type Job_SparkJob struct { + SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,oneof"` +} +type Job_PysparkJob struct { + PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,oneof"` +} +type Job_HiveJob struct { + HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,oneof"` +} +type Job_PigJob struct { + PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,oneof"` +} +type Job_SparkSqlJob struct { + SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,oneof"` +} + +func (*Job_HadoopJob) isJob_TypeJob() {} +func (*Job_SparkJob) isJob_TypeJob() {} +func (*Job_PysparkJob) isJob_TypeJob() {} +func (*Job_HiveJob) isJob_TypeJob() {} +func (*Job_PigJob) isJob_TypeJob() {} +func (*Job_SparkSqlJob) isJob_TypeJob() {} + +func (m *Job) GetTypeJob() isJob_TypeJob { + if m != nil { + return m.TypeJob + } + return nil +} + +func (m *Job) GetReference() *JobReference { + if m != nil { + return m.Reference + } + return nil +} + +func (m *Job) 
GetPlacement() *JobPlacement { + if m != nil { + return m.Placement + } + return nil +} + +func (m *Job) GetHadoopJob() *HadoopJob { + if x, ok := m.GetTypeJob().(*Job_HadoopJob); ok { + return x.HadoopJob + } + return nil +} + +func (m *Job) GetSparkJob() *SparkJob { + if x, ok := m.GetTypeJob().(*Job_SparkJob); ok { + return x.SparkJob + } + return nil +} + +func (m *Job) GetPysparkJob() *PySparkJob { + if x, ok := m.GetTypeJob().(*Job_PysparkJob); ok { + return x.PysparkJob + } + return nil +} + +func (m *Job) GetHiveJob() *HiveJob { + if x, ok := m.GetTypeJob().(*Job_HiveJob); ok { + return x.HiveJob + } + return nil +} + +func (m *Job) GetPigJob() *PigJob { + if x, ok := m.GetTypeJob().(*Job_PigJob); ok { + return x.PigJob + } + return nil +} + +func (m *Job) GetSparkSqlJob() *SparkSqlJob { + if x, ok := m.GetTypeJob().(*Job_SparkSqlJob); ok { + return x.SparkSqlJob + } + return nil +} + +func (m *Job) GetStatus() *JobStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *Job) GetStatusHistory() []*JobStatus { + if m != nil { + return m.StatusHistory + } + return nil +} + +func (m *Job) GetDriverOutputResourceUri() string { + if m != nil { + return m.DriverOutputResourceUri + } + return "" +} + +func (m *Job) GetDriverControlFilesUri() string { + if m != nil { + return m.DriverControlFilesUri + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Job_OneofMarshaler, _Job_OneofUnmarshaler, _Job_OneofSizer, []interface{}{ + (*Job_HadoopJob)(nil), + (*Job_SparkJob)(nil), + (*Job_PysparkJob)(nil), + (*Job_HiveJob)(nil), + (*Job_PigJob)(nil), + (*Job_SparkSqlJob)(nil), + } +} + +func _Job_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Job) + // type_job + switch x := m.TypeJob.(type) { + case *Job_HadoopJob: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HadoopJob); err != nil { + return err + } + case *Job_SparkJob: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SparkJob); err != nil { + return err + } + case *Job_PysparkJob: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PysparkJob); err != nil { + return err + } + case *Job_HiveJob: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HiveJob); err != nil { + return err + } + case *Job_PigJob: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PigJob); err != nil { + return err + } + case *Job_SparkSqlJob: + b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SparkSqlJob); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Job.TypeJob has unexpected type %T", x) + } + return nil +} + +func _Job_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Job) + switch tag { + case 3: // type_job.hadoop_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HadoopJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_HadoopJob{msg} + return true, err + case 4: // type_job.spark_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SparkJob) + err := 
b.DecodeMessage(msg) + m.TypeJob = &Job_SparkJob{msg} + return true, err + case 5: // type_job.pyspark_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PySparkJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_PysparkJob{msg} + return true, err + case 6: // type_job.hive_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HiveJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_HiveJob{msg} + return true, err + case 7: // type_job.pig_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PigJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_PigJob{msg} + return true, err + case 12: // type_job.spark_sql_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SparkSqlJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_SparkSqlJob{msg} + return true, err + default: + return false, nil + } +} + +func _Job_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Job) + // type_job + switch x := m.TypeJob.(type) { + case *Job_HadoopJob: + s := proto.Size(x.HadoopJob) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_SparkJob: + s := proto.Size(x.SparkJob) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_PysparkJob: + s := proto.Size(x.PysparkJob) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_HiveJob: + s := proto.Size(x.HiveJob) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_PigJob: + s := proto.Size(x.PigJob) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_SparkSqlJob: + s := proto.Size(x.SparkSqlJob) + n += proto.SizeVarint(12<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + 
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A request to submit a job. +type SubmitJobRequest struct { + // [Required] The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // [Required] The job resource. + Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` +} + +func (m *SubmitJobRequest) Reset() { *m = SubmitJobRequest{} } +func (m *SubmitJobRequest) String() string { return proto.CompactTextString(m) } +func (*SubmitJobRequest) ProtoMessage() {} +func (*SubmitJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *SubmitJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SubmitJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *SubmitJobRequest) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +// A request to get the resource representation for a job in a project. +type GetJobRequest struct { + // [Required] The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // [Required] The job ID. 
+ JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *GetJobRequest) Reset() { *m = GetJobRequest{} } +func (m *GetJobRequest) String() string { return proto.CompactTextString(m) } +func (*GetJobRequest) ProtoMessage() {} +func (*GetJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *GetJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *GetJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +// A request to list jobs in a project. +type ListJobsRequest struct { + // [Required] The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,6,opt,name=region" json:"region,omitempty"` + // [Optional] The number of results to return in each response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // [Optional] The page token, returned by a previous call, to request the + // next page of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // [Optional] If set, the returned jobs list includes only jobs that were + // submitted to the named cluster. + ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // [Optional] Specifies enumerated categories of jobs to list + // (default = match ALL jobs). 
+ JobStateMatcher ListJobsRequest_JobStateMatcher `protobuf:"varint,5,opt,name=job_state_matcher,json=jobStateMatcher,enum=google.cloud.dataproc.v1.ListJobsRequest_JobStateMatcher" json:"job_state_matcher,omitempty"` +} + +func (m *ListJobsRequest) Reset() { *m = ListJobsRequest{} } +func (m *ListJobsRequest) String() string { return proto.CompactTextString(m) } +func (*ListJobsRequest) ProtoMessage() {} +func (*ListJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +func (m *ListJobsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListJobsRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *ListJobsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListJobsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListJobsRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher { + if m != nil { + return m.JobStateMatcher + } + return ListJobsRequest_ALL +} + +// A list of jobs in a project. +type ListJobsResponse struct { + // [Output-only] Jobs list. + Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"` + // [Optional] This token is included in the response if there are more results + // to fetch. To fetch additional results, provide this value as the + // `page_token` in a subsequent ListJobsRequest. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListJobsResponse) Reset() { *m = ListJobsResponse{} } +func (m *ListJobsResponse) String() string { return proto.CompactTextString(m) } +func (*ListJobsResponse) ProtoMessage() {} +func (*ListJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } + +func (m *ListJobsResponse) GetJobs() []*Job { + if m != nil { + return m.Jobs + } + return nil +} + +func (m *ListJobsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// A request to cancel a job. +type CancelJobRequest struct { + // [Required] The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // [Required] The job ID. + JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *CancelJobRequest) Reset() { *m = CancelJobRequest{} } +func (m *CancelJobRequest) String() string { return proto.CompactTextString(m) } +func (*CancelJobRequest) ProtoMessage() {} +func (*CancelJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } + +func (m *CancelJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CancelJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *CancelJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +// A request to delete a job. +type DeleteJobRequest struct { + // [Required] The ID of the Google Cloud Platform project that the job + // belongs to. 
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // [Required] The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // [Required] The job ID. + JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *DeleteJobRequest) Reset() { *m = DeleteJobRequest{} } +func (m *DeleteJobRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteJobRequest) ProtoMessage() {} +func (*DeleteJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } + +func (m *DeleteJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DeleteJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *DeleteJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func init() { + proto.RegisterType((*LoggingConfig)(nil), "google.cloud.dataproc.v1.LoggingConfig") + proto.RegisterType((*HadoopJob)(nil), "google.cloud.dataproc.v1.HadoopJob") + proto.RegisterType((*SparkJob)(nil), "google.cloud.dataproc.v1.SparkJob") + proto.RegisterType((*PySparkJob)(nil), "google.cloud.dataproc.v1.PySparkJob") + proto.RegisterType((*QueryList)(nil), "google.cloud.dataproc.v1.QueryList") + proto.RegisterType((*HiveJob)(nil), "google.cloud.dataproc.v1.HiveJob") + proto.RegisterType((*SparkSqlJob)(nil), "google.cloud.dataproc.v1.SparkSqlJob") + proto.RegisterType((*PigJob)(nil), "google.cloud.dataproc.v1.PigJob") + proto.RegisterType((*JobPlacement)(nil), "google.cloud.dataproc.v1.JobPlacement") + proto.RegisterType((*JobStatus)(nil), "google.cloud.dataproc.v1.JobStatus") + proto.RegisterType((*JobReference)(nil), "google.cloud.dataproc.v1.JobReference") + proto.RegisterType((*Job)(nil), "google.cloud.dataproc.v1.Job") + proto.RegisterType((*SubmitJobRequest)(nil), 
"google.cloud.dataproc.v1.SubmitJobRequest") + proto.RegisterType((*GetJobRequest)(nil), "google.cloud.dataproc.v1.GetJobRequest") + proto.RegisterType((*ListJobsRequest)(nil), "google.cloud.dataproc.v1.ListJobsRequest") + proto.RegisterType((*ListJobsResponse)(nil), "google.cloud.dataproc.v1.ListJobsResponse") + proto.RegisterType((*CancelJobRequest)(nil), "google.cloud.dataproc.v1.CancelJobRequest") + proto.RegisterType((*DeleteJobRequest)(nil), "google.cloud.dataproc.v1.DeleteJobRequest") + proto.RegisterEnum("google.cloud.dataproc.v1.LoggingConfig_Level", LoggingConfig_Level_name, LoggingConfig_Level_value) + proto.RegisterEnum("google.cloud.dataproc.v1.JobStatus_State", JobStatus_State_name, JobStatus_State_value) + proto.RegisterEnum("google.cloud.dataproc.v1.ListJobsRequest_JobStateMatcher", ListJobsRequest_JobStateMatcher_name, ListJobsRequest_JobStateMatcher_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for JobController service + +type JobControllerClient interface { + // Submits a job to a cluster. + SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error) + // Gets the resource representation for a job in a project. + GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) + // Lists regions/{region}/jobs in a project. + ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) + // Starts a job cancellation request. To access the job resource + // after cancellation, call + // [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or + // [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get). 
+ CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) + // Deletes the job from the project. If the job is active, the delete fails, + // and the response returns `FAILED_PRECONDITION`. + DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) +} + +type jobControllerClient struct { + cc *grpc.ClientConn +} + +func NewJobControllerClient(cc *grpc.ClientConn) JobControllerClient { + return &jobControllerClient{cc} +} + +func (c *jobControllerClient) SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.JobController/SubmitJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.JobController/GetJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) { + out := new(ListJobsResponse) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.JobController/ListJobs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.JobController/CancelJob", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.JobController/DeleteJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for JobController service + +type JobControllerServer interface { + // Submits a job to a cluster. + SubmitJob(context.Context, *SubmitJobRequest) (*Job, error) + // Gets the resource representation for a job in a project. + GetJob(context.Context, *GetJobRequest) (*Job, error) + // Lists regions/{region}/jobs in a project. + ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) + // Starts a job cancellation request. To access the job resource + // after cancellation, call + // [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or + // [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get). + CancelJob(context.Context, *CancelJobRequest) (*Job, error) + // Deletes the job from the project. If the job is active, the delete fails, + // and the response returns `FAILED_PRECONDITION`. 
+ DeleteJob(context.Context, *DeleteJobRequest) (*google_protobuf2.Empty, error) +} + +func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer) { + s.RegisterService(&_JobController_serviceDesc, srv) +} + +func _JobController_SubmitJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SubmitJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).SubmitJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.JobController/SubmitJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).SubmitJob(ctx, req.(*SubmitJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).GetJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.JobController/GetJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).GetJob(ctx, req.(*GetJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_ListJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).ListJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.JobController/ListJobs", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).ListJobs(ctx, req.(*ListJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_CancelJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).CancelJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.JobController/CancelJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).CancelJob(ctx, req.(*CancelJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_DeleteJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).DeleteJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.JobController/DeleteJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).DeleteJob(ctx, req.(*DeleteJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _JobController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.dataproc.v1.JobController", + HandlerType: (*JobControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SubmitJob", + Handler: _JobController_SubmitJob_Handler, + }, + { + MethodName: "GetJob", + Handler: _JobController_GetJob_Handler, + }, + { + MethodName: "ListJobs", + Handler: _JobController_ListJobs_Handler, + }, + { + MethodName: "CancelJob", + Handler: 
_JobController_CancelJob_Handler, + }, + { + MethodName: "DeleteJob", + Handler: _JobController_DeleteJob_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/dataproc/v1/jobs.proto", +} + +func init() { proto.RegisterFile("google/cloud/dataproc/v1/jobs.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1862 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0x5b, 0x6f, 0x23, 0x49, + 0x15, 0x8e, 0xef, 0xee, 0xe3, 0xb1, 0xd3, 0x29, 0x66, 0x17, 0xcb, 0xb3, 0xab, 0xcd, 0xf6, 0xb0, + 0x43, 0x76, 0x10, 0x36, 0xf1, 0xc2, 0xec, 0x90, 0x00, 0xbb, 0x8e, 0xed, 0x8c, 0x13, 0x8c, 0xe3, + 0x6d, 0x3b, 0x83, 0x84, 0x84, 0x7a, 0xda, 0x76, 0xc5, 0x69, 0x4f, 0xbb, 0xab, 0xd3, 0xd5, 0x6d, + 0xe1, 0x19, 0xcd, 0x0b, 0x7f, 0x00, 0x71, 0x11, 0x12, 0x3c, 0xf2, 0x2b, 0x90, 0x10, 0xe2, 0x01, + 0xc4, 0x1f, 0xe0, 0x15, 0xf1, 0xc4, 0x0f, 0x41, 0x55, 0xd5, 0xed, 0xf8, 0x12, 0x5f, 0xb2, 0xc3, + 0xae, 0x76, 0xf7, 0x29, 0xd5, 0xe7, 0x56, 0xa7, 0xea, 0xfb, 0xce, 0xa9, 0x2a, 0x07, 0xee, 0xf7, + 0x09, 0xe9, 0x9b, 0xb8, 0xd0, 0x35, 0x89, 0xd7, 0x2b, 0xf4, 0x74, 0x57, 0xb7, 0x1d, 0xd2, 0x2d, + 0x8c, 0xf6, 0x0b, 0x03, 0xd2, 0xa1, 0x79, 0xdb, 0x21, 0x2e, 0x41, 0x59, 0x61, 0x94, 0xe7, 0x46, + 0xf9, 0xc0, 0x28, 0x3f, 0xda, 0xcf, 0xbd, 0xe5, 0xbb, 0xeb, 0xb6, 0x51, 0xd0, 0x2d, 0x8b, 0xb8, + 0xba, 0x6b, 0x10, 0xcb, 0xf7, 0xcb, 0xdd, 0xf3, 0xb5, 0xfc, 0xab, 0xe3, 0x5d, 0x14, 0xf0, 0xd0, + 0x76, 0xc7, 0xbe, 0xf2, 0x9d, 0x79, 0xa5, 0x6b, 0x0c, 0x31, 0x75, 0xf5, 0xa1, 0x2d, 0x0c, 0x94, + 0xff, 0x84, 0x21, 0x5d, 0x27, 0xfd, 0xbe, 0x61, 0xf5, 0xcb, 0xc4, 0xba, 0x30, 0xfa, 0xe8, 0x12, + 0x76, 0x7a, 0x8e, 0x31, 0xc2, 0x8e, 0x66, 0x92, 0xbe, 0x66, 0xe2, 0x11, 0x36, 0x69, 0x36, 0xbc, + 0x1b, 0xd9, 0x4b, 0x15, 0x7f, 0x90, 0x5f, 0x96, 0x63, 0x7e, 0x26, 0x46, 0xbe, 0xc2, 0x03, 0xd4, + 0x49, 0xbf, 0xce, 0xdd, 0xab, 0x96, 0xeb, 0x8c, 0xd5, 0xed, 0xde, 0xac, 0x34, 0x77, 0x05, 0x77, + 0x6f, 0x32, 0x44, 0x32, 0x44, 0x9e, 0xe3, 
0x71, 0x36, 0xb4, 0x1b, 0xda, 0x93, 0x54, 0x36, 0x44, + 0x65, 0x88, 0x8d, 0x74, 0xd3, 0xc3, 0xd9, 0xf0, 0x6e, 0x68, 0x2f, 0x53, 0xfc, 0xf6, 0xa6, 0x79, + 0xf0, 0xa8, 0xaa, 0xf0, 0x3d, 0x08, 0x3f, 0x0e, 0x29, 0x36, 0xc4, 0xb8, 0x0c, 0xbd, 0x01, 0x3b, + 0xf5, 0xea, 0xd3, 0x6a, 0x5d, 0x3b, 0x6f, 0xb4, 0x9a, 0xd5, 0xf2, 0xc9, 0xf1, 0x49, 0xb5, 0x22, + 0x6f, 0xa1, 0x04, 0x44, 0x4a, 0xf5, 0xba, 0x1c, 0x42, 0x12, 0xc4, 0xda, 0x6a, 0xa9, 0x5c, 0x95, + 0xc3, 0x6c, 0x58, 0xa9, 0x1e, 0x9d, 0x3f, 0x91, 0x23, 0x28, 0x09, 0xd1, 0x93, 0xc6, 0xf1, 0x99, + 0x1c, 0x65, 0xa3, 0x9f, 0x96, 0xd4, 0x86, 0x1c, 0x63, 0xea, 0xaa, 0xaa, 0x9e, 0xa9, 0x72, 0x9c, + 0x0d, 0x8f, 0x4b, 0xed, 0x52, 0x5d, 0x4e, 0xb0, 0x40, 0x67, 0xc7, 0xc7, 0x72, 0x52, 0xf9, 0x5b, + 0x04, 0xa4, 0x9a, 0xde, 0x23, 0xc4, 0x3e, 0x25, 0x1d, 0xf4, 0x2d, 0xd8, 0x19, 0xea, 0x86, 0xa5, + 0x0d, 0x74, 0x47, 0xbb, 0x30, 0x4c, 0xac, 0x79, 0x8e, 0x21, 0x16, 0x5a, 0xdb, 0x52, 0x33, 0x4c, + 0x75, 0xaa, 0x3b, 0xc7, 0x86, 0x89, 0xcf, 0x1d, 0x03, 0xbd, 0x03, 0xc0, 0x8d, 0xbb, 0xa6, 0x4e, + 0x29, 0x5f, 0x3a, 0xb3, 0x92, 0x98, 0xac, 0xcc, 0x44, 0x08, 0x41, 0x54, 0x77, 0xfa, 0x34, 0x1b, + 0xd9, 0x8d, 0xec, 0x49, 0x2a, 0x1f, 0x23, 0x05, 0xd2, 0xd3, 0xc1, 0x69, 0x36, 0xca, 0x95, 0xa9, + 0xc1, 0x24, 0x2e, 0x45, 0xf7, 0x40, 0xba, 0xd6, 0xc7, 0xb8, 0x3e, 0x79, 0x11, 0x28, 0xdf, 0x85, + 0x3b, 0xba, 0xd3, 0xbd, 0x34, 0x46, 0xbe, 0x3e, 0x2e, 0xfc, 0x7d, 0x19, 0x37, 0x69, 0x01, 0xd8, + 0x0e, 0xb1, 0xb1, 0xe3, 0x1a, 0x98, 0x66, 0x13, 0x9c, 0x1b, 0x1f, 0x2c, 0xc7, 0x64, 0xb2, 0xfc, + 0x7c, 0x73, 0xe2, 0x25, 0x28, 0x31, 0x15, 0x06, 0x35, 0x20, 0x63, 0x0a, 0xf0, 0xb4, 0x2e, 0x47, + 0x2f, 0x9b, 0xdc, 0x0d, 0xed, 0xa5, 0x8a, 0xdf, 0xdc, 0x10, 0x6c, 0x35, 0x6d, 0x4e, 0x7f, 0xe6, + 0x7e, 0x08, 0xdb, 0x73, 0xd3, 0xdd, 0x40, 0xac, 0xbb, 0xd3, 0xc4, 0x92, 0xa6, 0x98, 0x72, 0x94, + 0x84, 0xb8, 0xe0, 0xab, 0xf2, 0xd7, 0x08, 0x24, 0x5b, 0xb6, 0xee, 0x3c, 0xff, 0xea, 0x00, 0xa8, + 0xde, 0x00, 0x60, 0x71, 0xf9, 0x3e, 0x07, 0xab, 0xff, 0x72, 0xe2, 0xf7, 0x8f, 
0x08, 0x40, 0x73, + 0x3c, 0x41, 0xb0, 0x00, 0x77, 0x39, 0x28, 0xf6, 0xd8, 0xbd, 0x24, 0xd6, 0x1c, 0x88, 0x2a, 0x47, + 0xb7, 0xc9, 0x55, 0x01, 0x8a, 0x01, 0x48, 0xe1, 0x29, 0x90, 0xf6, 0x40, 0x9e, 0xf3, 0x0f, 0x40, + 0xcc, 0xd8, 0xd3, 0xce, 0x9f, 0x0f, 0x9c, 0xed, 0x1b, 0xe0, 0xfc, 0xee, 0xf2, 0x6d, 0xbf, 0xde, + 0x8c, 0x2f, 0x11, 0xa0, 0xca, 0x7b, 0x20, 0x7d, 0xe2, 0x61, 0x67, 0x5c, 0x37, 0xa8, 0x8b, 0xb2, + 0x90, 0xb8, 0xf2, 0xb0, 0xc3, 0x96, 0x1b, 0xe2, 0xfb, 0x11, 0x7c, 0x2a, 0xbf, 0x8a, 0x42, 0xa2, + 0x66, 0x8c, 0x30, 0x83, 0xfa, 0x01, 0x64, 0x98, 0x78, 0xbc, 0x58, 0xa9, 0x77, 0xb8, 0x3c, 0x40, + 0xb8, 0x02, 0x20, 0xec, 0x4c, 0x83, 0xba, 0x7c, 0xe6, 0x54, 0xf1, 0xfe, 0xf2, 0x55, 0x4e, 0xd2, + 0x60, 0xc5, 0x7c, 0x35, 0xc9, 0x29, 0x0f, 0x5f, 0xeb, 0x12, 0xcb, 0x35, 0x2c, 0x0f, 0x6b, 0x8c, + 0x18, 0xba, 0x61, 0x7a, 0x0e, 0xce, 0x46, 0x76, 0x43, 0x7b, 0x49, 0x75, 0x27, 0x50, 0x9d, 0x59, + 0xc7, 0x42, 0x81, 0x74, 0x90, 0x69, 0xd7, 0x31, 0x6c, 0x57, 0x1b, 0xe9, 0x8e, 0xa1, 0x77, 0x4c, + 0x2c, 0xc8, 0x91, 0x2a, 0x3e, 0x5a, 0xd1, 0x4b, 0xc5, 0xd2, 0xf2, 0x2d, 0xee, 0xf9, 0x34, 0x70, + 0xf4, 0x4f, 0x58, 0x3a, 0x2b, 0x45, 0x9f, 0xcc, 0x10, 0x23, 0xc6, 0x83, 0xef, 0xaf, 0x0f, 0xbe, + 0x8a, 0x15, 0x0b, 0x7c, 0x8e, 0x2f, 0xf0, 0x39, 0x77, 0x04, 0x77, 0x6f, 0xca, 0xef, 0x36, 0x70, + 0xbf, 0x6e, 0xf9, 0x4b, 0x13, 0x82, 0x28, 0x7f, 0x89, 0x42, 0x8a, 0x13, 0xbe, 0x75, 0x65, 0x7e, + 0xfe, 0xac, 0xc0, 0x37, 0xa0, 0x1c, 0xe1, 0x40, 0x1c, 0xac, 0x69, 0xb8, 0x22, 0xdd, 0x0d, 0x91, + 0x3e, 0x9f, 0x41, 0x5a, 0xd0, 0xe8, 0x7b, 0x9b, 0x4d, 0x70, 0x2b, 0xb4, 0x1f, 0x2f, 0x76, 0xaf, + 0xc5, 0x3e, 0x11, 0x7f, 0xad, 0x3e, 0xf1, 0xc5, 0x62, 0xcf, 0xbf, 0xa3, 0x10, 0x6f, 0x1a, 0xfd, + 0x2f, 0x7e, 0x3b, 0x79, 0xb6, 0xb4, 0x9d, 0xac, 0xe0, 0x81, 0x58, 0xd9, 0x86, 0x1c, 0x6b, 0xde, + 0xd0, 0x4d, 0xbe, 0xb3, 0x36, 0xf6, 0x6b, 0x36, 0x93, 0x1b, 0xe8, 0x95, 0xf8, 0x0a, 0xd1, 0xab, + 0x0d, 0x77, 0x4e, 0x49, 0xa7, 0x69, 0xea, 0x5d, 0x3c, 0xc4, 0x96, 0xcb, 0x4e, 0xfb, 0xae, 0xe9, + 0x51, 0x17, 0x3b, 
0x9a, 0xa5, 0x0f, 0xb1, 0x1f, 0x2f, 0xe5, 0xcb, 0x1a, 0xfa, 0x10, 0x4f, 0x9b, + 0x78, 0x9e, 0xd1, 0xf3, 0xc3, 0x07, 0x26, 0xe7, 0x9e, 0xd1, 0x53, 0xfe, 0x1e, 0x06, 0xe9, 0x94, + 0x74, 0x5a, 0xae, 0xee, 0x7a, 0x14, 0x7d, 0x04, 0x31, 0xea, 0xea, 0xae, 0x08, 0x96, 0x29, 0xbe, + 0xbf, 0x7c, 0xe3, 0x26, 0x3e, 0x79, 0xf6, 0x07, 0xab, 0xc2, 0x8f, 0x9d, 0xb6, 0x3d, 0xec, 0xea, + 0x86, 0xe9, 0x5f, 0x62, 0xd5, 0xe0, 0x13, 0x55, 0x40, 0xe6, 0x26, 0x1a, 0x75, 0x75, 0xc7, 0xd5, + 0xd8, 0xeb, 0xd2, 0xaf, 0xfe, 0x5c, 0x30, 0x4b, 0xf0, 0xf4, 0xcc, 0xb7, 0x83, 0xa7, 0xa7, 0x9a, + 0xe1, 0x3e, 0x2d, 0xe6, 0xc2, 0x84, 0xca, 0xef, 0x42, 0x10, 0xe3, 0x13, 0xb2, 0x67, 0x59, 0xab, + 0x5d, 0x6a, 0x57, 0xe7, 0x9e, 0x65, 0x29, 0x48, 0x34, 0xab, 0x8d, 0xca, 0x49, 0xe3, 0x89, 0x1c, + 0x42, 0x19, 0x80, 0x56, 0xb5, 0x7d, 0xde, 0xd4, 0x2a, 0x67, 0x8d, 0xaa, 0x9c, 0x64, 0x4a, 0xf5, + 0xbc, 0xd1, 0x60, 0xca, 0x30, 0x42, 0x90, 0x29, 0x97, 0x1a, 0xe5, 0x6a, 0x5d, 0x0b, 0x1c, 0x22, + 0x53, 0xb2, 0x56, 0xbb, 0xa4, 0xb6, 0xab, 0x15, 0x39, 0x81, 0xd2, 0x20, 0x09, 0x59, 0xbd, 0x5a, + 0x11, 0xcf, 0x39, 0x1e, 0x6d, 0xfa, 0x39, 0xa7, 0x54, 0x38, 0x36, 0x2a, 0xbe, 0xc0, 0x0e, 0xb6, + 0xba, 0x18, 0xbd, 0xcd, 0xf9, 0x3f, 0xc0, 0x5d, 0x57, 0x33, 0x7a, 0x3e, 0x32, 0x92, 0x2f, 0x39, + 0xe9, 0xa1, 0x37, 0x20, 0x3e, 0x20, 0x1d, 0x6d, 0x82, 0x48, 0x6c, 0x40, 0x3a, 0x27, 0x3d, 0xe5, + 0xcf, 0x71, 0x88, 0xb0, 0xee, 0x51, 0x01, 0xc9, 0x09, 0x42, 0x71, 0xe7, 0x54, 0xf1, 0xc1, 0x4a, + 0x24, 0x26, 0x13, 0xab, 0xd7, 0x8e, 0x2c, 0x8a, 0x1d, 0x90, 0xc5, 0x6f, 0x2d, 0xab, 0xa3, 0x4c, + 0xa8, 0xa5, 0x5e, 0x3b, 0xb2, 0x0e, 0x75, 0xc9, 0x1f, 0x65, 0xda, 0x80, 0x74, 0x78, 0x4b, 0x59, + 0xd9, 0xa1, 0x26, 0x0f, 0x38, 0xd6, 0xa1, 0x2e, 0x27, 0x8f, 0xd9, 0x12, 0x48, 0x94, 0x9d, 0x23, + 0x3c, 0x48, 0x94, 0x07, 0x51, 0xd6, 0x3f, 0x22, 0x6a, 0x5b, 0x6a, 0x92, 0x06, 0x97, 0xf1, 0x27, + 0x90, 0xb2, 0xc7, 0xd7, 0x41, 0x62, 0x3c, 0xc8, 0x37, 0x36, 0xb9, 0xba, 0xd6, 0xb6, 0x54, 0xf0, + 0x5d, 0x59, 0xa0, 0x1f, 0x41, 0x92, 0x5f, 0x91, 0x59, 
0x14, 0x41, 0xc0, 0x77, 0xd7, 0xde, 0x73, + 0x6a, 0x5b, 0x6a, 0xe2, 0xd2, 0xbf, 0x2a, 0x1e, 0x42, 0xc2, 0x36, 0xfa, 0xdc, 0x5d, 0xb4, 0x97, + 0xdd, 0x75, 0x8d, 0xad, 0xb6, 0xa5, 0xc6, 0x6d, 0x71, 0x30, 0xfc, 0x18, 0xd2, 0x62, 0x0d, 0xf4, + 0xca, 0xe4, 0x21, 0xee, 0xf0, 0x10, 0xef, 0x6d, 0x74, 0xfe, 0xd6, 0xb6, 0xd4, 0x14, 0x9d, 0xba, + 0x9e, 0x1c, 0x42, 0x9c, 0xf2, 0x1a, 0xf4, 0xaf, 0xdb, 0xf7, 0x37, 0x28, 0x57, 0xd5, 0x77, 0x41, + 0xa7, 0x90, 0x11, 0x23, 0xed, 0xd2, 0xa0, 0x2e, 0x71, 0xc6, 0xd9, 0x34, 0x6f, 0xd3, 0x1b, 0x05, + 0x49, 0x0b, 0xd7, 0x9a, 0xf0, 0x44, 0x87, 0x90, 0xf3, 0x7f, 0x08, 0x22, 0x9e, 0x6b, 0x7b, 0xae, + 0xe6, 0x60, 0x4a, 0x3c, 0xa7, 0x2b, 0x8e, 0xbe, 0x1d, 0xce, 0xf1, 0xaf, 0x0b, 0x8b, 0x33, 0x6e, + 0xa0, 0xfa, 0x7a, 0x76, 0x06, 0x7e, 0x08, 0x59, 0xdf, 0x99, 0x9d, 0x54, 0x0e, 0x31, 0x79, 0x93, + 0xa7, 0xdc, 0x75, 0x9b, 0xbb, 0xbe, 0x21, 0xf4, 0x65, 0xa1, 0x66, 0xed, 0x9e, 0x9e, 0x3b, 0xc6, + 0x11, 0x40, 0xd2, 0x1d, 0xdb, 0x1c, 0x48, 0xe5, 0x05, 0xc8, 0x2d, 0xaf, 0x33, 0x34, 0x5c, 0x5e, + 0x0d, 0x57, 0x1e, 0xa6, 0xee, 0xba, 0x22, 0x7c, 0x13, 0xe2, 0x0e, 0xee, 0x1b, 0xc4, 0xe2, 0xac, + 0x96, 0x54, 0xff, 0x0b, 0x15, 0x20, 0xc2, 0x80, 0x11, 0x15, 0xf3, 0xf6, 0xea, 0xba, 0x63, 0x96, + 0xca, 0xcf, 0x21, 0xfd, 0x04, 0xff, 0x1f, 0x26, 0x5e, 0xd2, 0x15, 0xfe, 0x19, 0x86, 0x6d, 0x76, + 0xcc, 0x9f, 0x92, 0x0e, 0xbd, 0xf5, 0x0c, 0xf1, 0x99, 0x19, 0xee, 0x81, 0x64, 0xeb, 0x7d, 0xac, + 0x51, 0xe3, 0x85, 0x38, 0x6b, 0x62, 0x6a, 0x92, 0x09, 0x5a, 0xc6, 0x0b, 0xd1, 0xb3, 0x98, 0xd2, + 0x25, 0xcf, 0x71, 0x90, 0x1a, 0x37, 0x6f, 0x33, 0xc1, 0xc2, 0x71, 0x13, 0x5d, 0x3c, 0x6e, 0x30, + 0xec, 0xb0, 0x05, 0x88, 0x36, 0x3f, 0xd4, 0xdd, 0xee, 0x25, 0x76, 0x78, 0xa1, 0x66, 0x8a, 0xdf, + 0x5f, 0x71, 0x04, 0xcf, 0xae, 0x2d, 0x60, 0x19, 0xfe, 0x89, 0x08, 0xa0, 0x6e, 0x0f, 0x66, 0x05, + 0xca, 0x23, 0xd8, 0x9e, 0xb3, 0x09, 0x7e, 0x8c, 0xdb, 0x42, 0x00, 0xf1, 0x52, 0xb9, 0x7d, 0xf2, + 0xb4, 0x2a, 0xba, 0x7f, 0xe3, 0xac, 0xa1, 0xf9, 0xdf, 0x61, 0x65, 0x08, 0xf2, 0xf5, 0x5c, 
0xd4, + 0x26, 0x16, 0xc5, 0x68, 0x1f, 0xa2, 0x03, 0xd2, 0x11, 0x4f, 0xc3, 0xb5, 0x68, 0x73, 0x53, 0xf4, + 0x00, 0xb6, 0x2d, 0xfc, 0x0b, 0x57, 0x9b, 0xda, 0x2c, 0x81, 0x57, 0x9a, 0x89, 0x9b, 0xc1, 0x86, + 0x29, 0xcf, 0x40, 0x2e, 0xeb, 0x56, 0x17, 0x9b, 0x9f, 0x19, 0x33, 0x9e, 0x81, 0x5c, 0xc1, 0x26, + 0x76, 0xf1, 0x67, 0x35, 0x43, 0xf1, 0xf7, 0x71, 0x48, 0x9f, 0x92, 0x8e, 0x5f, 0x79, 0x26, 0x76, + 0xd0, 0x1f, 0x42, 0x20, 0x4d, 0x2a, 0x0d, 0x3d, 0x5c, 0xd1, 0xb7, 0xe6, 0xca, 0x31, 0xb7, 0x7a, + 0x73, 0x95, 0xd2, 0x2f, 0xff, 0xf5, 0xdf, 0xdf, 0x86, 0x0f, 0x95, 0x47, 0x85, 0xd1, 0x7e, 0xc1, + 0x4f, 0x98, 0x16, 0x5e, 0x5e, 0x2f, 0xe6, 0x55, 0x41, 0xe4, 0x4a, 0x0b, 0x2f, 0xc5, 0xe0, 0x15, + 0xff, 0x4d, 0xfc, 0x80, 0xf2, 0x89, 0x0e, 0x42, 0x0f, 0xd1, 0x6f, 0x42, 0x10, 0x17, 0x95, 0x88, + 0x56, 0x5c, 0xf9, 0x66, 0x6a, 0x75, 0x5d, 0x56, 0x1f, 0xf3, 0xac, 0x0e, 0xd0, 0xe3, 0x5b, 0x66, + 0x55, 0x78, 0x29, 0xb6, 0xf3, 0x15, 0xfa, 0x63, 0x08, 0x92, 0x01, 0xed, 0xd0, 0xfb, 0x1b, 0x97, + 0x41, 0xee, 0xe1, 0x26, 0xa6, 0x82, 0xc5, 0xca, 0x87, 0x3c, 0xcb, 0x7d, 0x54, 0xb8, 0x65, 0x96, + 0xe8, 0x4f, 0x21, 0x90, 0x26, 0x24, 0x5d, 0x85, 0xe6, 0x3c, 0x93, 0xd7, 0xed, 0xdb, 0x29, 0xcf, + 0xa8, 0xa2, 0x7c, 0xf4, 0x69, 0xf7, 0xed, 0xa0, 0xcb, 0x67, 0x64, 0xb0, 0xfe, 0x3a, 0x04, 0xd2, + 0x84, 0xe7, 0xab, 0x92, 0x9c, 0x2f, 0x86, 0xdc, 0x9b, 0x0b, 0x37, 0xcb, 0xea, 0xd0, 0x76, 0xc7, + 0x01, 0xaa, 0x0f, 0x3f, 0x35, 0xaa, 0x47, 0x43, 0x78, 0xab, 0x4b, 0x86, 0x4b, 0x53, 0x39, 0x62, + 0x77, 0x6a, 0xda, 0x64, 0xb3, 0x36, 0x43, 0x3f, 0xfb, 0xd8, 0x37, 0xeb, 0x13, 0x53, 0xb7, 0xfa, + 0x79, 0xe2, 0xf4, 0x0b, 0x7d, 0x6c, 0xf1, 0x9c, 0x0a, 0x42, 0xa5, 0xdb, 0x06, 0x5d, 0xfc, 0x9f, + 0xcf, 0x61, 0x30, 0xee, 0xc4, 0xb9, 0xf1, 0x07, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x59, + 0x9b, 0xa1, 0x1f, 0x1a, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/operations.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/operations.pb.go new file mode 
100644 index 000000000..a66e72ad3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/operations.pb.go @@ -0,0 +1,201 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/dataproc/v1/operations.proto + +package dataproc + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The operation state. +type ClusterOperationStatus_State int32 + +const ( + // Unused. + ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0 + // The operation has been created. + ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1 + // The operation is running. + ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2 + // The operation is done; either cancelled or completed. + ClusterOperationStatus_DONE ClusterOperationStatus_State = 3 +) + +var ClusterOperationStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PENDING", + 2: "RUNNING", + 3: "DONE", +} +var ClusterOperationStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "PENDING": 1, + "RUNNING": 2, + "DONE": 3, +} + +func (x ClusterOperationStatus_State) String() string { + return proto.EnumName(ClusterOperationStatus_State_name, int32(x)) +} +func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 0} +} + +// The status of the operation. +type ClusterOperationStatus struct { + // [Output-only] A message containing the operation state. 
+ State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.ClusterOperationStatus_State" json:"state,omitempty"` + // [Output-only] A message containing the detailed operation state. + InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState" json:"inner_state,omitempty"` + // [Output-only]A message containing any operation metadata details. + Details string `protobuf:"bytes,3,opt,name=details" json:"details,omitempty"` + // [Output-only] The time this state was entered. + StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` +} + +func (m *ClusterOperationStatus) Reset() { *m = ClusterOperationStatus{} } +func (m *ClusterOperationStatus) String() string { return proto.CompactTextString(m) } +func (*ClusterOperationStatus) ProtoMessage() {} +func (*ClusterOperationStatus) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *ClusterOperationStatus) GetState() ClusterOperationStatus_State { + if m != nil { + return m.State + } + return ClusterOperationStatus_UNKNOWN +} + +func (m *ClusterOperationStatus) GetInnerState() string { + if m != nil { + return m.InnerState + } + return "" +} + +func (m *ClusterOperationStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +func (m *ClusterOperationStatus) GetStateStartTime() *google_protobuf3.Timestamp { + if m != nil { + return m.StateStartTime + } + return nil +} + +// Metadata describing the operation. +type ClusterOperationMetadata struct { + // [Output-only] Name of the cluster for the operation. + ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // [Output-only] Cluster UUID for the operation. + ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` + // [Output-only] Current operation status. 
+ Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status" json:"status,omitempty"` + // [Output-only] The previous operation status. + StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` + // [Output-only] The operation type. + OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType" json:"operation_type,omitempty"` + // [Output-only] Short description of operation. + Description string `protobuf:"bytes,12,opt,name=description" json:"description,omitempty"` +} + +func (m *ClusterOperationMetadata) Reset() { *m = ClusterOperationMetadata{} } +func (m *ClusterOperationMetadata) String() string { return proto.CompactTextString(m) } +func (*ClusterOperationMetadata) ProtoMessage() {} +func (*ClusterOperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ClusterOperationMetadata) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *ClusterOperationMetadata) GetClusterUuid() string { + if m != nil { + return m.ClusterUuid + } + return "" +} + +func (m *ClusterOperationMetadata) GetStatus() *ClusterOperationStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus { + if m != nil { + return m.StatusHistory + } + return nil +} + +func (m *ClusterOperationMetadata) GetOperationType() string { + if m != nil { + return m.OperationType + } + return "" +} + +func (m *ClusterOperationMetadata) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*ClusterOperationStatus)(nil), "google.cloud.dataproc.v1.ClusterOperationStatus") + proto.RegisterType((*ClusterOperationMetadata)(nil), "google.cloud.dataproc.v1.ClusterOperationMetadata") + proto.RegisterEnum("google.cloud.dataproc.v1.ClusterOperationStatus_State", 
ClusterOperationStatus_State_name, ClusterOperationStatus_State_value) +} + +func init() { proto.RegisterFile("google/cloud/dataproc/v1/operations.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 479 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, + 0x14, 0x27, 0xed, 0xb6, 0x6e, 0xce, 0x56, 0x2a, 0x1f, 0x90, 0x55, 0x26, 0x2d, 0x14, 0x21, 0x95, + 0x4b, 0xc2, 0x86, 0x84, 0x90, 0xb8, 0xa0, 0xd1, 0x89, 0x21, 0xc0, 0xad, 0xb2, 0x55, 0x93, 0xb8, + 0x44, 0x5e, 0x62, 0x82, 0xa5, 0xc4, 0xb6, 0x6c, 0x67, 0x52, 0x3f, 0x0e, 0xdf, 0x87, 0x0f, 0x85, + 0x6c, 0x27, 0x55, 0xd9, 0xe8, 0x01, 0x4e, 0xf1, 0x7b, 0xbf, 0x3f, 0x79, 0xbf, 0xe7, 0x04, 0xbc, + 0x2c, 0x85, 0x28, 0x2b, 0x9a, 0xe4, 0x95, 0x68, 0x8a, 0xa4, 0x20, 0x86, 0x48, 0x25, 0xf2, 0xe4, + 0xee, 0x34, 0x11, 0x92, 0x2a, 0x62, 0x98, 0xe0, 0x3a, 0x96, 0x4a, 0x18, 0x01, 0x91, 0xa7, 0xc6, + 0x8e, 0x1a, 0x77, 0xd4, 0xf8, 0xee, 0x74, 0x7c, 0xdc, 0x9a, 0x10, 0xc9, 0x12, 0xc2, 0xb9, 0x30, + 0x9b, 0xba, 0xf1, 0xf3, 0x16, 0xad, 0x04, 0x2f, 0x55, 0xc3, 0x39, 0xe3, 0xe5, 0x03, 0xf3, 0xf1, + 0xd3, 0x96, 0xe4, 0xaa, 0xdb, 0xe6, 0x7b, 0x42, 0x6b, 0x69, 0x56, 0x2d, 0x78, 0x72, 0x1f, 0x34, + 0xac, 0xa6, 0xda, 0x90, 0x5a, 0x7a, 0xc2, 0xe4, 0x67, 0x0f, 0x3c, 0xf9, 0x50, 0x35, 0xda, 0x50, + 0x35, 0xef, 0x9c, 0xaf, 0x0c, 0x31, 0x8d, 0x86, 0x5f, 0xc0, 0xae, 0x36, 0xc4, 0x50, 0x14, 0x44, + 0xc1, 0x74, 0x78, 0xf6, 0x26, 0xde, 0x96, 0x22, 0xfe, 0xbb, 0x41, 0x6c, 0x1f, 0x34, 0xf5, 0x26, + 0xf0, 0x04, 0x84, 0x8c, 0x73, 0xaa, 0x32, 0xef, 0xd9, 0x8b, 0x82, 0xe9, 0x41, 0x0a, 0x5c, 0xcb, + 0xf1, 0x20, 0x02, 0x83, 0x82, 0x1a, 0xc2, 0x2a, 0x8d, 0xfa, 0x0e, 0xec, 0x4a, 0x38, 0x03, 0x23, + 0x27, 0xb2, 0x52, 0x65, 0x32, 0x1b, 0x01, 0xed, 0x44, 0xc1, 0x34, 0x3c, 0x1b, 0x77, 0x33, 0x75, + 0xf9, 0xe2, 0xeb, 0x2e, 0x5f, 0x3a, 0x74, 0x9a, 0x2b, 0x2b, 0xb1, 0xcd, 0xc9, 0x5b, 0xb0, 0xeb, + 0x5f, 0x14, 0x82, 0xc1, 0x12, 0x7f, 0xc6, 0xf3, 0x1b, 0x3c, 0x7a, 0x64, 
0x8b, 0xc5, 0x05, 0x9e, + 0x7d, 0xc2, 0x1f, 0x47, 0x81, 0x2d, 0xd2, 0x25, 0xc6, 0xb6, 0xe8, 0xc1, 0x7d, 0xb0, 0x33, 0x9b, + 0xe3, 0x8b, 0x51, 0x7f, 0xf2, 0xab, 0x07, 0xd0, 0xfd, 0x88, 0x5f, 0xa9, 0x21, 0x76, 0x05, 0xf0, + 0x19, 0x38, 0xcc, 0x3d, 0x96, 0x71, 0x52, 0x53, 0x34, 0x70, 0xb3, 0x87, 0x6d, 0x0f, 0x93, 0x9a, + 0x6e, 0x52, 0x9a, 0x86, 0x15, 0x68, 0xff, 0x0f, 0xca, 0xb2, 0x61, 0x05, 0xbc, 0x04, 0x7b, 0xda, + 0x2d, 0x0d, 0x1d, 0xb8, 0x60, 0xaf, 0xfe, 0x75, 0xd9, 0x69, 0xab, 0x87, 0x37, 0x60, 0xe8, 0x4f, + 0xd9, 0x0f, 0xa6, 0x8d, 0x50, 0x2b, 0x04, 0xa2, 0xfe, 0x7f, 0x39, 0x1e, 0x79, 0x9f, 0x4b, 0x6f, + 0x03, 0x5f, 0x80, 0xe1, 0xfa, 0xdb, 0xcb, 0xcc, 0x4a, 0x52, 0x14, 0xba, 0x1c, 0x47, 0xeb, 0xee, + 0xf5, 0x4a, 0x52, 0x18, 0x81, 0xb0, 0xa0, 0x3a, 0x57, 0x4c, 0xda, 0x16, 0x3a, 0xf4, 0x59, 0x37, + 0x5a, 0xe7, 0x1a, 0x1c, 0xe7, 0xa2, 0xde, 0x3a, 0xce, 0xf9, 0xe3, 0xf5, 0x20, 0x7a, 0x61, 0xaf, + 0x75, 0x11, 0x7c, 0x7b, 0xdf, 0x92, 0x4b, 0x51, 0x11, 0x5e, 0xc6, 0x42, 0x95, 0x49, 0x49, 0xb9, + 0xbb, 0xf4, 0xc4, 0x43, 0x44, 0x32, 0xfd, 0xf0, 0x57, 0x7c, 0xd7, 0x9d, 0x6f, 0xf7, 0x1c, 0xf9, + 0xf5, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x8c, 0x74, 0x3e, 0xb6, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/functions/v1beta2/functions.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/functions/v1beta2/functions.pb.go new file mode 100644 index 000000000..53a9f562f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/functions/v1beta2/functions.pb.go @@ -0,0 +1,1203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/functions/v1beta2/functions.proto + +/* +Package functions is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/functions/v1beta2/functions.proto + google/cloud/functions/v1beta2/operations.proto + +It has these top-level messages: + CloudFunction + HTTPSTrigger + EventTrigger + SourceRepository + CreateFunctionRequest + UpdateFunctionRequest + GetFunctionRequest + ListFunctionsRequest + ListFunctionsResponse + DeleteFunctionRequest + CallFunctionRequest + CallFunctionResponse + OperationMetadataV1Beta2 +*/ +package functions + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Describes the current stage of a deployment. +type CloudFunctionStatus int32 + +const ( + // Status not specified. + CloudFunctionStatus_STATUS_UNSPECIFIED CloudFunctionStatus = 0 + // Successfully deployed. + CloudFunctionStatus_READY CloudFunctionStatus = 1 + // Not deployed correctly - behavior is undefined. The item should be updated + // or deleted to move it out of this state. + CloudFunctionStatus_FAILED CloudFunctionStatus = 2 + // Creation or update in progress. 
+ CloudFunctionStatus_DEPLOYING CloudFunctionStatus = 3 + // Deletion in progress. + CloudFunctionStatus_DELETING CloudFunctionStatus = 4 +) + +var CloudFunctionStatus_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "READY", + 2: "FAILED", + 3: "DEPLOYING", + 4: "DELETING", +} +var CloudFunctionStatus_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "READY": 1, + "FAILED": 2, + "DEPLOYING": 3, + "DELETING": 4, +} + +func (x CloudFunctionStatus) String() string { + return proto.EnumName(CloudFunctionStatus_name, int32(x)) +} +func (CloudFunctionStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Describes a Cloud Function that contains user computation executed in +// response to an event. It encapsulate function and triggers configurations. +type CloudFunction struct { + // A user-defined name of the function. Function names must be unique + // globally and match pattern `projects/*/locations/*/functions/*` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The location of the function source code. + // + // Types that are valid to be assigned to SourceCode: + // *CloudFunction_SourceArchiveUrl + // *CloudFunction_SourceRepository + SourceCode isCloudFunction_SourceCode `protobuf_oneof:"source_code"` + // An event that triggers the function. + // + // Types that are valid to be assigned to Trigger: + // *CloudFunction_HttpsTrigger + // *CloudFunction_EventTrigger + Trigger isCloudFunction_Trigger `protobuf_oneof:"trigger"` + // Output only. Status of the function deployment. + Status CloudFunctionStatus `protobuf:"varint,7,opt,name=status,enum=google.cloud.functions.v1beta2.CloudFunctionStatus" json:"status,omitempty"` + // Output only. Name of the most recent operation modifying the function. If + // the function status is `DEPLOYING` or `DELETING`, then it points to the + // active operation. 
+ LatestOperation string `protobuf:"bytes,8,opt,name=latest_operation,json=latestOperation" json:"latest_operation,omitempty"` + // The name of the function (as defined in source code) that will be + // executed. Defaults to the resource name suffix, if not specified. For + // backward compatibility, if function with given name is not found, then the + // system will try to use function named "function". + // For Node.js this is name of a function exported by the module specified + // in `source_location`. + EntryPoint string `protobuf:"bytes,9,opt,name=entry_point,json=entryPoint" json:"entry_point,omitempty"` + // The function execution timeout. Execution is considered failed and + // can be terminated if the function is not completed at the end of the + // timeout period. Defaults to 60 seconds. + Timeout *google_protobuf3.Duration `protobuf:"bytes,10,opt,name=timeout" json:"timeout,omitempty"` + // The amount of memory in MB available for a function. + // Defaults to 256MB. + AvailableMemoryMb int32 `protobuf:"varint,11,opt,name=available_memory_mb,json=availableMemoryMb" json:"available_memory_mb,omitempty"` + // Output only. The service account of the function. + ServiceAccount string `protobuf:"bytes,13,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"` + // Output only. The last update timestamp of a Cloud Function. 
+ UpdateTime *google_protobuf4.Timestamp `protobuf:"bytes,15,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` +} + +func (m *CloudFunction) Reset() { *m = CloudFunction{} } +func (m *CloudFunction) String() string { return proto.CompactTextString(m) } +func (*CloudFunction) ProtoMessage() {} +func (*CloudFunction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isCloudFunction_SourceCode interface { + isCloudFunction_SourceCode() +} +type isCloudFunction_Trigger interface { + isCloudFunction_Trigger() +} + +type CloudFunction_SourceArchiveUrl struct { + SourceArchiveUrl string `protobuf:"bytes,14,opt,name=source_archive_url,json=sourceArchiveUrl,oneof"` +} +type CloudFunction_SourceRepository struct { + SourceRepository *SourceRepository `protobuf:"bytes,3,opt,name=source_repository,json=sourceRepository,oneof"` +} +type CloudFunction_HttpsTrigger struct { + HttpsTrigger *HTTPSTrigger `protobuf:"bytes,6,opt,name=https_trigger,json=httpsTrigger,oneof"` +} +type CloudFunction_EventTrigger struct { + EventTrigger *EventTrigger `protobuf:"bytes,12,opt,name=event_trigger,json=eventTrigger,oneof"` +} + +func (*CloudFunction_SourceArchiveUrl) isCloudFunction_SourceCode() {} +func (*CloudFunction_SourceRepository) isCloudFunction_SourceCode() {} +func (*CloudFunction_HttpsTrigger) isCloudFunction_Trigger() {} +func (*CloudFunction_EventTrigger) isCloudFunction_Trigger() {} + +func (m *CloudFunction) GetSourceCode() isCloudFunction_SourceCode { + if m != nil { + return m.SourceCode + } + return nil +} +func (m *CloudFunction) GetTrigger() isCloudFunction_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +func (m *CloudFunction) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CloudFunction) GetSourceArchiveUrl() string { + if x, ok := m.GetSourceCode().(*CloudFunction_SourceArchiveUrl); ok { + return x.SourceArchiveUrl + } + return "" +} + +func (m *CloudFunction) 
GetSourceRepository() *SourceRepository { + if x, ok := m.GetSourceCode().(*CloudFunction_SourceRepository); ok { + return x.SourceRepository + } + return nil +} + +func (m *CloudFunction) GetHttpsTrigger() *HTTPSTrigger { + if x, ok := m.GetTrigger().(*CloudFunction_HttpsTrigger); ok { + return x.HttpsTrigger + } + return nil +} + +func (m *CloudFunction) GetEventTrigger() *EventTrigger { + if x, ok := m.GetTrigger().(*CloudFunction_EventTrigger); ok { + return x.EventTrigger + } + return nil +} + +func (m *CloudFunction) GetStatus() CloudFunctionStatus { + if m != nil { + return m.Status + } + return CloudFunctionStatus_STATUS_UNSPECIFIED +} + +func (m *CloudFunction) GetLatestOperation() string { + if m != nil { + return m.LatestOperation + } + return "" +} + +func (m *CloudFunction) GetEntryPoint() string { + if m != nil { + return m.EntryPoint + } + return "" +} + +func (m *CloudFunction) GetTimeout() *google_protobuf3.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *CloudFunction) GetAvailableMemoryMb() int32 { + if m != nil { + return m.AvailableMemoryMb + } + return 0 +} + +func (m *CloudFunction) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + +func (m *CloudFunction) GetUpdateTime() *google_protobuf4.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CloudFunction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CloudFunction_OneofMarshaler, _CloudFunction_OneofUnmarshaler, _CloudFunction_OneofSizer, []interface{}{ + (*CloudFunction_SourceArchiveUrl)(nil), + (*CloudFunction_SourceRepository)(nil), + (*CloudFunction_HttpsTrigger)(nil), + (*CloudFunction_EventTrigger)(nil), + } +} + +func _CloudFunction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CloudFunction) + // source_code + switch x := m.SourceCode.(type) { + case *CloudFunction_SourceArchiveUrl: + b.EncodeVarint(14<<3 | proto.WireBytes) + b.EncodeStringBytes(x.SourceArchiveUrl) + case *CloudFunction_SourceRepository: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SourceRepository); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CloudFunction.SourceCode has unexpected type %T", x) + } + // trigger + switch x := m.Trigger.(type) { + case *CloudFunction_HttpsTrigger: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HttpsTrigger); err != nil { + return err + } + case *CloudFunction_EventTrigger: + b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EventTrigger); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CloudFunction.Trigger has unexpected type %T", x) + } + return nil +} + +func _CloudFunction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CloudFunction) + switch tag { + case 14: // source_code.source_archive_url + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.SourceCode = &CloudFunction_SourceArchiveUrl{x} + return true, err + case 3: // source_code.source_repository + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + msg := new(SourceRepository) + err := b.DecodeMessage(msg) + m.SourceCode = &CloudFunction_SourceRepository{msg} + return true, err + case 6: // trigger.https_trigger + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HTTPSTrigger) + err := b.DecodeMessage(msg) + m.Trigger = &CloudFunction_HttpsTrigger{msg} + return true, err + case 12: // trigger.event_trigger + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(EventTrigger) + err := b.DecodeMessage(msg) + m.Trigger = &CloudFunction_EventTrigger{msg} + return true, err + default: + return false, nil + } +} + +func _CloudFunction_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CloudFunction) + // source_code + switch x := m.SourceCode.(type) { + case *CloudFunction_SourceArchiveUrl: + n += proto.SizeVarint(14<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.SourceArchiveUrl))) + n += len(x.SourceArchiveUrl) + case *CloudFunction_SourceRepository: + s := proto.Size(x.SourceRepository) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // trigger + switch x := m.Trigger.(type) { + case *CloudFunction_HttpsTrigger: + s := proto.Size(x.HttpsTrigger) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *CloudFunction_EventTrigger: + s := proto.Size(x.EventTrigger) + n += proto.SizeVarint(12<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Describes HTTPSTrigger, could be used to connect web hooks to function. +type HTTPSTrigger struct { + // Output only. The deployed url for the function. 
+ Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` +} + +func (m *HTTPSTrigger) Reset() { *m = HTTPSTrigger{} } +func (m *HTTPSTrigger) String() string { return proto.CompactTextString(m) } +func (*HTTPSTrigger) ProtoMessage() {} +func (*HTTPSTrigger) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *HTTPSTrigger) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +// Describes EventTrigger, used to request events be sent from another +// service. +type EventTrigger struct { + // `event_type` names contain the service that is sending an event and the + // kind of event that was fired. Must be of the form + // `providers/*/eventTypes/*` e.g. Directly handle a Message published to + // Google Cloud Pub/Sub `providers/cloud.pubsub/eventTypes/topic.publish` + // + // Handle an object changing in Google Cloud Storage + // `providers/cloud.storage/eventTypes/object.change` + // + // Handle a write to the Firebase Realtime Database + // `providers/firebase.database/eventTypes/data.write` + EventType string `protobuf:"bytes,1,opt,name=event_type,json=eventType" json:"event_type,omitempty"` + // Which instance of the source's service should send events. E.g. for Pub/Sub + // this would be a Pub/Sub topic at `projects/*/topics/*`. For Google Cloud + // Storage this would be a bucket at `projects/*/buckets/*`. 
For any source + // that only supports one instance per-project, this should be the name of the + // project (`projects/*`) + Resource string `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` +} + +func (m *EventTrigger) Reset() { *m = EventTrigger{} } +func (m *EventTrigger) String() string { return proto.CompactTextString(m) } +func (*EventTrigger) ProtoMessage() {} +func (*EventTrigger) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *EventTrigger) GetEventType() string { + if m != nil { + return m.EventType + } + return "" +} + +func (m *EventTrigger) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +// Describes the location of the function source in a remote repository. +type SourceRepository struct { + // URL to the hosted repository where the function is defined. Only paths in + // https://source.developers.google.com domain are supported. The path should + // contain the name of the repository. + RepositoryUrl string `protobuf:"bytes,1,opt,name=repository_url,json=repositoryUrl" json:"repository_url,omitempty"` + // The path within the repository where the function is defined. The path + // should point to the directory where Cloud Functions files are located. Use + // "/" if the function is defined directly in the root directory of a + // repository. + SourcePath string `protobuf:"bytes,2,opt,name=source_path,json=sourcePath" json:"source_path,omitempty"` + // The version of a function. Defaults to the latest version of the master + // branch. + // + // Types that are valid to be assigned to Version: + // *SourceRepository_Branch + // *SourceRepository_Tag + // *SourceRepository_Revision + Version isSourceRepository_Version `protobuf_oneof:"version"` + // Output only. The id of the revision that was resolved at the moment of + // function creation or update. 
For example when a user deployed from a + // branch, it will be the revision id of the latest change on this branch at + // that time. If user deployed from revision then this value will be always + // equal to the revision specified by the user. + DeployedRevision string `protobuf:"bytes,6,opt,name=deployed_revision,json=deployedRevision" json:"deployed_revision,omitempty"` +} + +func (m *SourceRepository) Reset() { *m = SourceRepository{} } +func (m *SourceRepository) String() string { return proto.CompactTextString(m) } +func (*SourceRepository) ProtoMessage() {} +func (*SourceRepository) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type isSourceRepository_Version interface { + isSourceRepository_Version() +} + +type SourceRepository_Branch struct { + Branch string `protobuf:"bytes,3,opt,name=branch,oneof"` +} +type SourceRepository_Tag struct { + Tag string `protobuf:"bytes,4,opt,name=tag,oneof"` +} +type SourceRepository_Revision struct { + Revision string `protobuf:"bytes,5,opt,name=revision,oneof"` +} + +func (*SourceRepository_Branch) isSourceRepository_Version() {} +func (*SourceRepository_Tag) isSourceRepository_Version() {} +func (*SourceRepository_Revision) isSourceRepository_Version() {} + +func (m *SourceRepository) GetVersion() isSourceRepository_Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *SourceRepository) GetRepositoryUrl() string { + if m != nil { + return m.RepositoryUrl + } + return "" +} + +func (m *SourceRepository) GetSourcePath() string { + if m != nil { + return m.SourcePath + } + return "" +} + +func (m *SourceRepository) GetBranch() string { + if x, ok := m.GetVersion().(*SourceRepository_Branch); ok { + return x.Branch + } + return "" +} + +func (m *SourceRepository) GetTag() string { + if x, ok := m.GetVersion().(*SourceRepository_Tag); ok { + return x.Tag + } + return "" +} + +func (m *SourceRepository) GetRevision() string { + if x, ok := 
m.GetVersion().(*SourceRepository_Revision); ok { + return x.Revision + } + return "" +} + +func (m *SourceRepository) GetDeployedRevision() string { + if m != nil { + return m.DeployedRevision + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SourceRepository) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SourceRepository_OneofMarshaler, _SourceRepository_OneofUnmarshaler, _SourceRepository_OneofSizer, []interface{}{ + (*SourceRepository_Branch)(nil), + (*SourceRepository_Tag)(nil), + (*SourceRepository_Revision)(nil), + } +} + +func _SourceRepository_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SourceRepository) + // version + switch x := m.Version.(type) { + case *SourceRepository_Branch: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Branch) + case *SourceRepository_Tag: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Tag) + case *SourceRepository_Revision: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Revision) + case nil: + default: + return fmt.Errorf("SourceRepository.Version has unexpected type %T", x) + } + return nil +} + +func _SourceRepository_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SourceRepository) + switch tag { + case 3: // version.branch + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Version = &SourceRepository_Branch{x} + return true, err + case 4: // version.tag + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Version = &SourceRepository_Tag{x} + return true, err + case 5: // version.revision + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := 
b.DecodeStringBytes() + m.Version = &SourceRepository_Revision{x} + return true, err + default: + return false, nil + } +} + +func _SourceRepository_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SourceRepository) + // version + switch x := m.Version.(type) { + case *SourceRepository_Branch: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Branch))) + n += len(x.Branch) + case *SourceRepository_Tag: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Tag))) + n += len(x.Tag) + case *SourceRepository_Revision: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Revision))) + n += len(x.Revision) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request for the `CreateFunction` method. +type CreateFunctionRequest struct { + // The project and location in which the function should be created, specified + // in the format `projects/*/locations/*` + Location string `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` + // Function to be created. + Function *CloudFunction `protobuf:"bytes,2,opt,name=function" json:"function,omitempty"` +} + +func (m *CreateFunctionRequest) Reset() { *m = CreateFunctionRequest{} } +func (m *CreateFunctionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateFunctionRequest) ProtoMessage() {} +func (*CreateFunctionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *CreateFunctionRequest) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *CreateFunctionRequest) GetFunction() *CloudFunction { + if m != nil { + return m.Function + } + return nil +} + +// Request for the `UpdateFunction` method. +type UpdateFunctionRequest struct { + // The name of the function to be updated. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // New version of the function. + Function *CloudFunction `protobuf:"bytes,2,opt,name=function" json:"function,omitempty"` +} + +func (m *UpdateFunctionRequest) Reset() { *m = UpdateFunctionRequest{} } +func (m *UpdateFunctionRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateFunctionRequest) ProtoMessage() {} +func (*UpdateFunctionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *UpdateFunctionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateFunctionRequest) GetFunction() *CloudFunction { + if m != nil { + return m.Function + } + return nil +} + +// Request for the `GetFunction` method. +type GetFunctionRequest struct { + // The name of the function which details should be obtained. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetFunctionRequest) Reset() { *m = GetFunctionRequest{} } +func (m *GetFunctionRequest) String() string { return proto.CompactTextString(m) } +func (*GetFunctionRequest) ProtoMessage() {} +func (*GetFunctionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *GetFunctionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for the `ListFunctions` method. +type ListFunctionsRequest struct { + // The project and location from which the function should be listed, + // specified in the format `projects/*/locations/*` + // If you want to list functions in all locations, use "-" in place of a + // location. + Location string `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` + // Maximum number of functions to return per call. 
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last + // `ListFunctionsResponse`; indicates that + // this is a continuation of a prior `ListFunctions` call, and that the + // system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListFunctionsRequest) Reset() { *m = ListFunctionsRequest{} } +func (m *ListFunctionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListFunctionsRequest) ProtoMessage() {} +func (*ListFunctionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListFunctionsRequest) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *ListFunctionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListFunctionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the `ListFunctions` method. +type ListFunctionsResponse struct { + // The functions that match the request. + Functions []*CloudFunction `protobuf:"bytes,1,rep,name=functions" json:"functions,omitempty"` + // If not empty, indicates that there may be more functions that match + // the request; this value should be passed in a new + // [google.cloud.functions.v1beta2.ListFunctionsRequest][] + // to get more functions. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListFunctionsResponse) Reset() { *m = ListFunctionsResponse{} } +func (m *ListFunctionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListFunctionsResponse) ProtoMessage() {} +func (*ListFunctionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ListFunctionsResponse) GetFunctions() []*CloudFunction { + if m != nil { + return m.Functions + } + return nil +} + +func (m *ListFunctionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the `DeleteFunction` method. +type DeleteFunctionRequest struct { + // The name of the function which should be deleted. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteFunctionRequest) Reset() { *m = DeleteFunctionRequest{} } +func (m *DeleteFunctionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteFunctionRequest) ProtoMessage() {} +func (*DeleteFunctionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *DeleteFunctionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for the `CallFunction` method. +type CallFunctionRequest struct { + // The name of the function to be called. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input to be passed to the function. 
+ Data string `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` +} + +func (m *CallFunctionRequest) Reset() { *m = CallFunctionRequest{} } +func (m *CallFunctionRequest) String() string { return proto.CompactTextString(m) } +func (*CallFunctionRequest) ProtoMessage() {} +func (*CallFunctionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *CallFunctionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CallFunctionRequest) GetData() string { + if m != nil { + return m.Data + } + return "" +} + +// Response of `CallFunction` method. +type CallFunctionResponse struct { + // Execution id of function invocation. + ExecutionId string `protobuf:"bytes,1,opt,name=execution_id,json=executionId" json:"execution_id,omitempty"` + // Result populated for successful execution of synchronous function. Will + // not be populated if function does not return a result through context. + Result string `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` + // Either system or user-function generated error. Set if execution + // was not successful. 
+ Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` +} + +func (m *CallFunctionResponse) Reset() { *m = CallFunctionResponse{} } +func (m *CallFunctionResponse) String() string { return proto.CompactTextString(m) } +func (*CallFunctionResponse) ProtoMessage() {} +func (*CallFunctionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *CallFunctionResponse) GetExecutionId() string { + if m != nil { + return m.ExecutionId + } + return "" +} + +func (m *CallFunctionResponse) GetResult() string { + if m != nil { + return m.Result + } + return "" +} + +func (m *CallFunctionResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterType((*CloudFunction)(nil), "google.cloud.functions.v1beta2.CloudFunction") + proto.RegisterType((*HTTPSTrigger)(nil), "google.cloud.functions.v1beta2.HTTPSTrigger") + proto.RegisterType((*EventTrigger)(nil), "google.cloud.functions.v1beta2.EventTrigger") + proto.RegisterType((*SourceRepository)(nil), "google.cloud.functions.v1beta2.SourceRepository") + proto.RegisterType((*CreateFunctionRequest)(nil), "google.cloud.functions.v1beta2.CreateFunctionRequest") + proto.RegisterType((*UpdateFunctionRequest)(nil), "google.cloud.functions.v1beta2.UpdateFunctionRequest") + proto.RegisterType((*GetFunctionRequest)(nil), "google.cloud.functions.v1beta2.GetFunctionRequest") + proto.RegisterType((*ListFunctionsRequest)(nil), "google.cloud.functions.v1beta2.ListFunctionsRequest") + proto.RegisterType((*ListFunctionsResponse)(nil), "google.cloud.functions.v1beta2.ListFunctionsResponse") + proto.RegisterType((*DeleteFunctionRequest)(nil), "google.cloud.functions.v1beta2.DeleteFunctionRequest") + proto.RegisterType((*CallFunctionRequest)(nil), "google.cloud.functions.v1beta2.CallFunctionRequest") + proto.RegisterType((*CallFunctionResponse)(nil), "google.cloud.functions.v1beta2.CallFunctionResponse") + 
proto.RegisterEnum("google.cloud.functions.v1beta2.CloudFunctionStatus", CloudFunctionStatus_name, CloudFunctionStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for CloudFunctionsService service + +type CloudFunctionsServiceClient interface { + // Returns a list of functions that belong to the requested project. + ListFunctions(ctx context.Context, in *ListFunctionsRequest, opts ...grpc.CallOption) (*ListFunctionsResponse, error) + // Returns a function with the given name from the requested project. + GetFunction(ctx context.Context, in *GetFunctionRequest, opts ...grpc.CallOption) (*CloudFunction, error) + // Creates a new function. If a function with the given name already exists in + // the specified project, the long running operation will return + // `ALREADY_EXISTS` error. + CreateFunction(ctx context.Context, in *CreateFunctionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Updates existing function. + UpdateFunction(ctx context.Context, in *UpdateFunctionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes a function with the given name from the specified project. If the + // given function is used by some trigger, the trigger will be updated to + // remove this function. + DeleteFunction(ctx context.Context, in *DeleteFunctionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Invokes synchronously deployed function. To be used for testing, very + // limited traffic allowed. 
+ CallFunction(ctx context.Context, in *CallFunctionRequest, opts ...grpc.CallOption) (*CallFunctionResponse, error) +} + +type cloudFunctionsServiceClient struct { + cc *grpc.ClientConn +} + +func NewCloudFunctionsServiceClient(cc *grpc.ClientConn) CloudFunctionsServiceClient { + return &cloudFunctionsServiceClient{cc} +} + +func (c *cloudFunctionsServiceClient) ListFunctions(ctx context.Context, in *ListFunctionsRequest, opts ...grpc.CallOption) (*ListFunctionsResponse, error) { + out := new(ListFunctionsResponse) + err := grpc.Invoke(ctx, "/google.cloud.functions.v1beta2.CloudFunctionsService/ListFunctions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudFunctionsServiceClient) GetFunction(ctx context.Context, in *GetFunctionRequest, opts ...grpc.CallOption) (*CloudFunction, error) { + out := new(CloudFunction) + err := grpc.Invoke(ctx, "/google.cloud.functions.v1beta2.CloudFunctionsService/GetFunction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudFunctionsServiceClient) CreateFunction(ctx context.Context, in *CreateFunctionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.functions.v1beta2.CloudFunctionsService/CreateFunction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudFunctionsServiceClient) UpdateFunction(ctx context.Context, in *UpdateFunctionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.functions.v1beta2.CloudFunctionsService/UpdateFunction", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudFunctionsServiceClient) DeleteFunction(ctx context.Context, in *DeleteFunctionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.functions.v1beta2.CloudFunctionsService/DeleteFunction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudFunctionsServiceClient) CallFunction(ctx context.Context, in *CallFunctionRequest, opts ...grpc.CallOption) (*CallFunctionResponse, error) { + out := new(CallFunctionResponse) + err := grpc.Invoke(ctx, "/google.cloud.functions.v1beta2.CloudFunctionsService/CallFunction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for CloudFunctionsService service + +type CloudFunctionsServiceServer interface { + // Returns a list of functions that belong to the requested project. + ListFunctions(context.Context, *ListFunctionsRequest) (*ListFunctionsResponse, error) + // Returns a function with the given name from the requested project. + GetFunction(context.Context, *GetFunctionRequest) (*CloudFunction, error) + // Creates a new function. If a function with the given name already exists in + // the specified project, the long running operation will return + // `ALREADY_EXISTS` error. + CreateFunction(context.Context, *CreateFunctionRequest) (*google_longrunning.Operation, error) + // Updates existing function. + UpdateFunction(context.Context, *UpdateFunctionRequest) (*google_longrunning.Operation, error) + // Deletes a function with the given name from the specified project. If the + // given function is used by some trigger, the trigger will be updated to + // remove this function. + DeleteFunction(context.Context, *DeleteFunctionRequest) (*google_longrunning.Operation, error) + // Invokes synchronously deployed function. 
To be used for testing, very + // limited traffic allowed. + CallFunction(context.Context, *CallFunctionRequest) (*CallFunctionResponse, error) +} + +func RegisterCloudFunctionsServiceServer(s *grpc.Server, srv CloudFunctionsServiceServer) { + s.RegisterService(&_CloudFunctionsService_serviceDesc, srv) +} + +func _CloudFunctionsService_ListFunctions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListFunctionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudFunctionsServiceServer).ListFunctions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.functions.v1beta2.CloudFunctionsService/ListFunctions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudFunctionsServiceServer).ListFunctions(ctx, req.(*ListFunctionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudFunctionsService_GetFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFunctionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudFunctionsServiceServer).GetFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.functions.v1beta2.CloudFunctionsService/GetFunction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudFunctionsServiceServer).GetFunction(ctx, req.(*GetFunctionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudFunctionsService_CreateFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateFunctionRequest) + if err := dec(in); err 
!= nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudFunctionsServiceServer).CreateFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.functions.v1beta2.CloudFunctionsService/CreateFunction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudFunctionsServiceServer).CreateFunction(ctx, req.(*CreateFunctionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudFunctionsService_UpdateFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateFunctionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudFunctionsServiceServer).UpdateFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.functions.v1beta2.CloudFunctionsService/UpdateFunction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudFunctionsServiceServer).UpdateFunction(ctx, req.(*UpdateFunctionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudFunctionsService_DeleteFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteFunctionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudFunctionsServiceServer).DeleteFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.functions.v1beta2.CloudFunctionsService/DeleteFunction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudFunctionsServiceServer).DeleteFunction(ctx, req.(*DeleteFunctionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_CloudFunctionsService_CallFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallFunctionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudFunctionsServiceServer).CallFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.functions.v1beta2.CloudFunctionsService/CallFunction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudFunctionsServiceServer).CallFunction(ctx, req.(*CallFunctionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CloudFunctionsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.functions.v1beta2.CloudFunctionsService", + HandlerType: (*CloudFunctionsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListFunctions", + Handler: _CloudFunctionsService_ListFunctions_Handler, + }, + { + MethodName: "GetFunction", + Handler: _CloudFunctionsService_GetFunction_Handler, + }, + { + MethodName: "CreateFunction", + Handler: _CloudFunctionsService_CreateFunction_Handler, + }, + { + MethodName: "UpdateFunction", + Handler: _CloudFunctionsService_UpdateFunction_Handler, + }, + { + MethodName: "DeleteFunction", + Handler: _CloudFunctionsService_DeleteFunction_Handler, + }, + { + MethodName: "CallFunction", + Handler: _CloudFunctionsService_CallFunction_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/functions/v1beta2/functions.proto", +} + +func init() { proto.RegisterFile("google/cloud/functions/v1beta2/functions.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1216 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4d, 0x73, 0xdb, 0x44, + 0x18, 0xae, 0xf2, 0xe1, 0xc6, 0xaf, 0x3f, 0xe2, 0x6e, 0x9b, 0x8e, 0x30, 0x2d, 0x0d, 0x62, 0x80, + 
0x90, 0x82, 0x0d, 0x6e, 0x80, 0x99, 0x7e, 0x30, 0x24, 0xb1, 0x93, 0x78, 0x9a, 0xb6, 0x1e, 0xd9, + 0x39, 0x94, 0x8b, 0x66, 0x2d, 0x6f, 0x15, 0x81, 0xac, 0x15, 0xab, 0x95, 0xa7, 0x29, 0x53, 0x0e, + 0xcc, 0x70, 0xe3, 0xc6, 0x3f, 0xe8, 0x99, 0xe1, 0x07, 0x30, 0xc3, 0x9d, 0x3b, 0x57, 0x86, 0x13, + 0x3f, 0x84, 0xd9, 0xd5, 0x4a, 0x96, 0x93, 0x14, 0x25, 0x19, 0x6e, 0xda, 0xe7, 0xfd, 0x7a, 0x76, + 0xdf, 0x67, 0x5f, 0xaf, 0xa1, 0xe1, 0x50, 0xea, 0x78, 0xa4, 0x69, 0x7b, 0x34, 0x1a, 0x35, 0x9f, + 0x45, 0xbe, 0xcd, 0x5d, 0xea, 0x87, 0xcd, 0xc9, 0x27, 0x43, 0xc2, 0x71, 0x6b, 0x8a, 0x34, 0x02, + 0x46, 0x39, 0x45, 0x6f, 0xc5, 0xfe, 0x0d, 0xe9, 0xdf, 0x98, 0x5a, 0x95, 0x7f, 0xfd, 0x86, 0xca, + 0x87, 0x03, 0xb7, 0x89, 0x7d, 0x9f, 0x72, 0x9c, 0x89, 0xae, 0xaf, 0x64, 0xad, 0x11, 0x3f, 0x54, + 0x70, 0x33, 0x87, 0x04, 0x0d, 0x08, 0x9b, 0xc9, 0xf3, 0x8e, 0x0a, 0xf0, 0xa8, 0xef, 0xb0, 0xc8, + 0xf7, 0x5d, 0xdf, 0x39, 0xe9, 0xa4, 0xa8, 0x36, 0xe5, 0x6a, 0x18, 0x3d, 0x6b, 0x8e, 0xa2, 0xd8, + 0x41, 0xd9, 0x6f, 0x1d, 0xb7, 0x73, 0x77, 0x4c, 0x42, 0x8e, 0xc7, 0x41, 0xec, 0x60, 0xfc, 0xb5, + 0x08, 0x95, 0x6d, 0x41, 0x69, 0x47, 0x31, 0x42, 0x08, 0x16, 0x7c, 0x3c, 0x26, 0xba, 0xb6, 0xaa, + 0xad, 0x15, 0x4d, 0xf9, 0x8d, 0x1a, 0x80, 0x42, 0x1a, 0x31, 0x9b, 0x58, 0x98, 0xd9, 0x87, 0xee, + 0x84, 0x58, 0x11, 0xf3, 0xf4, 0xaa, 0xf0, 0xd8, 0xbb, 0x64, 0xd6, 0x62, 0xdb, 0x66, 0x6c, 0x3a, + 0x60, 0x1e, 0xb2, 0xe0, 0x8a, 0xf2, 0x67, 0x24, 0xa0, 0xa1, 0xcb, 0x29, 0x3b, 0xd2, 0xe7, 0x57, + 0xb5, 0xb5, 0x52, 0xeb, 0xe3, 0xc6, 0x7f, 0x9f, 0x6e, 0xa3, 0x2f, 0x03, 0xcd, 0x34, 0x6e, 0x5a, + 0x60, 0x8a, 0xa1, 0x3e, 0x54, 0x0e, 0x39, 0x0f, 0x42, 0x8b, 0x33, 0xd7, 0x71, 0x08, 0xd3, 0x0b, + 0x32, 0xf9, 0x87, 0x79, 0xc9, 0xf7, 0x06, 0x83, 0x5e, 0x7f, 0x10, 0xc7, 0xec, 0x69, 0x66, 0x59, + 0x26, 0x51, 0x6b, 0x91, 0x94, 0x4c, 0x88, 0xcf, 0xd3, 0xa4, 0xe5, 0xb3, 0x25, 0xed, 0x88, 0xa0, + 0x4c, 0x52, 0x92, 0x59, 0xa3, 0x87, 0x50, 0x08, 0x39, 0xe6, 0x51, 0xa8, 0x5f, 0x5e, 0xd5, 0xd6, + 0xaa, 0xad, 0x3b, 0x79, 0xd9, 0x66, 
0xba, 0xd1, 0x97, 0xa1, 0xa6, 0x4a, 0x81, 0x3e, 0x80, 0x9a, + 0x87, 0x39, 0x09, 0xb9, 0x95, 0x2a, 0x41, 0x5f, 0x92, 0x7d, 0x5a, 0x8e, 0xf1, 0x27, 0x09, 0x8c, + 0x6e, 0x41, 0x89, 0xf8, 0x9c, 0x1d, 0x59, 0x01, 0x75, 0x7d, 0xae, 0x17, 0xa5, 0x17, 0x48, 0xa8, + 0x27, 0x10, 0x74, 0x07, 0x2e, 0x0b, 0x31, 0xd0, 0x88, 0xeb, 0x20, 0xf7, 0xf9, 0x46, 0xc2, 0x2c, + 0x11, 0x4b, 0xa3, 0xad, 0xc4, 0x64, 0x26, 0x9e, 0xa8, 0x01, 0x57, 0xf1, 0x04, 0xbb, 0x1e, 0x1e, + 0x7a, 0xc4, 0x1a, 0x93, 0x31, 0x65, 0x47, 0xd6, 0x78, 0xa8, 0x97, 0x56, 0xb5, 0xb5, 0x45, 0xf3, + 0x4a, 0x6a, 0x7a, 0x24, 0x2d, 0x8f, 0x86, 0xe8, 0x7d, 0x58, 0x0e, 0x09, 0x9b, 0xb8, 0x42, 0x39, + 0xb6, 0x4d, 0x23, 0x9f, 0xeb, 0x15, 0xc9, 0xa4, 0xaa, 0xe0, 0xcd, 0x18, 0x45, 0xf7, 0xa0, 0x14, + 0x05, 0x23, 0xcc, 0x89, 0x25, 0x4a, 0xe9, 0xcb, 0x92, 0x51, 0xfd, 0x04, 0xa3, 0x41, 0x22, 0x5f, + 0x13, 0x62, 0x77, 0x01, 0x6c, 0x55, 0xa0, 0xa4, 0xe4, 0x66, 0xd3, 0x11, 0xd9, 0x2a, 0xc2, 0x65, + 0xd5, 0x41, 0x63, 0x15, 0xca, 0xd9, 0x96, 0xa3, 0x1a, 0xcc, 0x0b, 0xe5, 0xc6, 0xda, 0x16, 0x9f, + 0x46, 0x17, 0xca, 0xd9, 0xfe, 0xa1, 0x9b, 0x00, 0x4a, 0x04, 0x47, 0x41, 0x72, 0x09, 0x8a, 0x71, + 0x47, 0x8f, 0x02, 0x82, 0xea, 0xb0, 0xc4, 0x48, 0x5c, 0x4c, 0x9f, 0x93, 0xc6, 0x74, 0x6d, 0xfc, + 0xad, 0x41, 0xed, 0xb8, 0x7a, 0xd1, 0xbb, 0x50, 0x9d, 0xde, 0x01, 0x6b, 0x5a, 0xbc, 0x32, 0x45, + 0xc5, 0x8d, 0xb9, 0x95, 0x6e, 0x21, 0xc0, 0xfc, 0x50, 0xa5, 0x86, 0x18, 0xea, 0x61, 0x7e, 0x88, + 0x74, 0x28, 0x0c, 0x19, 0xf6, 0xed, 0x43, 0x79, 0x8f, 0xc4, 0xb5, 0x53, 0x6b, 0x84, 0x60, 0x9e, + 0x63, 0x47, 0x5f, 0x50, 0xb0, 0x58, 0xa0, 0x1b, 0x82, 0xe6, 0xc4, 0x0d, 0x85, 0x40, 0x16, 0x95, + 0x21, 0x45, 0xd0, 0x6d, 0xb8, 0x32, 0x22, 0x81, 0x47, 0x8f, 0xc8, 0xc8, 0x4a, 0xdd, 0x0a, 0xb2, + 0x64, 0x2d, 0x31, 0x98, 0x0a, 0x17, 0xa7, 0x39, 0x21, 0x4c, 0x7c, 0x1a, 0xdf, 0xc3, 0xca, 0x36, + 0x23, 0x98, 0x93, 0x44, 0x9e, 0x26, 0xf9, 0x36, 0x22, 0x21, 0x17, 0xa7, 0xe2, 0x51, 0x3b, 0xd6, + 0x63, 0xbc, 0xbd, 0x74, 0x8d, 0xba, 0xb0, 0x94, 0x88, 0x5c, 0x6e, 0xab, 
0xd4, 0xfa, 0xe8, 0x5c, + 0x57, 0xc0, 0x4c, 0xc3, 0x8d, 0x09, 0xac, 0x1c, 0xc8, 0xae, 0x1f, 0xaf, 0x7f, 0xda, 0xcc, 0xfa, + 0x1f, 0xeb, 0xae, 0x01, 0xda, 0x25, 0xfc, 0x0c, 0x45, 0x0d, 0x1f, 0xae, 0xed, 0xbb, 0x61, 0xea, + 0x1a, 0x9e, 0xe5, 0x80, 0xde, 0x84, 0x62, 0x80, 0x1d, 0x62, 0x85, 0xee, 0x8b, 0x58, 0x53, 0x8b, + 0xe6, 0x92, 0x00, 0xfa, 0xee, 0x0b, 0x22, 0xe4, 0x28, 0x8d, 0x9c, 0x7e, 0x43, 0xfc, 0xb8, 0xf5, + 0xa6, 0x74, 0x1f, 0x08, 0xc0, 0xf8, 0x49, 0x83, 0x95, 0x63, 0x05, 0xc3, 0x80, 0xfa, 0x21, 0x41, + 0x0f, 0xa1, 0x98, 0x6e, 0x50, 0xd7, 0x56, 0xe7, 0xcf, 0xbf, 0xff, 0x69, 0x3c, 0x7a, 0x0f, 0x96, + 0x7d, 0xf2, 0x9c, 0x5b, 0x19, 0x2a, 0xb1, 0x42, 0x2b, 0x02, 0xee, 0xa5, 0x74, 0x6e, 0xc3, 0x4a, + 0x9b, 0x78, 0xe4, 0x4c, 0x0d, 0x32, 0x1e, 0xc0, 0xd5, 0x6d, 0xec, 0x79, 0x67, 0xe9, 0x25, 0x82, + 0x85, 0x11, 0xe6, 0x58, 0x15, 0x95, 0xdf, 0x86, 0x03, 0xd7, 0x66, 0xc3, 0xd5, 0xc6, 0xdf, 0x86, + 0x32, 0x79, 0x4e, 0xec, 0x48, 0x80, 0x96, 0x3b, 0x52, 0x79, 0x4a, 0x29, 0xd6, 0x1d, 0xa1, 0xeb, + 0x50, 0x60, 0x24, 0x8c, 0x3c, 0xae, 0x12, 0xaa, 0x15, 0xba, 0x06, 0x8b, 0x84, 0x31, 0xca, 0xd4, + 0x39, 0xc7, 0x8b, 0x75, 0x0c, 0x57, 0x4f, 0x99, 0xc9, 0xe8, 0x3a, 0xa0, 0xfe, 0x60, 0x73, 0x70, + 0xd0, 0xb7, 0x0e, 0x1e, 0xf7, 0x7b, 0x9d, 0xed, 0xee, 0x4e, 0xb7, 0xd3, 0xae, 0x5d, 0x42, 0x45, + 0x58, 0x34, 0x3b, 0x9b, 0xed, 0xa7, 0x35, 0x0d, 0x01, 0x14, 0x76, 0x36, 0xbb, 0xfb, 0x9d, 0x76, + 0x6d, 0x0e, 0x55, 0xa0, 0xd8, 0xee, 0xf4, 0xf6, 0x9f, 0x3c, 0xed, 0x3e, 0xde, 0xad, 0xcd, 0xa3, + 0x32, 0x2c, 0xb5, 0x3b, 0xfb, 0x9d, 0x81, 0x58, 0x2d, 0xb4, 0xfe, 0x58, 0x82, 0x95, 0x99, 0x1a, + 0x61, 0x3f, 0x1e, 0x8f, 0xe8, 0x37, 0x0d, 0x2a, 0x33, 0x0d, 0x46, 0x1b, 0x79, 0x5d, 0x3c, 0x4d, + 0x80, 0xf5, 0x4f, 0xcf, 0x19, 0x15, 0x1f, 0xa6, 0x71, 0xff, 0x87, 0x3f, 0xff, 0xf9, 0x79, 0xee, + 0x33, 0xb4, 0x91, 0xbe, 0x53, 0xbe, 0x4b, 0x74, 0xfb, 0x20, 0x60, 0xf4, 0x6b, 0x62, 0xf3, 0xb0, + 0xb9, 0xde, 0x4c, 0xb0, 0xb0, 0xb9, 0xfe, 0x72, 0xfa, 0xb6, 0x41, 0xbf, 0x68, 0x50, 0xca, 0x5c, + 0x1c, 0xd4, 
0xca, 0x23, 0x71, 0xf2, 0x96, 0xd5, 0xcf, 0x27, 0x5a, 0xe3, 0xae, 0x24, 0xbc, 0x81, + 0x5a, 0x53, 0xc2, 0x42, 0x41, 0xaf, 0x21, 0x9b, 0x79, 0x87, 0xad, 0xbf, 0x44, 0xbf, 0x6a, 0x50, + 0x9d, 0x9d, 0x6f, 0x28, 0xf7, 0xd8, 0x4e, 0x9d, 0x87, 0xf5, 0x9b, 0x49, 0x58, 0xe6, 0xf1, 0xd6, + 0x48, 0x7f, 0x9b, 0x8d, 0x1d, 0x49, 0xf2, 0x4b, 0xe3, 0x42, 0xa7, 0x7a, 0x37, 0x9d, 0x4b, 0xe2, + 0x7c, 0xab, 0xb3, 0x03, 0x31, 0x9f, 0xf0, 0xa9, 0x03, 0x34, 0x8f, 0x70, 0x5b, 0x12, 0xfe, 0xa2, + 0x7e, 0x81, 0x53, 0xcd, 0xd0, 0x7d, 0xa5, 0x41, 0x75, 0x76, 0x3c, 0xe4, 0xd3, 0x3d, 0x75, 0x9c, + 0xe4, 0xd1, 0x55, 0x22, 0x58, 0xbf, 0x88, 0x08, 0x7e, 0xd7, 0xa0, 0x9c, 0x9d, 0x2b, 0x28, 0xff, + 0xc1, 0x76, 0x72, 0x88, 0xd5, 0x37, 0xce, 0x17, 0xa4, 0x6e, 0xdb, 0x96, 0xe4, 0x7d, 0xdf, 0xf8, + 0xfc, 0x02, 0xc7, 0x6c, 0x63, 0xcf, 0xbb, 0xab, 0xad, 0x6f, 0xfd, 0xa8, 0x81, 0x61, 0xd3, 0x71, + 0x4e, 0xfd, 0xad, 0x6a, 0x7a, 0xd7, 0x7b, 0xe2, 0x6d, 0xd5, 0xd3, 0xbe, 0xda, 0x55, 0x11, 0x0e, + 0xf5, 0xb0, 0xef, 0x34, 0x28, 0x73, 0x9a, 0x0e, 0xf1, 0xe5, 0xcb, 0x4b, 0xfd, 0x77, 0xc1, 0x81, + 0x1b, 0xbe, 0xee, 0xff, 0xcb, 0xbd, 0x14, 0x79, 0x35, 0x37, 0xbf, 0xbb, 0xbd, 0x33, 0x2c, 0xc8, + 0xc8, 0x3b, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x22, 0xa1, 0xda, 0x9f, 0x7d, 0x0d, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/functions/v1beta2/operations.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/functions/v1beta2/operations.pb.go new file mode 100644 index 000000000..dee76e0c0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/functions/v1beta2/operations.pb.go @@ -0,0 +1,116 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/cloud/functions/v1beta2/operations.proto + +package functions + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A type of an operation. +type OperationType int32 + +const ( + // Unknown operation type. + OperationType_OPERATION_UNSPECIFIED OperationType = 0 + // Triggered by CreateFunction call + OperationType_CREATE_FUNCTION OperationType = 1 + // Triggered by UpdateFunction call + OperationType_UPDATE_FUNCTION OperationType = 2 + // Triggered by DeleteFunction call. + OperationType_DELETE_FUNCTION OperationType = 3 +) + +var OperationType_name = map[int32]string{ + 0: "OPERATION_UNSPECIFIED", + 1: "CREATE_FUNCTION", + 2: "UPDATE_FUNCTION", + 3: "DELETE_FUNCTION", +} +var OperationType_value = map[string]int32{ + "OPERATION_UNSPECIFIED": 0, + "CREATE_FUNCTION": 1, + "UPDATE_FUNCTION": 2, + "DELETE_FUNCTION": 3, +} + +func (x OperationType) String() string { + return proto.EnumName(OperationType_name, int32(x)) +} +func (OperationType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Metadata describing an [Operation][google.longrunning.Operation] +type OperationMetadataV1Beta2 struct { + // Target of the operation - for example + // projects/project-1/locations/region-1/functions/function-1 + Target string `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` + // Type of operation. + Type OperationType `protobuf:"varint,2,opt,name=type,enum=google.cloud.functions.v1beta2.OperationType" json:"type,omitempty"` + // The original request that started the operation. 
+ Request *google_protobuf1.Any `protobuf:"bytes,3,opt,name=request" json:"request,omitempty"` +} + +func (m *OperationMetadataV1Beta2) Reset() { *m = OperationMetadataV1Beta2{} } +func (m *OperationMetadataV1Beta2) String() string { return proto.CompactTextString(m) } +func (*OperationMetadataV1Beta2) ProtoMessage() {} +func (*OperationMetadataV1Beta2) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *OperationMetadataV1Beta2) GetTarget() string { + if m != nil { + return m.Target + } + return "" +} + +func (m *OperationMetadataV1Beta2) GetType() OperationType { + if m != nil { + return m.Type + } + return OperationType_OPERATION_UNSPECIFIED +} + +func (m *OperationMetadataV1Beta2) GetRequest() *google_protobuf1.Any { + if m != nil { + return m.Request + } + return nil +} + +func init() { + proto.RegisterType((*OperationMetadataV1Beta2)(nil), "google.cloud.functions.v1beta2.OperationMetadataV1Beta2") + proto.RegisterEnum("google.cloud.functions.v1beta2.OperationType", OperationType_name, OperationType_value) +} + +func init() { proto.RegisterFile("google/cloud/functions/v1beta2/operations.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0x4f, 0x4f, 0xf2, 0x30, + 0x1c, 0xc7, 0x9f, 0xc2, 0x13, 0x8c, 0x35, 0x2a, 0x99, 0x7f, 0x32, 0x88, 0x31, 0x84, 0x13, 0x31, + 0xb1, 0x0d, 0x78, 0xf4, 0x34, 0xa0, 0x18, 0x12, 0x85, 0x65, 0x82, 0x07, 0x2f, 0xa4, 0x40, 0x69, + 0x96, 0xcc, 0xb6, 0x6e, 0x9d, 0xc9, 0x5e, 0x82, 0x2f, 0xc4, 0xf7, 0x69, 0x56, 0xba, 0x05, 0x0e, + 0xea, 0xb1, 0x9f, 0xf6, 0xf3, 0xed, 0xf7, 0x97, 0x1f, 0xc4, 0x5c, 0x4a, 0x1e, 0x31, 0xbc, 0x8a, + 0x64, 0xba, 0xc6, 0x9b, 0x54, 0xac, 0x74, 0x28, 0x45, 0x82, 0x3f, 0xba, 0x4b, 0xa6, 0x69, 0x0f, + 0x4b, 0xc5, 0x62, 0x6a, 0x10, 0x52, 0xb1, 0xd4, 0xd2, 0xb9, 0xde, 0x0a, 0xc8, 0x08, 0xa8, 0x14, + 0x90, 0x15, 0x9a, 0x57, 0x36, 0x90, 0xaa, 0x10, 0x53, 0x21, 0xa4, 
0xde, 0xb5, 0x9b, 0x0d, 0x7b, + 0x6b, 0x4e, 0xcb, 0x74, 0x83, 0xa9, 0xc8, 0xb6, 0x57, 0xed, 0x2f, 0x00, 0xdd, 0x69, 0xf1, 0xdb, + 0x13, 0xd3, 0x74, 0x4d, 0x35, 0x7d, 0xe9, 0xf6, 0xf3, 0x54, 0xe7, 0x12, 0xd6, 0x34, 0x8d, 0x39, + 0xd3, 0x2e, 0x68, 0x81, 0xce, 0x61, 0x60, 0x4f, 0x8e, 0x07, 0xff, 0xeb, 0x4c, 0x31, 0xb7, 0xd2, + 0x02, 0x9d, 0x93, 0xde, 0x2d, 0xfa, 0xbd, 0x1c, 0x2a, 0xf3, 0x67, 0x99, 0x62, 0x81, 0x51, 0x1d, + 0x04, 0x0f, 0x62, 0xf6, 0x9e, 0xb2, 0x44, 0xbb, 0xd5, 0x16, 0xe8, 0x1c, 0xf5, 0xce, 0x8b, 0x94, + 0xa2, 0x24, 0xf2, 0x44, 0x16, 0x14, 0x8f, 0x6e, 0x42, 0x78, 0xbc, 0x17, 0xe3, 0x34, 0xe0, 0xc5, + 0xd4, 0x27, 0x81, 0x37, 0x1b, 0x4f, 0x27, 0x8b, 0xf9, 0xe4, 0xd9, 0x27, 0x83, 0xf1, 0x68, 0x4c, + 0x86, 0xf5, 0x7f, 0xce, 0x19, 0x3c, 0x1d, 0x04, 0xc4, 0x9b, 0x91, 0xc5, 0x68, 0x3e, 0x19, 0xe4, + 0x0f, 0xea, 0x20, 0x87, 0x73, 0x7f, 0xb8, 0x07, 0x2b, 0x39, 0x1c, 0x92, 0x47, 0xb2, 0x0b, 0xab, + 0xfd, 0x4f, 0x00, 0xdb, 0x2b, 0xf9, 0xf6, 0xc7, 0x54, 0x7d, 0x77, 0x54, 0xa0, 0xb2, 0x58, 0xe2, + 0xe7, 0xdd, 0x7d, 0xf0, 0xfa, 0x60, 0x5d, 0x2e, 0x23, 0x2a, 0x38, 0x92, 0x31, 0xc7, 0x9c, 0x09, + 0x33, 0x99, 0x5d, 0x3d, 0x55, 0x61, 0xf2, 0xd3, 0xfa, 0xef, 0x4b, 0xb2, 0xac, 0x19, 0xe7, 0xee, + 0x3b, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x8a, 0xb1, 0x83, 0x31, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/language/v1/language_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/language/v1/language_service.pb.go new file mode 100644 index 000000000..241bc0df9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/language/v1/language_service.pb.go @@ -0,0 +1,2460 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/language/v1/language_service.proto + +/* +Package language is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/language/v1/language_service.proto + +It has these top-level messages: + Document + Sentence + Entity + Token + Sentiment + PartOfSpeech + DependencyEdge + EntityMention + TextSpan + AnalyzeSentimentRequest + AnalyzeSentimentResponse + AnalyzeEntitySentimentRequest + AnalyzeEntitySentimentResponse + AnalyzeEntitiesRequest + AnalyzeEntitiesResponse + AnalyzeSyntaxRequest + AnalyzeSyntaxResponse + AnnotateTextRequest + AnnotateTextResponse +*/ +package language + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents the text encoding that the caller uses to process the output. +// Providing an `EncodingType` is recommended because the API provides the +// beginning offsets for various outputs, such as tokens and mentions, and +// languages that natively use different text encodings may access offsets +// differently. +type EncodingType int32 + +const ( + // If `EncodingType` is not specified, encoding-dependent information (such as + // `begin_offset`) will be set at `-1`. + EncodingType_NONE EncodingType = 0 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-8 encoding of the input. C++ and Go are examples of languages + // that use this encoding natively. 
+ EncodingType_UTF8 EncodingType = 1 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-16 encoding of the input. Java and Javascript are examples of + // languages that use this encoding natively. + EncodingType_UTF16 EncodingType = 2 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-32 encoding of the input. Python is an example of a language + // that uses this encoding natively. + EncodingType_UTF32 EncodingType = 3 +) + +var EncodingType_name = map[int32]string{ + 0: "NONE", + 1: "UTF8", + 2: "UTF16", + 3: "UTF32", +} +var EncodingType_value = map[string]int32{ + "NONE": 0, + "UTF8": 1, + "UTF16": 2, + "UTF32": 3, +} + +func (x EncodingType) String() string { + return proto.EnumName(EncodingType_name, int32(x)) +} +func (EncodingType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// The document types enum. +type Document_Type int32 + +const ( + // The content type is not specified. + Document_TYPE_UNSPECIFIED Document_Type = 0 + // Plain text + Document_PLAIN_TEXT Document_Type = 1 + // HTML + Document_HTML Document_Type = 2 +) + +var Document_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "PLAIN_TEXT", + 2: "HTML", +} +var Document_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "PLAIN_TEXT": 1, + "HTML": 2, +} + +func (x Document_Type) String() string { + return proto.EnumName(Document_Type_name, int32(x)) +} +func (Document_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// The type of the entity. 
+type Entity_Type int32 + +const ( + // Unknown + Entity_UNKNOWN Entity_Type = 0 + // Person + Entity_PERSON Entity_Type = 1 + // Location + Entity_LOCATION Entity_Type = 2 + // Organization + Entity_ORGANIZATION Entity_Type = 3 + // Event + Entity_EVENT Entity_Type = 4 + // Work of art + Entity_WORK_OF_ART Entity_Type = 5 + // Consumer goods + Entity_CONSUMER_GOOD Entity_Type = 6 + // Other types + Entity_OTHER Entity_Type = 7 +) + +var Entity_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PERSON", + 2: "LOCATION", + 3: "ORGANIZATION", + 4: "EVENT", + 5: "WORK_OF_ART", + 6: "CONSUMER_GOOD", + 7: "OTHER", +} +var Entity_Type_value = map[string]int32{ + "UNKNOWN": 0, + "PERSON": 1, + "LOCATION": 2, + "ORGANIZATION": 3, + "EVENT": 4, + "WORK_OF_ART": 5, + "CONSUMER_GOOD": 6, + "OTHER": 7, +} + +func (x Entity_Type) String() string { + return proto.EnumName(Entity_Type_name, int32(x)) +} +func (Entity_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// The part of speech tags enum. 
+type PartOfSpeech_Tag int32 + +const ( + // Unknown + PartOfSpeech_UNKNOWN PartOfSpeech_Tag = 0 + // Adjective + PartOfSpeech_ADJ PartOfSpeech_Tag = 1 + // Adposition (preposition and postposition) + PartOfSpeech_ADP PartOfSpeech_Tag = 2 + // Adverb + PartOfSpeech_ADV PartOfSpeech_Tag = 3 + // Conjunction + PartOfSpeech_CONJ PartOfSpeech_Tag = 4 + // Determiner + PartOfSpeech_DET PartOfSpeech_Tag = 5 + // Noun (common and proper) + PartOfSpeech_NOUN PartOfSpeech_Tag = 6 + // Cardinal number + PartOfSpeech_NUM PartOfSpeech_Tag = 7 + // Pronoun + PartOfSpeech_PRON PartOfSpeech_Tag = 8 + // Particle or other function word + PartOfSpeech_PRT PartOfSpeech_Tag = 9 + // Punctuation + PartOfSpeech_PUNCT PartOfSpeech_Tag = 10 + // Verb (all tenses and modes) + PartOfSpeech_VERB PartOfSpeech_Tag = 11 + // Other: foreign words, typos, abbreviations + PartOfSpeech_X PartOfSpeech_Tag = 12 + // Affix + PartOfSpeech_AFFIX PartOfSpeech_Tag = 13 +) + +var PartOfSpeech_Tag_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ADJ", + 2: "ADP", + 3: "ADV", + 4: "CONJ", + 5: "DET", + 6: "NOUN", + 7: "NUM", + 8: "PRON", + 9: "PRT", + 10: "PUNCT", + 11: "VERB", + 12: "X", + 13: "AFFIX", +} +var PartOfSpeech_Tag_value = map[string]int32{ + "UNKNOWN": 0, + "ADJ": 1, + "ADP": 2, + "ADV": 3, + "CONJ": 4, + "DET": 5, + "NOUN": 6, + "NUM": 7, + "PRON": 8, + "PRT": 9, + "PUNCT": 10, + "VERB": 11, + "X": 12, + "AFFIX": 13, +} + +func (x PartOfSpeech_Tag) String() string { + return proto.EnumName(PartOfSpeech_Tag_name, int32(x)) +} +func (PartOfSpeech_Tag) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 0} } + +// The characteristic of a verb that expresses time flow during an event. +type PartOfSpeech_Aspect int32 + +const ( + // Aspect is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_ASPECT_UNKNOWN PartOfSpeech_Aspect = 0 + // Perfective + PartOfSpeech_PERFECTIVE PartOfSpeech_Aspect = 1 + // Imperfective + PartOfSpeech_IMPERFECTIVE PartOfSpeech_Aspect = 2 + // Progressive + PartOfSpeech_PROGRESSIVE PartOfSpeech_Aspect = 3 +) + +var PartOfSpeech_Aspect_name = map[int32]string{ + 0: "ASPECT_UNKNOWN", + 1: "PERFECTIVE", + 2: "IMPERFECTIVE", + 3: "PROGRESSIVE", +} +var PartOfSpeech_Aspect_value = map[string]int32{ + "ASPECT_UNKNOWN": 0, + "PERFECTIVE": 1, + "IMPERFECTIVE": 2, + "PROGRESSIVE": 3, +} + +func (x PartOfSpeech_Aspect) String() string { + return proto.EnumName(PartOfSpeech_Aspect_name, int32(x)) +} +func (PartOfSpeech_Aspect) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 1} } + +// The grammatical function performed by a noun or pronoun in a phrase, +// clause, or sentence. In some languages, other parts of speech, such as +// adjective and determiner, take case inflection in agreement with the noun. +type PartOfSpeech_Case int32 + +const ( + // Case is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_CASE_UNKNOWN PartOfSpeech_Case = 0 + // Accusative + PartOfSpeech_ACCUSATIVE PartOfSpeech_Case = 1 + // Adverbial + PartOfSpeech_ADVERBIAL PartOfSpeech_Case = 2 + // Complementive + PartOfSpeech_COMPLEMENTIVE PartOfSpeech_Case = 3 + // Dative + PartOfSpeech_DATIVE PartOfSpeech_Case = 4 + // Genitive + PartOfSpeech_GENITIVE PartOfSpeech_Case = 5 + // Instrumental + PartOfSpeech_INSTRUMENTAL PartOfSpeech_Case = 6 + // Locative + PartOfSpeech_LOCATIVE PartOfSpeech_Case = 7 + // Nominative + PartOfSpeech_NOMINATIVE PartOfSpeech_Case = 8 + // Oblique + PartOfSpeech_OBLIQUE PartOfSpeech_Case = 9 + // Partitive + PartOfSpeech_PARTITIVE PartOfSpeech_Case = 10 + // Prepositional + PartOfSpeech_PREPOSITIONAL PartOfSpeech_Case = 11 + // Reflexive + PartOfSpeech_REFLEXIVE_CASE PartOfSpeech_Case = 12 + // Relative + PartOfSpeech_RELATIVE_CASE PartOfSpeech_Case = 13 + // Vocative + PartOfSpeech_VOCATIVE PartOfSpeech_Case = 14 +) + +var PartOfSpeech_Case_name = map[int32]string{ + 0: "CASE_UNKNOWN", + 1: "ACCUSATIVE", + 2: "ADVERBIAL", + 3: "COMPLEMENTIVE", + 4: "DATIVE", + 5: "GENITIVE", + 6: "INSTRUMENTAL", + 7: "LOCATIVE", + 8: "NOMINATIVE", + 9: "OBLIQUE", + 10: "PARTITIVE", + 11: "PREPOSITIONAL", + 12: "REFLEXIVE_CASE", + 13: "RELATIVE_CASE", + 14: "VOCATIVE", +} +var PartOfSpeech_Case_value = map[string]int32{ + "CASE_UNKNOWN": 0, + "ACCUSATIVE": 1, + "ADVERBIAL": 2, + "COMPLEMENTIVE": 3, + "DATIVE": 4, + "GENITIVE": 5, + "INSTRUMENTAL": 6, + "LOCATIVE": 7, + "NOMINATIVE": 8, + "OBLIQUE": 9, + "PARTITIVE": 10, + "PREPOSITIONAL": 11, + "REFLEXIVE_CASE": 12, + "RELATIVE_CASE": 13, + "VOCATIVE": 14, +} + +func (x PartOfSpeech_Case) String() string { + return proto.EnumName(PartOfSpeech_Case_name, int32(x)) +} +func (PartOfSpeech_Case) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 2} } + +// Depending on the language, Form can be categorizing different forms of +// verbs, adjectives, adverbs, etc. 
For example, categorizing inflected +// endings of verbs and adjectives or distinguishing between short and long +// forms of adjectives and participles +type PartOfSpeech_Form int32 + +const ( + // Form is not applicable in the analyzed language or is not predicted. + PartOfSpeech_FORM_UNKNOWN PartOfSpeech_Form = 0 + // Adnomial + PartOfSpeech_ADNOMIAL PartOfSpeech_Form = 1 + // Auxiliary + PartOfSpeech_AUXILIARY PartOfSpeech_Form = 2 + // Complementizer + PartOfSpeech_COMPLEMENTIZER PartOfSpeech_Form = 3 + // Final ending + PartOfSpeech_FINAL_ENDING PartOfSpeech_Form = 4 + // Gerund + PartOfSpeech_GERUND PartOfSpeech_Form = 5 + // Realis + PartOfSpeech_REALIS PartOfSpeech_Form = 6 + // Irrealis + PartOfSpeech_IRREALIS PartOfSpeech_Form = 7 + // Short form + PartOfSpeech_SHORT PartOfSpeech_Form = 8 + // Long form + PartOfSpeech_LONG PartOfSpeech_Form = 9 + // Order form + PartOfSpeech_ORDER PartOfSpeech_Form = 10 + // Specific form + PartOfSpeech_SPECIFIC PartOfSpeech_Form = 11 +) + +var PartOfSpeech_Form_name = map[int32]string{ + 0: "FORM_UNKNOWN", + 1: "ADNOMIAL", + 2: "AUXILIARY", + 3: "COMPLEMENTIZER", + 4: "FINAL_ENDING", + 5: "GERUND", + 6: "REALIS", + 7: "IRREALIS", + 8: "SHORT", + 9: "LONG", + 10: "ORDER", + 11: "SPECIFIC", +} +var PartOfSpeech_Form_value = map[string]int32{ + "FORM_UNKNOWN": 0, + "ADNOMIAL": 1, + "AUXILIARY": 2, + "COMPLEMENTIZER": 3, + "FINAL_ENDING": 4, + "GERUND": 5, + "REALIS": 6, + "IRREALIS": 7, + "SHORT": 8, + "LONG": 9, + "ORDER": 10, + "SPECIFIC": 11, +} + +func (x PartOfSpeech_Form) String() string { + return proto.EnumName(PartOfSpeech_Form_name, int32(x)) +} +func (PartOfSpeech_Form) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 3} } + +// Gender classes of nouns reflected in the behaviour of associated words. +type PartOfSpeech_Gender int32 + +const ( + // Gender is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_GENDER_UNKNOWN PartOfSpeech_Gender = 0 + // Feminine + PartOfSpeech_FEMININE PartOfSpeech_Gender = 1 + // Masculine + PartOfSpeech_MASCULINE PartOfSpeech_Gender = 2 + // Neuter + PartOfSpeech_NEUTER PartOfSpeech_Gender = 3 +) + +var PartOfSpeech_Gender_name = map[int32]string{ + 0: "GENDER_UNKNOWN", + 1: "FEMININE", + 2: "MASCULINE", + 3: "NEUTER", +} +var PartOfSpeech_Gender_value = map[string]int32{ + "GENDER_UNKNOWN": 0, + "FEMININE": 1, + "MASCULINE": 2, + "NEUTER": 3, +} + +func (x PartOfSpeech_Gender) String() string { + return proto.EnumName(PartOfSpeech_Gender_name, int32(x)) +} +func (PartOfSpeech_Gender) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 4} } + +// The grammatical feature of verbs, used for showing modality and attitude. +type PartOfSpeech_Mood int32 + +const ( + // Mood is not applicable in the analyzed language or is not predicted. + PartOfSpeech_MOOD_UNKNOWN PartOfSpeech_Mood = 0 + // Conditional + PartOfSpeech_CONDITIONAL_MOOD PartOfSpeech_Mood = 1 + // Imperative + PartOfSpeech_IMPERATIVE PartOfSpeech_Mood = 2 + // Indicative + PartOfSpeech_INDICATIVE PartOfSpeech_Mood = 3 + // Interrogative + PartOfSpeech_INTERROGATIVE PartOfSpeech_Mood = 4 + // Jussive + PartOfSpeech_JUSSIVE PartOfSpeech_Mood = 5 + // Subjunctive + PartOfSpeech_SUBJUNCTIVE PartOfSpeech_Mood = 6 +) + +var PartOfSpeech_Mood_name = map[int32]string{ + 0: "MOOD_UNKNOWN", + 1: "CONDITIONAL_MOOD", + 2: "IMPERATIVE", + 3: "INDICATIVE", + 4: "INTERROGATIVE", + 5: "JUSSIVE", + 6: "SUBJUNCTIVE", +} +var PartOfSpeech_Mood_value = map[string]int32{ + "MOOD_UNKNOWN": 0, + "CONDITIONAL_MOOD": 1, + "IMPERATIVE": 2, + "INDICATIVE": 3, + "INTERROGATIVE": 4, + "JUSSIVE": 5, + "SUBJUNCTIVE": 6, +} + +func (x PartOfSpeech_Mood) String() string { + return proto.EnumName(PartOfSpeech_Mood_name, int32(x)) +} +func (PartOfSpeech_Mood) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 5} } + +// Count distinctions. 
+type PartOfSpeech_Number int32 + +const ( + // Number is not applicable in the analyzed language or is not predicted. + PartOfSpeech_NUMBER_UNKNOWN PartOfSpeech_Number = 0 + // Singular + PartOfSpeech_SINGULAR PartOfSpeech_Number = 1 + // Plural + PartOfSpeech_PLURAL PartOfSpeech_Number = 2 + // Dual + PartOfSpeech_DUAL PartOfSpeech_Number = 3 +) + +var PartOfSpeech_Number_name = map[int32]string{ + 0: "NUMBER_UNKNOWN", + 1: "SINGULAR", + 2: "PLURAL", + 3: "DUAL", +} +var PartOfSpeech_Number_value = map[string]int32{ + "NUMBER_UNKNOWN": 0, + "SINGULAR": 1, + "PLURAL": 2, + "DUAL": 3, +} + +func (x PartOfSpeech_Number) String() string { + return proto.EnumName(PartOfSpeech_Number_name, int32(x)) +} +func (PartOfSpeech_Number) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 6} } + +// The distinction between the speaker, second person, third person, etc. +type PartOfSpeech_Person int32 + +const ( + // Person is not applicable in the analyzed language or is not predicted. + PartOfSpeech_PERSON_UNKNOWN PartOfSpeech_Person = 0 + // First + PartOfSpeech_FIRST PartOfSpeech_Person = 1 + // Second + PartOfSpeech_SECOND PartOfSpeech_Person = 2 + // Third + PartOfSpeech_THIRD PartOfSpeech_Person = 3 + // Reflexive + PartOfSpeech_REFLEXIVE_PERSON PartOfSpeech_Person = 4 +) + +var PartOfSpeech_Person_name = map[int32]string{ + 0: "PERSON_UNKNOWN", + 1: "FIRST", + 2: "SECOND", + 3: "THIRD", + 4: "REFLEXIVE_PERSON", +} +var PartOfSpeech_Person_value = map[string]int32{ + "PERSON_UNKNOWN": 0, + "FIRST": 1, + "SECOND": 2, + "THIRD": 3, + "REFLEXIVE_PERSON": 4, +} + +func (x PartOfSpeech_Person) String() string { + return proto.EnumName(PartOfSpeech_Person_name, int32(x)) +} +func (PartOfSpeech_Person) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 7} } + +// This category shows if the token is part of a proper name. +type PartOfSpeech_Proper int32 + +const ( + // Proper is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_PROPER_UNKNOWN PartOfSpeech_Proper = 0 + // Proper + PartOfSpeech_PROPER PartOfSpeech_Proper = 1 + // Not proper + PartOfSpeech_NOT_PROPER PartOfSpeech_Proper = 2 +) + +var PartOfSpeech_Proper_name = map[int32]string{ + 0: "PROPER_UNKNOWN", + 1: "PROPER", + 2: "NOT_PROPER", +} +var PartOfSpeech_Proper_value = map[string]int32{ + "PROPER_UNKNOWN": 0, + "PROPER": 1, + "NOT_PROPER": 2, +} + +func (x PartOfSpeech_Proper) String() string { + return proto.EnumName(PartOfSpeech_Proper_name, int32(x)) +} +func (PartOfSpeech_Proper) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 8} } + +// Reciprocal features of a pronoun. +type PartOfSpeech_Reciprocity int32 + +const ( + // Reciprocity is not applicable in the analyzed language or is not + // predicted. + PartOfSpeech_RECIPROCITY_UNKNOWN PartOfSpeech_Reciprocity = 0 + // Reciprocal + PartOfSpeech_RECIPROCAL PartOfSpeech_Reciprocity = 1 + // Non-reciprocal + PartOfSpeech_NON_RECIPROCAL PartOfSpeech_Reciprocity = 2 +) + +var PartOfSpeech_Reciprocity_name = map[int32]string{ + 0: "RECIPROCITY_UNKNOWN", + 1: "RECIPROCAL", + 2: "NON_RECIPROCAL", +} +var PartOfSpeech_Reciprocity_value = map[string]int32{ + "RECIPROCITY_UNKNOWN": 0, + "RECIPROCAL": 1, + "NON_RECIPROCAL": 2, +} + +func (x PartOfSpeech_Reciprocity) String() string { + return proto.EnumName(PartOfSpeech_Reciprocity_name, int32(x)) +} +func (PartOfSpeech_Reciprocity) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 9} } + +// Time reference. +type PartOfSpeech_Tense int32 + +const ( + // Tense is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_TENSE_UNKNOWN PartOfSpeech_Tense = 0 + // Conditional + PartOfSpeech_CONDITIONAL_TENSE PartOfSpeech_Tense = 1 + // Future + PartOfSpeech_FUTURE PartOfSpeech_Tense = 2 + // Past + PartOfSpeech_PAST PartOfSpeech_Tense = 3 + // Present + PartOfSpeech_PRESENT PartOfSpeech_Tense = 4 + // Imperfect + PartOfSpeech_IMPERFECT PartOfSpeech_Tense = 5 + // Pluperfect + PartOfSpeech_PLUPERFECT PartOfSpeech_Tense = 6 +) + +var PartOfSpeech_Tense_name = map[int32]string{ + 0: "TENSE_UNKNOWN", + 1: "CONDITIONAL_TENSE", + 2: "FUTURE", + 3: "PAST", + 4: "PRESENT", + 5: "IMPERFECT", + 6: "PLUPERFECT", +} +var PartOfSpeech_Tense_value = map[string]int32{ + "TENSE_UNKNOWN": 0, + "CONDITIONAL_TENSE": 1, + "FUTURE": 2, + "PAST": 3, + "PRESENT": 4, + "IMPERFECT": 5, + "PLUPERFECT": 6, +} + +func (x PartOfSpeech_Tense) String() string { + return proto.EnumName(PartOfSpeech_Tense_name, int32(x)) +} +func (PartOfSpeech_Tense) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 10} } + +// The relationship between the action that a verb expresses and the +// participants identified by its arguments. +type PartOfSpeech_Voice int32 + +const ( + // Voice is not applicable in the analyzed language or is not predicted. + PartOfSpeech_VOICE_UNKNOWN PartOfSpeech_Voice = 0 + // Active + PartOfSpeech_ACTIVE PartOfSpeech_Voice = 1 + // Causative + PartOfSpeech_CAUSATIVE PartOfSpeech_Voice = 2 + // Passive + PartOfSpeech_PASSIVE PartOfSpeech_Voice = 3 +) + +var PartOfSpeech_Voice_name = map[int32]string{ + 0: "VOICE_UNKNOWN", + 1: "ACTIVE", + 2: "CAUSATIVE", + 3: "PASSIVE", +} +var PartOfSpeech_Voice_value = map[string]int32{ + "VOICE_UNKNOWN": 0, + "ACTIVE": 1, + "CAUSATIVE": 2, + "PASSIVE": 3, +} + +func (x PartOfSpeech_Voice) String() string { + return proto.EnumName(PartOfSpeech_Voice_name, int32(x)) +} +func (PartOfSpeech_Voice) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 11} } + +// The parse label enum for the token. 
+type DependencyEdge_Label int32 + +const ( + // Unknown + DependencyEdge_UNKNOWN DependencyEdge_Label = 0 + // Abbreviation modifier + DependencyEdge_ABBREV DependencyEdge_Label = 1 + // Adjectival complement + DependencyEdge_ACOMP DependencyEdge_Label = 2 + // Adverbial clause modifier + DependencyEdge_ADVCL DependencyEdge_Label = 3 + // Adverbial modifier + DependencyEdge_ADVMOD DependencyEdge_Label = 4 + // Adjectival modifier of an NP + DependencyEdge_AMOD DependencyEdge_Label = 5 + // Appositional modifier of an NP + DependencyEdge_APPOS DependencyEdge_Label = 6 + // Attribute dependent of a copular verb + DependencyEdge_ATTR DependencyEdge_Label = 7 + // Auxiliary (non-main) verb + DependencyEdge_AUX DependencyEdge_Label = 8 + // Passive auxiliary + DependencyEdge_AUXPASS DependencyEdge_Label = 9 + // Coordinating conjunction + DependencyEdge_CC DependencyEdge_Label = 10 + // Clausal complement of a verb or adjective + DependencyEdge_CCOMP DependencyEdge_Label = 11 + // Conjunct + DependencyEdge_CONJ DependencyEdge_Label = 12 + // Clausal subject + DependencyEdge_CSUBJ DependencyEdge_Label = 13 + // Clausal passive subject + DependencyEdge_CSUBJPASS DependencyEdge_Label = 14 + // Dependency (unable to determine) + DependencyEdge_DEP DependencyEdge_Label = 15 + // Determiner + DependencyEdge_DET DependencyEdge_Label = 16 + // Discourse + DependencyEdge_DISCOURSE DependencyEdge_Label = 17 + // Direct object + DependencyEdge_DOBJ DependencyEdge_Label = 18 + // Expletive + DependencyEdge_EXPL DependencyEdge_Label = 19 + // Goes with (part of a word in a text not well edited) + DependencyEdge_GOESWITH DependencyEdge_Label = 20 + // Indirect object + DependencyEdge_IOBJ DependencyEdge_Label = 21 + // Marker (word introducing a subordinate clause) + DependencyEdge_MARK DependencyEdge_Label = 22 + // Multi-word expression + DependencyEdge_MWE DependencyEdge_Label = 23 + // Multi-word verbal expression + DependencyEdge_MWV DependencyEdge_Label = 24 + // Negation 
modifier + DependencyEdge_NEG DependencyEdge_Label = 25 + // Noun compound modifier + DependencyEdge_NN DependencyEdge_Label = 26 + // Noun phrase used as an adverbial modifier + DependencyEdge_NPADVMOD DependencyEdge_Label = 27 + // Nominal subject + DependencyEdge_NSUBJ DependencyEdge_Label = 28 + // Passive nominal subject + DependencyEdge_NSUBJPASS DependencyEdge_Label = 29 + // Numeric modifier of a noun + DependencyEdge_NUM DependencyEdge_Label = 30 + // Element of compound number + DependencyEdge_NUMBER DependencyEdge_Label = 31 + // Punctuation mark + DependencyEdge_P DependencyEdge_Label = 32 + // Parataxis relation + DependencyEdge_PARATAXIS DependencyEdge_Label = 33 + // Participial modifier + DependencyEdge_PARTMOD DependencyEdge_Label = 34 + // The complement of a preposition is a clause + DependencyEdge_PCOMP DependencyEdge_Label = 35 + // Object of a preposition + DependencyEdge_POBJ DependencyEdge_Label = 36 + // Possession modifier + DependencyEdge_POSS DependencyEdge_Label = 37 + // Postverbal negative particle + DependencyEdge_POSTNEG DependencyEdge_Label = 38 + // Predicate complement + DependencyEdge_PRECOMP DependencyEdge_Label = 39 + // Preconjunt + DependencyEdge_PRECONJ DependencyEdge_Label = 40 + // Predeterminer + DependencyEdge_PREDET DependencyEdge_Label = 41 + // Prefix + DependencyEdge_PREF DependencyEdge_Label = 42 + // Prepositional modifier + DependencyEdge_PREP DependencyEdge_Label = 43 + // The relationship between a verb and verbal morpheme + DependencyEdge_PRONL DependencyEdge_Label = 44 + // Particle + DependencyEdge_PRT DependencyEdge_Label = 45 + // Associative or possessive marker + DependencyEdge_PS DependencyEdge_Label = 46 + // Quantifier phrase modifier + DependencyEdge_QUANTMOD DependencyEdge_Label = 47 + // Relative clause modifier + DependencyEdge_RCMOD DependencyEdge_Label = 48 + // Complementizer in relative clause + DependencyEdge_RCMODREL DependencyEdge_Label = 49 + // Ellipsis without a preceding predicate + 
DependencyEdge_RDROP DependencyEdge_Label = 50 + // Referent + DependencyEdge_REF DependencyEdge_Label = 51 + // Remnant + DependencyEdge_REMNANT DependencyEdge_Label = 52 + // Reparandum + DependencyEdge_REPARANDUM DependencyEdge_Label = 53 + // Root + DependencyEdge_ROOT DependencyEdge_Label = 54 + // Suffix specifying a unit of number + DependencyEdge_SNUM DependencyEdge_Label = 55 + // Suffix + DependencyEdge_SUFF DependencyEdge_Label = 56 + // Temporal modifier + DependencyEdge_TMOD DependencyEdge_Label = 57 + // Topic marker + DependencyEdge_TOPIC DependencyEdge_Label = 58 + // Clause headed by an infinite form of the verb that modifies a noun + DependencyEdge_VMOD DependencyEdge_Label = 59 + // Vocative + DependencyEdge_VOCATIVE DependencyEdge_Label = 60 + // Open clausal complement + DependencyEdge_XCOMP DependencyEdge_Label = 61 + // Name suffix + DependencyEdge_SUFFIX DependencyEdge_Label = 62 + // Name title + DependencyEdge_TITLE DependencyEdge_Label = 63 + // Adverbial phrase modifier + DependencyEdge_ADVPHMOD DependencyEdge_Label = 64 + // Causative auxiliary + DependencyEdge_AUXCAUS DependencyEdge_Label = 65 + // Helper auxiliary + DependencyEdge_AUXVV DependencyEdge_Label = 66 + // Rentaishi (Prenominal modifier) + DependencyEdge_DTMOD DependencyEdge_Label = 67 + // Foreign words + DependencyEdge_FOREIGN DependencyEdge_Label = 68 + // Keyword + DependencyEdge_KW DependencyEdge_Label = 69 + // List for chains of comparable items + DependencyEdge_LIST DependencyEdge_Label = 70 + // Nominalized clause + DependencyEdge_NOMC DependencyEdge_Label = 71 + // Nominalized clausal subject + DependencyEdge_NOMCSUBJ DependencyEdge_Label = 72 + // Nominalized clausal passive + DependencyEdge_NOMCSUBJPASS DependencyEdge_Label = 73 + // Compound of numeric modifier + DependencyEdge_NUMC DependencyEdge_Label = 74 + // Copula + DependencyEdge_COP DependencyEdge_Label = 75 + // Dislocated relation (for fronted/topicalized elements) + DependencyEdge_DISLOCATED 
DependencyEdge_Label = 76 + // Aspect marker + DependencyEdge_ASP DependencyEdge_Label = 77 + // Genitive modifier + DependencyEdge_GMOD DependencyEdge_Label = 78 + // Genitive object + DependencyEdge_GOBJ DependencyEdge_Label = 79 + // Infinitival modifier + DependencyEdge_INFMOD DependencyEdge_Label = 80 + // Measure + DependencyEdge_MES DependencyEdge_Label = 81 + // Nominal complement of a noun + DependencyEdge_NCOMP DependencyEdge_Label = 82 +) + +var DependencyEdge_Label_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ABBREV", + 2: "ACOMP", + 3: "ADVCL", + 4: "ADVMOD", + 5: "AMOD", + 6: "APPOS", + 7: "ATTR", + 8: "AUX", + 9: "AUXPASS", + 10: "CC", + 11: "CCOMP", + 12: "CONJ", + 13: "CSUBJ", + 14: "CSUBJPASS", + 15: "DEP", + 16: "DET", + 17: "DISCOURSE", + 18: "DOBJ", + 19: "EXPL", + 20: "GOESWITH", + 21: "IOBJ", + 22: "MARK", + 23: "MWE", + 24: "MWV", + 25: "NEG", + 26: "NN", + 27: "NPADVMOD", + 28: "NSUBJ", + 29: "NSUBJPASS", + 30: "NUM", + 31: "NUMBER", + 32: "P", + 33: "PARATAXIS", + 34: "PARTMOD", + 35: "PCOMP", + 36: "POBJ", + 37: "POSS", + 38: "POSTNEG", + 39: "PRECOMP", + 40: "PRECONJ", + 41: "PREDET", + 42: "PREF", + 43: "PREP", + 44: "PRONL", + 45: "PRT", + 46: "PS", + 47: "QUANTMOD", + 48: "RCMOD", + 49: "RCMODREL", + 50: "RDROP", + 51: "REF", + 52: "REMNANT", + 53: "REPARANDUM", + 54: "ROOT", + 55: "SNUM", + 56: "SUFF", + 57: "TMOD", + 58: "TOPIC", + 59: "VMOD", + 60: "VOCATIVE", + 61: "XCOMP", + 62: "SUFFIX", + 63: "TITLE", + 64: "ADVPHMOD", + 65: "AUXCAUS", + 66: "AUXVV", + 67: "DTMOD", + 68: "FOREIGN", + 69: "KW", + 70: "LIST", + 71: "NOMC", + 72: "NOMCSUBJ", + 73: "NOMCSUBJPASS", + 74: "NUMC", + 75: "COP", + 76: "DISLOCATED", + 77: "ASP", + 78: "GMOD", + 79: "GOBJ", + 80: "INFMOD", + 81: "MES", + 82: "NCOMP", +} +var DependencyEdge_Label_value = map[string]int32{ + "UNKNOWN": 0, + "ABBREV": 1, + "ACOMP": 2, + "ADVCL": 3, + "ADVMOD": 4, + "AMOD": 5, + "APPOS": 6, + "ATTR": 7, + "AUX": 8, + "AUXPASS": 9, + "CC": 10, + "CCOMP": 11, + "CONJ": 12, + 
"CSUBJ": 13, + "CSUBJPASS": 14, + "DEP": 15, + "DET": 16, + "DISCOURSE": 17, + "DOBJ": 18, + "EXPL": 19, + "GOESWITH": 20, + "IOBJ": 21, + "MARK": 22, + "MWE": 23, + "MWV": 24, + "NEG": 25, + "NN": 26, + "NPADVMOD": 27, + "NSUBJ": 28, + "NSUBJPASS": 29, + "NUM": 30, + "NUMBER": 31, + "P": 32, + "PARATAXIS": 33, + "PARTMOD": 34, + "PCOMP": 35, + "POBJ": 36, + "POSS": 37, + "POSTNEG": 38, + "PRECOMP": 39, + "PRECONJ": 40, + "PREDET": 41, + "PREF": 42, + "PREP": 43, + "PRONL": 44, + "PRT": 45, + "PS": 46, + "QUANTMOD": 47, + "RCMOD": 48, + "RCMODREL": 49, + "RDROP": 50, + "REF": 51, + "REMNANT": 52, + "REPARANDUM": 53, + "ROOT": 54, + "SNUM": 55, + "SUFF": 56, + "TMOD": 57, + "TOPIC": 58, + "VMOD": 59, + "VOCATIVE": 60, + "XCOMP": 61, + "SUFFIX": 62, + "TITLE": 63, + "ADVPHMOD": 64, + "AUXCAUS": 65, + "AUXVV": 66, + "DTMOD": 67, + "FOREIGN": 68, + "KW": 69, + "LIST": 70, + "NOMC": 71, + "NOMCSUBJ": 72, + "NOMCSUBJPASS": 73, + "NUMC": 74, + "COP": 75, + "DISLOCATED": 76, + "ASP": 77, + "GMOD": 78, + "GOBJ": 79, + "INFMOD": 80, + "MES": 81, + "NCOMP": 82, +} + +func (x DependencyEdge_Label) String() string { + return proto.EnumName(DependencyEdge_Label_name, int32(x)) +} +func (DependencyEdge_Label) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} } + +// The supported types of mentions. 
+type EntityMention_Type int32 + +const ( + // Unknown + EntityMention_TYPE_UNKNOWN EntityMention_Type = 0 + // Proper name + EntityMention_PROPER EntityMention_Type = 1 + // Common noun (or noun compound) + EntityMention_COMMON EntityMention_Type = 2 +) + +var EntityMention_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "PROPER", + 2: "COMMON", +} +var EntityMention_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "PROPER": 1, + "COMMON": 2, +} + +func (x EntityMention_Type) String() string { + return proto.EnumName(EntityMention_Type_name, int32(x)) +} +func (EntityMention_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// ################################################################ # +// +// Represents the input to API methods. +type Document struct { + // Required. If the type is not set or is `TYPE_UNSPECIFIED`, + // returns an `INVALID_ARGUMENT` error. + Type Document_Type `protobuf:"varint,1,opt,name=type,enum=google.cloud.language.v1.Document_Type" json:"type,omitempty"` + // The source of the document: a string containing the content or a + // Google Cloud Storage URI. + // + // Types that are valid to be assigned to Source: + // *Document_Content + // *Document_GcsContentUri + Source isDocument_Source `protobuf_oneof:"source"` + // The language of the document (if not specified, the language is + // automatically detected). Both ISO and BCP-47 language codes are + // accepted.
+ // [Language Support](/natural-language/docs/languages) + // lists currently supported languages for each API method. + // If the language (either specified by the caller or automatically detected) + // is not supported by the called API method, an `INVALID_ARGUMENT` error + // is returned. + Language string `protobuf:"bytes,4,opt,name=language" json:"language,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isDocument_Source interface { + isDocument_Source() +} + +type Document_Content struct { + Content string `protobuf:"bytes,2,opt,name=content,oneof"` +} +type Document_GcsContentUri struct { + GcsContentUri string `protobuf:"bytes,3,opt,name=gcs_content_uri,json=gcsContentUri,oneof"` +} + +func (*Document_Content) isDocument_Source() {} +func (*Document_GcsContentUri) isDocument_Source() {} + +func (m *Document) GetSource() isDocument_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Document) GetType() Document_Type { + if m != nil { + return m.Type + } + return Document_TYPE_UNSPECIFIED +} + +func (m *Document) GetContent() string { + if x, ok := m.GetSource().(*Document_Content); ok { + return x.Content + } + return "" +} + +func (m *Document) GetGcsContentUri() string { + if x, ok := m.GetSource().(*Document_GcsContentUri); ok { + return x.GcsContentUri + } + return "" +} + +func (m *Document) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Document) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Document_OneofMarshaler, _Document_OneofUnmarshaler, _Document_OneofSizer, []interface{}{ + (*Document_Content)(nil), + (*Document_GcsContentUri)(nil), + } +} + +func _Document_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Document) + // source + switch x := m.Source.(type) { + case *Document_Content: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Content) + case *Document_GcsContentUri: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.GcsContentUri) + case nil: + default: + return fmt.Errorf("Document.Source has unexpected type %T", x) + } + return nil +} + +func _Document_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Document) + switch tag { + case 2: // source.content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Document_Content{x} + return true, err + case 3: // source.gcs_content_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Document_GcsContentUri{x} + return true, err + default: + return false, nil + } +} + +func _Document_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Document) + // source + switch x := m.Source.(type) { + case *Document_Content: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Content))) + n += len(x.Content) + case *Document_GcsContentUri: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.GcsContentUri))) + n += len(x.GcsContentUri) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents a sentence in the input 
document. +type Sentence struct { + // The sentence text. + Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // For calls to [AnalyzeSentiment][] or if + // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment] is set to + // true, this field will contain the sentiment for the sentence. + Sentiment *Sentiment `protobuf:"bytes,2,opt,name=sentiment" json:"sentiment,omitempty"` +} + +func (m *Sentence) Reset() { *m = Sentence{} } +func (m *Sentence) String() string { return proto.CompactTextString(m) } +func (*Sentence) ProtoMessage() {} +func (*Sentence) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Sentence) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *Sentence) GetSentiment() *Sentiment { + if m != nil { + return m.Sentiment + } + return nil +} + +// Represents a phrase in the text that is a known entity, such as +// a person, an organization, or location. The API associates information, such +// as salience and mentions, with entities. +type Entity struct { + // The representative name for the entity. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The entity type. + Type Entity_Type `protobuf:"varint,2,opt,name=type,enum=google.cloud.language.v1.Entity_Type" json:"type,omitempty"` + // Metadata associated with the entity. + // + // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if + // available. The associated keys are "wikipedia_url" and "mid", respectively. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The salience score associated with the entity in the [0, 1.0] range. 
+ // + // The salience score for an entity provides information about the + // importance or centrality of that entity to the entire document text. + // Scores closer to 0 are less salient, while scores closer to 1.0 are highly + // salient. + Salience float32 `protobuf:"fixed32,4,opt,name=salience" json:"salience,omitempty"` + // The mentions of this entity in the input document. The API currently + // supports proper noun mentions. + Mentions []*EntityMention `protobuf:"bytes,5,rep,name=mentions" json:"mentions,omitempty"` + // For calls to [AnalyzeEntitySentiment][] or if + // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] is set to + // true, this field will contain the aggregate sentiment expressed for this + // entity in the provided document. + Sentiment *Sentiment `protobuf:"bytes,6,opt,name=sentiment" json:"sentiment,omitempty"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Entity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Entity) GetType() Entity_Type { + if m != nil { + return m.Type + } + return Entity_UNKNOWN +} + +func (m *Entity) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Entity) GetSalience() float32 { + if m != nil { + return m.Salience + } + return 0 +} + +func (m *Entity) GetMentions() []*EntityMention { + if m != nil { + return m.Mentions + } + return nil +} + +func (m *Entity) GetSentiment() *Sentiment { + if m != nil { + return m.Sentiment + } + return nil +} + +// Represents the smallest syntactic building block of the text. +type Token struct { + // The token text. 
+ Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // Parts of speech tag for this token. + PartOfSpeech *PartOfSpeech `protobuf:"bytes,2,opt,name=part_of_speech,json=partOfSpeech" json:"part_of_speech,omitempty"` + // Dependency tree parse for this token. + DependencyEdge *DependencyEdge `protobuf:"bytes,3,opt,name=dependency_edge,json=dependencyEdge" json:"dependency_edge,omitempty"` + // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token. + Lemma string `protobuf:"bytes,4,opt,name=lemma" json:"lemma,omitempty"` +} + +func (m *Token) Reset() { *m = Token{} } +func (m *Token) String() string { return proto.CompactTextString(m) } +func (*Token) ProtoMessage() {} +func (*Token) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Token) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *Token) GetPartOfSpeech() *PartOfSpeech { + if m != nil { + return m.PartOfSpeech + } + return nil +} + +func (m *Token) GetDependencyEdge() *DependencyEdge { + if m != nil { + return m.DependencyEdge + } + return nil +} + +func (m *Token) GetLemma() string { + if m != nil { + return m.Lemma + } + return "" +} + +// Represents the feeling associated with the entire text or entities in +// the text. +type Sentiment struct { + // A non-negative number in the [0, +inf) range, which represents + // the absolute magnitude of sentiment regardless of score (positive or + // negative). + Magnitude float32 `protobuf:"fixed32,2,opt,name=magnitude" json:"magnitude,omitempty"` + // Sentiment score between -1.0 (negative sentiment) and 1.0 + // (positive sentiment). 
+ Score float32 `protobuf:"fixed32,3,opt,name=score" json:"score,omitempty"` +} + +func (m *Sentiment) Reset() { *m = Sentiment{} } +func (m *Sentiment) String() string { return proto.CompactTextString(m) } +func (*Sentiment) ProtoMessage() {} +func (*Sentiment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Sentiment) GetMagnitude() float32 { + if m != nil { + return m.Magnitude + } + return 0 +} + +func (m *Sentiment) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +// Represents part of speech information for a token. Parts of speech +// are as defined in +// http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf +type PartOfSpeech struct { + // The part of speech tag. + Tag PartOfSpeech_Tag `protobuf:"varint,1,opt,name=tag,enum=google.cloud.language.v1.PartOfSpeech_Tag" json:"tag,omitempty"` + // The grammatical aspect. + Aspect PartOfSpeech_Aspect `protobuf:"varint,2,opt,name=aspect,enum=google.cloud.language.v1.PartOfSpeech_Aspect" json:"aspect,omitempty"` + // The grammatical case. + Case PartOfSpeech_Case `protobuf:"varint,3,opt,name=case,enum=google.cloud.language.v1.PartOfSpeech_Case" json:"case,omitempty"` + // The grammatical form. + Form PartOfSpeech_Form `protobuf:"varint,4,opt,name=form,enum=google.cloud.language.v1.PartOfSpeech_Form" json:"form,omitempty"` + // The grammatical gender. + Gender PartOfSpeech_Gender `protobuf:"varint,5,opt,name=gender,enum=google.cloud.language.v1.PartOfSpeech_Gender" json:"gender,omitempty"` + // The grammatical mood. + Mood PartOfSpeech_Mood `protobuf:"varint,6,opt,name=mood,enum=google.cloud.language.v1.PartOfSpeech_Mood" json:"mood,omitempty"` + // The grammatical number. + Number PartOfSpeech_Number `protobuf:"varint,7,opt,name=number,enum=google.cloud.language.v1.PartOfSpeech_Number" json:"number,omitempty"` + // The grammatical person. 
+ Person PartOfSpeech_Person `protobuf:"varint,8,opt,name=person,enum=google.cloud.language.v1.PartOfSpeech_Person" json:"person,omitempty"` + // The grammatical properness. + Proper PartOfSpeech_Proper `protobuf:"varint,9,opt,name=proper,enum=google.cloud.language.v1.PartOfSpeech_Proper" json:"proper,omitempty"` + // The grammatical reciprocity. + Reciprocity PartOfSpeech_Reciprocity `protobuf:"varint,10,opt,name=reciprocity,enum=google.cloud.language.v1.PartOfSpeech_Reciprocity" json:"reciprocity,omitempty"` + // The grammatical tense. + Tense PartOfSpeech_Tense `protobuf:"varint,11,opt,name=tense,enum=google.cloud.language.v1.PartOfSpeech_Tense" json:"tense,omitempty"` + // The grammatical voice. + Voice PartOfSpeech_Voice `protobuf:"varint,12,opt,name=voice,enum=google.cloud.language.v1.PartOfSpeech_Voice" json:"voice,omitempty"` +} + +func (m *PartOfSpeech) Reset() { *m = PartOfSpeech{} } +func (m *PartOfSpeech) String() string { return proto.CompactTextString(m) } +func (*PartOfSpeech) ProtoMessage() {} +func (*PartOfSpeech) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *PartOfSpeech) GetTag() PartOfSpeech_Tag { + if m != nil { + return m.Tag + } + return PartOfSpeech_UNKNOWN +} + +func (m *PartOfSpeech) GetAspect() PartOfSpeech_Aspect { + if m != nil { + return m.Aspect + } + return PartOfSpeech_ASPECT_UNKNOWN +} + +func (m *PartOfSpeech) GetCase() PartOfSpeech_Case { + if m != nil { + return m.Case + } + return PartOfSpeech_CASE_UNKNOWN +} + +func (m *PartOfSpeech) GetForm() PartOfSpeech_Form { + if m != nil { + return m.Form + } + return PartOfSpeech_FORM_UNKNOWN +} + +func (m *PartOfSpeech) GetGender() PartOfSpeech_Gender { + if m != nil { + return m.Gender + } + return PartOfSpeech_GENDER_UNKNOWN +} + +func (m *PartOfSpeech) GetMood() PartOfSpeech_Mood { + if m != nil { + return m.Mood + } + return PartOfSpeech_MOOD_UNKNOWN +} + +func (m *PartOfSpeech) GetNumber() PartOfSpeech_Number { + if m != nil { + return m.Number + } 
+ return PartOfSpeech_NUMBER_UNKNOWN +} + +func (m *PartOfSpeech) GetPerson() PartOfSpeech_Person { + if m != nil { + return m.Person + } + return PartOfSpeech_PERSON_UNKNOWN +} + +func (m *PartOfSpeech) GetProper() PartOfSpeech_Proper { + if m != nil { + return m.Proper + } + return PartOfSpeech_PROPER_UNKNOWN +} + +func (m *PartOfSpeech) GetReciprocity() PartOfSpeech_Reciprocity { + if m != nil { + return m.Reciprocity + } + return PartOfSpeech_RECIPROCITY_UNKNOWN +} + +func (m *PartOfSpeech) GetTense() PartOfSpeech_Tense { + if m != nil { + return m.Tense + } + return PartOfSpeech_TENSE_UNKNOWN +} + +func (m *PartOfSpeech) GetVoice() PartOfSpeech_Voice { + if m != nil { + return m.Voice + } + return PartOfSpeech_VOICE_UNKNOWN +} + +// Represents dependency parse tree information for a token. (For more +// information on dependency labels, see +// http://www.aclweb.org/anthology/P13-2017 +type DependencyEdge struct { + // Represents the head of this token in the dependency tree. + // This is the index of the token which has an arc going to this token. + // The index is the position of the token in the array of tokens returned + // by the API method. If this token is a root token, then the + // `head_token_index` is its own index. + HeadTokenIndex int32 `protobuf:"varint,1,opt,name=head_token_index,json=headTokenIndex" json:"head_token_index,omitempty"` + // The parse label for the token. 
+ Label DependencyEdge_Label `protobuf:"varint,2,opt,name=label,enum=google.cloud.language.v1.DependencyEdge_Label" json:"label,omitempty"` +} + +func (m *DependencyEdge) Reset() { *m = DependencyEdge{} } +func (m *DependencyEdge) String() string { return proto.CompactTextString(m) } +func (*DependencyEdge) ProtoMessage() {} +func (*DependencyEdge) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DependencyEdge) GetHeadTokenIndex() int32 { + if m != nil { + return m.HeadTokenIndex + } + return 0 +} + +func (m *DependencyEdge) GetLabel() DependencyEdge_Label { + if m != nil { + return m.Label + } + return DependencyEdge_UNKNOWN +} + +// Represents a mention for an entity in the text. Currently, proper noun +// mentions are supported. +type EntityMention struct { + // The mention text. + Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // The type of the entity mention. + Type EntityMention_Type `protobuf:"varint,2,opt,name=type,enum=google.cloud.language.v1.EntityMention_Type" json:"type,omitempty"` + // For calls to [AnalyzeEntitySentiment][] or if + // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] is set to + // true, this field will contain the sentiment expressed for this mention of + // the entity in the provided document. 
+ Sentiment *Sentiment `protobuf:"bytes,3,opt,name=sentiment" json:"sentiment,omitempty"` +} + +func (m *EntityMention) Reset() { *m = EntityMention{} } +func (m *EntityMention) String() string { return proto.CompactTextString(m) } +func (*EntityMention) ProtoMessage() {} +func (*EntityMention) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *EntityMention) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *EntityMention) GetType() EntityMention_Type { + if m != nil { + return m.Type + } + return EntityMention_TYPE_UNKNOWN +} + +func (m *EntityMention) GetSentiment() *Sentiment { + if m != nil { + return m.Sentiment + } + return nil +} + +// Represents an output piece of text. +type TextSpan struct { + // The content of the output text. + Content string `protobuf:"bytes,1,opt,name=content" json:"content,omitempty"` + // The API calculates the beginning offset of the content in the original + // document according to the [EncodingType][google.cloud.language.v1.EncodingType] specified in the API request. + BeginOffset int32 `protobuf:"varint,2,opt,name=begin_offset,json=beginOffset" json:"begin_offset,omitempty"` +} + +func (m *TextSpan) Reset() { *m = TextSpan{} } +func (m *TextSpan) String() string { return proto.CompactTextString(m) } +func (*TextSpan) ProtoMessage() {} +func (*TextSpan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *TextSpan) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *TextSpan) GetBeginOffset() int32 { + if m != nil { + return m.BeginOffset + } + return 0 +} + +// The sentiment analysis request message. +type AnalyzeSentimentRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate sentence offsets. 
+ EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeSentimentRequest) Reset() { *m = AnalyzeSentimentRequest{} } +func (m *AnalyzeSentimentRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSentimentRequest) ProtoMessage() {} +func (*AnalyzeSentimentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *AnalyzeSentimentRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeSentimentRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The sentiment analysis response message. +type AnalyzeSentimentResponse struct { + // The overall sentiment of the input document. + DocumentSentiment *Sentiment `protobuf:"bytes,1,opt,name=document_sentiment,json=documentSentiment" json:"document_sentiment,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1.Document.language] field for more details. + Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` + // The sentiment for all the sentences in the document. 
+ Sentences []*Sentence `protobuf:"bytes,3,rep,name=sentences" json:"sentences,omitempty"` +} + +func (m *AnalyzeSentimentResponse) Reset() { *m = AnalyzeSentimentResponse{} } +func (m *AnalyzeSentimentResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSentimentResponse) ProtoMessage() {} +func (*AnalyzeSentimentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *AnalyzeSentimentResponse) GetDocumentSentiment() *Sentiment { + if m != nil { + return m.DocumentSentiment + } + return nil +} + +func (m *AnalyzeSentimentResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +func (m *AnalyzeSentimentResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +// The entity-level sentiment analysis request message. +type AnalyzeEntitySentimentRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate offsets. + EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeEntitySentimentRequest) Reset() { *m = AnalyzeEntitySentimentRequest{} } +func (m *AnalyzeEntitySentimentRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitySentimentRequest) ProtoMessage() {} +func (*AnalyzeEntitySentimentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *AnalyzeEntitySentimentRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeEntitySentimentRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The entity-level sentiment analysis response message. 
+type AnalyzeEntitySentimentResponse struct { + // The recognized entities in the input document with associated sentiments. + Entities []*Entity `protobuf:"bytes,1,rep,name=entities" json:"entities,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1.Document.language] field for more details. + Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` +} + +func (m *AnalyzeEntitySentimentResponse) Reset() { *m = AnalyzeEntitySentimentResponse{} } +func (m *AnalyzeEntitySentimentResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitySentimentResponse) ProtoMessage() {} +func (*AnalyzeEntitySentimentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *AnalyzeEntitySentimentResponse) GetEntities() []*Entity { + if m != nil { + return m.Entities + } + return nil +} + +func (m *AnalyzeEntitySentimentResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// The entity analysis request message. +type AnalyzeEntitiesRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate offsets. 
+ EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeEntitiesRequest) Reset() { *m = AnalyzeEntitiesRequest{} } +func (m *AnalyzeEntitiesRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitiesRequest) ProtoMessage() {} +func (*AnalyzeEntitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *AnalyzeEntitiesRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeEntitiesRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The entity analysis response message. +type AnalyzeEntitiesResponse struct { + // The recognized entities in the input document. + Entities []*Entity `protobuf:"bytes,1,rep,name=entities" json:"entities,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1.Document.language] field for more details. + Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` +} + +func (m *AnalyzeEntitiesResponse) Reset() { *m = AnalyzeEntitiesResponse{} } +func (m *AnalyzeEntitiesResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitiesResponse) ProtoMessage() {} +func (*AnalyzeEntitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *AnalyzeEntitiesResponse) GetEntities() []*Entity { + if m != nil { + return m.Entities + } + return nil +} + +func (m *AnalyzeEntitiesResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// The syntax analysis request message. +type AnalyzeSyntaxRequest struct { + // Input document. 
+ Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate offsets. + EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeSyntaxRequest) Reset() { *m = AnalyzeSyntaxRequest{} } +func (m *AnalyzeSyntaxRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSyntaxRequest) ProtoMessage() {} +func (*AnalyzeSyntaxRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *AnalyzeSyntaxRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeSyntaxRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The syntax analysis response message. +type AnalyzeSyntaxResponse struct { + // Sentences in the input document. + Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences" json:"sentences,omitempty"` + // Tokens, along with their syntactic information, in the input document. + Tokens []*Token `protobuf:"bytes,2,rep,name=tokens" json:"tokens,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1.Document.language] field for more details. 
+ Language string `protobuf:"bytes,3,opt,name=language" json:"language,omitempty"` +} + +func (m *AnalyzeSyntaxResponse) Reset() { *m = AnalyzeSyntaxResponse{} } +func (m *AnalyzeSyntaxResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSyntaxResponse) ProtoMessage() {} +func (*AnalyzeSyntaxResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *AnalyzeSyntaxResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +func (m *AnalyzeSyntaxResponse) GetTokens() []*Token { + if m != nil { + return m.Tokens + } + return nil +} + +func (m *AnalyzeSyntaxResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// The request message for the text annotation API, which can perform multiple +// analysis types (sentiment, entities, and syntax) in one call. +type AnnotateTextRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The enabled features. + Features *AnnotateTextRequest_Features `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // The encoding type used by the API to calculate offsets. 
+ EncodingType EncodingType `protobuf:"varint,3,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnnotateTextRequest) Reset() { *m = AnnotateTextRequest{} } +func (m *AnnotateTextRequest) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextRequest) ProtoMessage() {} +func (*AnnotateTextRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *AnnotateTextRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnnotateTextRequest) GetFeatures() *AnnotateTextRequest_Features { + if m != nil { + return m.Features + } + return nil +} + +func (m *AnnotateTextRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// All available features for sentiment, syntax, and semantic analysis. +// Setting each one to true will enable that specific analysis for the input. +type AnnotateTextRequest_Features struct { + // Extract syntax information. + ExtractSyntax bool `protobuf:"varint,1,opt,name=extract_syntax,json=extractSyntax" json:"extract_syntax,omitempty"` + // Extract entities. + ExtractEntities bool `protobuf:"varint,2,opt,name=extract_entities,json=extractEntities" json:"extract_entities,omitempty"` + // Extract document-level sentiment. + ExtractDocumentSentiment bool `protobuf:"varint,3,opt,name=extract_document_sentiment,json=extractDocumentSentiment" json:"extract_document_sentiment,omitempty"` + // Extract entities and their associated sentiment. 
+ ExtractEntitySentiment bool `protobuf:"varint,4,opt,name=extract_entity_sentiment,json=extractEntitySentiment" json:"extract_entity_sentiment,omitempty"` +} + +func (m *AnnotateTextRequest_Features) Reset() { *m = AnnotateTextRequest_Features{} } +func (m *AnnotateTextRequest_Features) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextRequest_Features) ProtoMessage() {} +func (*AnnotateTextRequest_Features) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{17, 0} +} + +func (m *AnnotateTextRequest_Features) GetExtractSyntax() bool { + if m != nil { + return m.ExtractSyntax + } + return false +} + +func (m *AnnotateTextRequest_Features) GetExtractEntities() bool { + if m != nil { + return m.ExtractEntities + } + return false +} + +func (m *AnnotateTextRequest_Features) GetExtractDocumentSentiment() bool { + if m != nil { + return m.ExtractDocumentSentiment + } + return false +} + +func (m *AnnotateTextRequest_Features) GetExtractEntitySentiment() bool { + if m != nil { + return m.ExtractEntitySentiment + } + return false +} + +// The text annotations response message. +type AnnotateTextResponse struct { + // Sentences in the input document. Populated if the user enables + // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax]. + Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences" json:"sentences,omitempty"` + // Tokens, along with their syntactic information, in the input document. + // Populated if the user enables + // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax]. + Tokens []*Token `protobuf:"bytes,2,rep,name=tokens" json:"tokens,omitempty"` + // Entities, along with their semantic information, in the input document. + // Populated if the user enables + // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entities]. 
+ Entities []*Entity `protobuf:"bytes,3,rep,name=entities" json:"entities,omitempty"` + // The overall sentiment for the document. Populated if the user enables + // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment]. + DocumentSentiment *Sentiment `protobuf:"bytes,4,opt,name=document_sentiment,json=documentSentiment" json:"document_sentiment,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1.Document.language] field for more details. + Language string `protobuf:"bytes,5,opt,name=language" json:"language,omitempty"` +} + +func (m *AnnotateTextResponse) Reset() { *m = AnnotateTextResponse{} } +func (m *AnnotateTextResponse) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextResponse) ProtoMessage() {} +func (*AnnotateTextResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *AnnotateTextResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +func (m *AnnotateTextResponse) GetTokens() []*Token { + if m != nil { + return m.Tokens + } + return nil +} + +func (m *AnnotateTextResponse) GetEntities() []*Entity { + if m != nil { + return m.Entities + } + return nil +} + +func (m *AnnotateTextResponse) GetDocumentSentiment() *Sentiment { + if m != nil { + return m.DocumentSentiment + } + return nil +} + +func (m *AnnotateTextResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +func init() { + proto.RegisterType((*Document)(nil), "google.cloud.language.v1.Document") + proto.RegisterType((*Sentence)(nil), "google.cloud.language.v1.Sentence") + proto.RegisterType((*Entity)(nil), "google.cloud.language.v1.Entity") + proto.RegisterType((*Token)(nil), "google.cloud.language.v1.Token") + 
proto.RegisterType((*Sentiment)(nil), "google.cloud.language.v1.Sentiment") + proto.RegisterType((*PartOfSpeech)(nil), "google.cloud.language.v1.PartOfSpeech") + proto.RegisterType((*DependencyEdge)(nil), "google.cloud.language.v1.DependencyEdge") + proto.RegisterType((*EntityMention)(nil), "google.cloud.language.v1.EntityMention") + proto.RegisterType((*TextSpan)(nil), "google.cloud.language.v1.TextSpan") + proto.RegisterType((*AnalyzeSentimentRequest)(nil), "google.cloud.language.v1.AnalyzeSentimentRequest") + proto.RegisterType((*AnalyzeSentimentResponse)(nil), "google.cloud.language.v1.AnalyzeSentimentResponse") + proto.RegisterType((*AnalyzeEntitySentimentRequest)(nil), "google.cloud.language.v1.AnalyzeEntitySentimentRequest") + proto.RegisterType((*AnalyzeEntitySentimentResponse)(nil), "google.cloud.language.v1.AnalyzeEntitySentimentResponse") + proto.RegisterType((*AnalyzeEntitiesRequest)(nil), "google.cloud.language.v1.AnalyzeEntitiesRequest") + proto.RegisterType((*AnalyzeEntitiesResponse)(nil), "google.cloud.language.v1.AnalyzeEntitiesResponse") + proto.RegisterType((*AnalyzeSyntaxRequest)(nil), "google.cloud.language.v1.AnalyzeSyntaxRequest") + proto.RegisterType((*AnalyzeSyntaxResponse)(nil), "google.cloud.language.v1.AnalyzeSyntaxResponse") + proto.RegisterType((*AnnotateTextRequest)(nil), "google.cloud.language.v1.AnnotateTextRequest") + proto.RegisterType((*AnnotateTextRequest_Features)(nil), "google.cloud.language.v1.AnnotateTextRequest.Features") + proto.RegisterType((*AnnotateTextResponse)(nil), "google.cloud.language.v1.AnnotateTextResponse") + proto.RegisterEnum("google.cloud.language.v1.EncodingType", EncodingType_name, EncodingType_value) + proto.RegisterEnum("google.cloud.language.v1.Document_Type", Document_Type_name, Document_Type_value) + proto.RegisterEnum("google.cloud.language.v1.Entity_Type", Entity_Type_name, Entity_Type_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Tag", PartOfSpeech_Tag_name, 
PartOfSpeech_Tag_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Aspect", PartOfSpeech_Aspect_name, PartOfSpeech_Aspect_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Case", PartOfSpeech_Case_name, PartOfSpeech_Case_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Form", PartOfSpeech_Form_name, PartOfSpeech_Form_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Gender", PartOfSpeech_Gender_name, PartOfSpeech_Gender_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Mood", PartOfSpeech_Mood_name, PartOfSpeech_Mood_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Number", PartOfSpeech_Number_name, PartOfSpeech_Number_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Person", PartOfSpeech_Person_name, PartOfSpeech_Person_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Proper", PartOfSpeech_Proper_name, PartOfSpeech_Proper_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Reciprocity", PartOfSpeech_Reciprocity_name, PartOfSpeech_Reciprocity_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Tense", PartOfSpeech_Tense_name, PartOfSpeech_Tense_value) + proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Voice", PartOfSpeech_Voice_name, PartOfSpeech_Voice_value) + proto.RegisterEnum("google.cloud.language.v1.DependencyEdge_Label", DependencyEdge_Label_name, DependencyEdge_Label_value) + proto.RegisterEnum("google.cloud.language.v1.EntityMention_Type", EntityMention_Type_name, EntityMention_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for LanguageService service + +type LanguageServiceClient interface { + // Analyzes the sentiment of the provided text. + AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error) + // Finds named entities (currently proper names and common nouns) in the text + // along with entity types, salience, mentions for each entity, and + // other properties. + AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error) + // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes + // sentiment associated with each entity and its mentions. + AnalyzeEntitySentiment(ctx context.Context, in *AnalyzeEntitySentimentRequest, opts ...grpc.CallOption) (*AnalyzeEntitySentimentResponse, error) + // Analyzes the syntax of the text and provides sentence boundaries and + // tokenization along with part of speech tags, dependency trees, and other + // properties. + AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error) + // A convenience method that provides all the features that analyzeSentiment, + // analyzeEntities, and analyzeSyntax provide in one call. + AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error) +} + +type languageServiceClient struct { + cc *grpc.ClientConn +} + +func NewLanguageServiceClient(cc *grpc.ClientConn) LanguageServiceClient { + return &languageServiceClient{cc} +} + +func (c *languageServiceClient) AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error) { + out := new(AnalyzeSentimentResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnalyzeSentiment", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error) { + out := new(AnalyzeEntitiesResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnalyzeEntities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnalyzeEntitySentiment(ctx context.Context, in *AnalyzeEntitySentimentRequest, opts ...grpc.CallOption) (*AnalyzeEntitySentimentResponse, error) { + out := new(AnalyzeEntitySentimentResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnalyzeEntitySentiment", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error) { + out := new(AnalyzeSyntaxResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnalyzeSyntax", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error) { + out := new(AnnotateTextResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnnotateText", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for LanguageService service + +type LanguageServiceServer interface { + // Analyzes the sentiment of the provided text. + AnalyzeSentiment(context.Context, *AnalyzeSentimentRequest) (*AnalyzeSentimentResponse, error) + // Finds named entities (currently proper names and common nouns) in the text + // along with entity types, salience, mentions for each entity, and + // other properties. 
+ AnalyzeEntities(context.Context, *AnalyzeEntitiesRequest) (*AnalyzeEntitiesResponse, error) + // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes + // sentiment associated with each entity and its mentions. + AnalyzeEntitySentiment(context.Context, *AnalyzeEntitySentimentRequest) (*AnalyzeEntitySentimentResponse, error) + // Analyzes the syntax of the text and provides sentence boundaries and + // tokenization along with part of speech tags, dependency trees, and other + // properties. + AnalyzeSyntax(context.Context, *AnalyzeSyntaxRequest) (*AnalyzeSyntaxResponse, error) + // A convenience method that provides all the features that analyzeSentiment, + // analyzeEntities, and analyzeSyntax provide in one call. + AnnotateText(context.Context, *AnnotateTextRequest) (*AnnotateTextResponse, error) +} + +func RegisterLanguageServiceServer(s *grpc.Server, srv LanguageServiceServer) { + s.RegisterService(&_LanguageService_serviceDesc, srv) +} + +func _LanguageService_AnalyzeSentiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeSentimentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeSentiment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1.LanguageService/AnalyzeSentiment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeSentiment(ctx, req.(*AnalyzeSentimentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnalyzeEntities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeEntitiesRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeEntities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1.LanguageService/AnalyzeEntities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeEntities(ctx, req.(*AnalyzeEntitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnalyzeEntitySentiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeEntitySentimentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeEntitySentiment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1.LanguageService/AnalyzeEntitySentiment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeEntitySentiment(ctx, req.(*AnalyzeEntitySentimentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnalyzeSyntax_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeSyntaxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeSyntax(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1.LanguageService/AnalyzeSyntax", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeSyntax(ctx, req.(*AnalyzeSyntaxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnnotateText_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnnotateTextRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnnotateText(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1.LanguageService/AnnotateText", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnnotateText(ctx, req.(*AnnotateTextRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LanguageService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.language.v1.LanguageService", + HandlerType: (*LanguageServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AnalyzeSentiment", + Handler: _LanguageService_AnalyzeSentiment_Handler, + }, + { + MethodName: "AnalyzeEntities", + Handler: _LanguageService_AnalyzeEntities_Handler, + }, + { + MethodName: "AnalyzeEntitySentiment", + Handler: _LanguageService_AnalyzeEntitySentiment_Handler, + }, + { + MethodName: "AnalyzeSyntax", + Handler: _LanguageService_AnalyzeSyntax_Handler, + }, + { + MethodName: "AnnotateText", + Handler: _LanguageService_AnnotateText_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/language/v1/language_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/language/v1/language_service.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2847 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4b, 0x73, 0x1b, 0xc7, + 0xb5, 0xd6, 0xe0, 0x45, 0xa0, 0xc1, 0x47, 0x6b, 0x2c, 0xdb, 0xb8, 0xb4, 0x6c, 0xcb, 0x63, 0x4b, + 0xa6, 0x64, 0x1b, 0x34, 0xe9, 0x7b, 0x65, 0x5a, 0xd2, 0xb5, 0x35, 0x9c, 0x69, 0x80, 0x43, 0x0d, + 0x7a, 0x46, 0x3d, 0x33, 0x10, 0xed, 0x0d, 0x6a, 0x04, 0xb4, 0x60, 0x94, 0xc9, 0x19, 0x5c, 0x60, + 0xa8, 0x12, 0xbd, 0xb9, 0x55, 0xa9, 
0xca, 0x32, 0xab, 0x64, 0x91, 0x65, 0xaa, 0xf2, 0xa8, 0x4a, + 0x95, 0x2b, 0xf9, 0x03, 0xa9, 0xca, 0x1f, 0x70, 0x56, 0xc9, 0x4f, 0xc8, 0x2e, 0xfb, 0x54, 0x36, + 0x79, 0xd4, 0xe9, 0x9e, 0xc1, 0x83, 0x22, 0x25, 0x52, 0xe5, 0x54, 0xbc, 0xeb, 0x3e, 0x38, 0xdf, + 0x79, 0xf7, 0x39, 0x3d, 0x0d, 0xb4, 0xde, 0x8f, 0xe3, 0xfe, 0x3e, 0x5f, 0xef, 0xee, 0xc7, 0x87, + 0xbd, 0xf5, 0xfd, 0x30, 0xea, 0x1f, 0x86, 0x7d, 0xbe, 0xfe, 0x78, 0x63, 0xb2, 0xee, 0x8c, 0xf9, + 0xe8, 0xf1, 0xa0, 0xcb, 0xeb, 0xc3, 0x51, 0x9c, 0xc4, 0x6a, 0x4d, 0x02, 0xea, 0x02, 0x50, 0xcf, + 0x98, 0xea, 0x8f, 0x37, 0x56, 0x2f, 0xa7, 0xa2, 0xc2, 0xe1, 0x60, 0x3d, 0x8c, 0xa2, 0x38, 0x09, + 0x93, 0x41, 0x1c, 0x8d, 0x25, 0x4e, 0xfb, 0x8b, 0x82, 0xca, 0x66, 0xdc, 0x3d, 0x3c, 0xe0, 0x51, + 0xa2, 0xde, 0x46, 0x85, 0xe4, 0x68, 0xc8, 0x6b, 0xca, 0x15, 0x65, 0x6d, 0x79, 0xf3, 0xdd, 0xfa, + 0x69, 0x32, 0xeb, 0x19, 0xa2, 0xee, 0x1f, 0x0d, 0x39, 0x13, 0x20, 0x75, 0x15, 0x2d, 0x74, 0xe3, + 0x28, 0xe1, 0x51, 0x52, 0xcb, 0x5d, 0x51, 0xd6, 0x2a, 0x3b, 0x17, 0x58, 0x46, 0x50, 0xd7, 0xd0, + 0x4a, 0xbf, 0x3b, 0xee, 0xa4, 0xdb, 0xce, 0xe1, 0x68, 0x50, 0xcb, 0xa7, 0x3c, 0x4b, 0xfd, 0xee, + 0xd8, 0x90, 0xf4, 0x60, 0x34, 0x50, 0x57, 0x51, 0x39, 0x53, 0x54, 0x2b, 0x00, 0x0b, 0x9b, 0xec, + 0xb5, 0x9b, 0xa8, 0x00, 0xfa, 0xd4, 0x4b, 0x08, 0xfb, 0x9f, 0xbb, 0xa4, 0x13, 0x50, 0xcf, 0x25, + 0x86, 0xd5, 0xb0, 0x88, 0x89, 0x2f, 0xa8, 0xcb, 0x08, 0xb9, 0xb6, 0x6e, 0xd1, 0x8e, 0x4f, 0xf6, + 0x7c, 0xac, 0xa8, 0x65, 0x54, 0xd8, 0xf1, 0x5b, 0x36, 0xce, 0x6d, 0x97, 0x51, 0x69, 0x1c, 0x1f, + 0x8e, 0xba, 0x5c, 0xfb, 0xa1, 0x82, 0xca, 0x1e, 0x07, 0x65, 0x5d, 0xae, 0xde, 0x44, 0x85, 0x84, + 0x3f, 0x49, 0x84, 0xb7, 0xd5, 0x4d, 0xed, 0x74, 0x6f, 0x7d, 0xfe, 0x24, 0xf1, 0x86, 0x61, 0xc4, + 0x04, 0xbf, 0xaa, 0xa3, 0xca, 0x98, 0x47, 0xc9, 0xe0, 0x20, 0x73, 0xb5, 0xba, 0xf9, 0xf6, 0xe9, + 0x60, 0x2f, 0x63, 0x65, 0x53, 0x94, 0xf6, 0xcf, 0x3c, 0x2a, 0x91, 0x28, 0x19, 0x24, 0x47, 0xaa, + 0x8a, 0x0a, 0x51, 0x78, 0x20, 0x63, 0x5e, 0x61, 0x62, 0xad, 0x7e, 0x92, 
0xe6, 0x21, 0x27, 0xf2, + 0x70, 0xf5, 0x74, 0xe1, 0x52, 0xc6, 0x6c, 0x16, 0x76, 0x51, 0xf9, 0x80, 0x27, 0x61, 0x2f, 0x4c, + 0xc2, 0x5a, 0xfe, 0x4a, 0x7e, 0xad, 0xba, 0x59, 0x7f, 0x2e, 0xbc, 0x95, 0x02, 0x48, 0x94, 0x8c, + 0x8e, 0xd8, 0x04, 0x0f, 0xb9, 0x18, 0x87, 0xfb, 0x03, 0x08, 0x96, 0xc8, 0x45, 0x8e, 0x4d, 0xf6, + 0xaa, 0x01, 0x7a, 0x22, 0x51, 0x49, 0xb5, 0xa2, 0xd0, 0xf3, 0xee, 0xf3, 0xf4, 0xb4, 0x24, 0x3f, + 0x9b, 0x00, 0xe7, 0x23, 0x59, 0x7a, 0x91, 0x48, 0xae, 0xde, 0x46, 0x4b, 0x73, 0xe6, 0xab, 0x18, + 0xe5, 0xbf, 0xe2, 0x47, 0x69, 0x38, 0x61, 0xa9, 0x5e, 0x42, 0xc5, 0xc7, 0xe1, 0xfe, 0xa1, 0x0c, + 0x67, 0x85, 0xc9, 0xcd, 0xad, 0xdc, 0x96, 0xa2, 0x1d, 0xa5, 0x05, 0x55, 0x45, 0x0b, 0x01, 0xbd, + 0x47, 0x9d, 0x07, 0x14, 0x5f, 0x50, 0x11, 0x2a, 0xb9, 0x84, 0x79, 0x0e, 0xc5, 0x8a, 0xba, 0x88, + 0xca, 0xb6, 0x63, 0xe8, 0xbe, 0xe5, 0x50, 0x9c, 0x53, 0x31, 0x5a, 0x74, 0x58, 0x53, 0xa7, 0xd6, + 0x17, 0x92, 0x92, 0x57, 0x2b, 0xa8, 0x48, 0xda, 0x84, 0xfa, 0xb8, 0xa0, 0xae, 0xa0, 0xea, 0x03, + 0x87, 0xdd, 0xeb, 0x38, 0x8d, 0x8e, 0xce, 0x7c, 0x5c, 0x54, 0x2f, 0xa2, 0x25, 0xc3, 0xa1, 0x5e, + 0xd0, 0x22, 0xac, 0xd3, 0x74, 0x1c, 0x13, 0x97, 0x80, 0xdd, 0xf1, 0x77, 0x08, 0xc3, 0x0b, 0xda, + 0x5f, 0x15, 0x54, 0xf4, 0xe3, 0xaf, 0x78, 0xf4, 0xc2, 0x65, 0x68, 0xa3, 0xe5, 0x61, 0x38, 0x4a, + 0x3a, 0xf1, 0xa3, 0xce, 0x78, 0xc8, 0x79, 0xf7, 0xcb, 0xb4, 0x16, 0xaf, 0x9d, 0x2e, 0xc1, 0x0d, + 0x47, 0x89, 0xf3, 0xc8, 0x13, 0xdc, 0x6c, 0x71, 0x38, 0xb3, 0x53, 0xef, 0xa3, 0x95, 0x1e, 0x1f, + 0xf2, 0xa8, 0xc7, 0xa3, 0xee, 0x51, 0x87, 0xf7, 0xfa, 0x5c, 0x9c, 0xd0, 0xea, 0xe6, 0xda, 0x33, + 0xba, 0xc0, 0x04, 0x40, 0x7a, 0x7d, 0xce, 0x96, 0x7b, 0x73, 0x7b, 0x88, 0xfb, 0x3e, 0x3f, 0x38, + 0x08, 0xd3, 0x73, 0x2c, 0x37, 0xda, 0x67, 0xa8, 0x32, 0x49, 0xa4, 0x7a, 0x19, 0x55, 0x0e, 0xc2, + 0x7e, 0x34, 0x48, 0x0e, 0x7b, 0x32, 0x3d, 0x39, 0x36, 0x25, 0x80, 0x80, 0x71, 0x37, 0x1e, 0x49, + 0x4b, 0x72, 0x4c, 0x6e, 0xb4, 0xbf, 0x63, 0xb4, 0x38, 0xeb, 0x88, 0x7a, 0x07, 0xe5, 0x93, 0xb0, + 0x9f, 0x36, 
0xad, 0x1b, 0x67, 0xf3, 0xbe, 0xee, 0x87, 0x7d, 0x06, 0x30, 0x95, 0xa0, 0x52, 0x38, + 0x1e, 0xf2, 0x6e, 0x92, 0x9e, 0xb6, 0x0f, 0xce, 0x28, 0x40, 0x17, 0x20, 0x96, 0x82, 0xd5, 0xcf, + 0x50, 0xa1, 0x1b, 0x8e, 0xa5, 0xa9, 0xcb, 0x9b, 0xef, 0x9d, 0x51, 0x88, 0x11, 0x8e, 0x39, 0x13, + 0x40, 0x10, 0xf0, 0x28, 0x1e, 0x1d, 0x88, 0x60, 0x9d, 0x5d, 0x40, 0x23, 0x1e, 0x1d, 0x30, 0x01, + 0x04, 0x47, 0xfa, 0x10, 0xfe, 0x51, 0xad, 0x78, 0x2e, 0x47, 0x9a, 0x02, 0xc4, 0x52, 0x30, 0xd8, + 0x71, 0x10, 0xc7, 0x3d, 0x71, 0x1c, 0xcf, 0x6e, 0x47, 0x2b, 0x8e, 0x7b, 0x4c, 0x00, 0xc1, 0x8e, + 0xe8, 0xf0, 0xe0, 0x21, 0x1f, 0xd5, 0x16, 0xce, 0x65, 0x07, 0x15, 0x20, 0x96, 0x82, 0x41, 0xcc, + 0x90, 0x8f, 0xc6, 0x71, 0x54, 0x2b, 0x9f, 0x4b, 0x8c, 0x2b, 0x40, 0x2c, 0x05, 0x0b, 0x31, 0xa3, + 0x78, 0xc8, 0x47, 0xb5, 0xca, 0xf9, 0xc4, 0x08, 0x10, 0x4b, 0xc1, 0xaa, 0x8f, 0xaa, 0x23, 0xde, + 0x1d, 0x0c, 0x47, 0x71, 0x77, 0x90, 0x1c, 0xd5, 0x90, 0x90, 0xb5, 0x79, 0x46, 0x59, 0x6c, 0x8a, + 0x64, 0xb3, 0x62, 0xd4, 0x6d, 0x54, 0x4c, 0x78, 0x34, 0xe6, 0xb5, 0xaa, 0x90, 0xf7, 0xfe, 0x59, + 0x6b, 0x17, 0x30, 0x4c, 0x42, 0x41, 0xc6, 0xe3, 0x78, 0xd0, 0xe5, 0xb5, 0xc5, 0x73, 0xc9, 0x68, + 0x03, 0x86, 0x49, 0xa8, 0xf6, 0x23, 0x05, 0xe5, 0xfd, 0xb0, 0x3f, 0xdf, 0x07, 0x17, 0x50, 0x5e, + 0x37, 0x77, 0xb1, 0x22, 0x17, 0x2e, 0xce, 0xc9, 0x45, 0x1b, 0xe7, 0x61, 0xb4, 0x1a, 0x0e, 0xdd, + 0xc5, 0x05, 0x20, 0x99, 0x04, 0xba, 0x5d, 0x19, 0x15, 0xa8, 0x13, 0x50, 0x5c, 0x02, 0x12, 0x0d, + 0x5a, 0x78, 0x01, 0x48, 0x2e, 0x73, 0x28, 0x2e, 0x03, 0xc9, 0x65, 0x3e, 0xae, 0x40, 0x03, 0x74, + 0x03, 0x6a, 0xf8, 0x18, 0xc1, 0xaf, 0x6d, 0xc2, 0xb6, 0x71, 0x55, 0x2d, 0x22, 0x65, 0x0f, 0x2f, + 0xc2, 0x6f, 0x7a, 0xa3, 0x61, 0xed, 0xe1, 0x25, 0xcd, 0x41, 0x25, 0x79, 0xbc, 0x54, 0x15, 0x2d, + 0xeb, 0x30, 0xe4, 0xfd, 0xce, 0xd4, 0x30, 0x18, 0xf4, 0x84, 0x35, 0x88, 0xe1, 0x5b, 0x6d, 0x82, + 0x15, 0x68, 0xcb, 0x56, 0x6b, 0x86, 0x92, 0x83, 0x5e, 0xec, 0x32, 0xa7, 0xc9, 0x88, 0xe7, 0x01, + 0x21, 0xaf, 0xfd, 0x4d, 0x41, 0x05, 0x38, 0x6b, 
0xc0, 0x6b, 0xe8, 0x1e, 0x99, 0x97, 0xa6, 0x1b, + 0x46, 0xe0, 0xe9, 0xa9, 0xb4, 0x25, 0x54, 0xd1, 0x4d, 0xb0, 0xcc, 0xd2, 0x6d, 0x9c, 0x93, 0x5d, + 0xbc, 0xe5, 0xda, 0xa4, 0x45, 0xa8, 0xe0, 0xc8, 0xc3, 0x80, 0x30, 0x25, 0x77, 0x01, 0x06, 0x44, + 0x93, 0x50, 0x4b, 0xec, 0x8a, 0xc2, 0x12, 0xea, 0xf9, 0x2c, 0x00, 0x66, 0xdd, 0xc6, 0xa5, 0xe9, + 0x00, 0x69, 0x13, 0xbc, 0x00, 0xba, 0xa8, 0xd3, 0xb2, 0xa8, 0xdc, 0x97, 0x21, 0xde, 0xce, 0xb6, + 0x6d, 0xdd, 0x0f, 0x08, 0xae, 0x80, 0x62, 0x57, 0x67, 0xbe, 0x94, 0x85, 0x40, 0xb1, 0xcb, 0x88, + 0xeb, 0x78, 0x16, 0xcc, 0x1a, 0xdd, 0xc6, 0x55, 0x08, 0x06, 0x23, 0x0d, 0x9b, 0xec, 0x59, 0x6d, + 0xd2, 0x01, 0x37, 0xf0, 0x22, 0xb0, 0x31, 0x62, 0x0b, 0x81, 0x92, 0xb4, 0x04, 0x3a, 0xdb, 0x99, + 0xce, 0x65, 0xed, 0xb7, 0x0a, 0x2a, 0x40, 0x97, 0x00, 0xe3, 0x1a, 0x0e, 0x6b, 0xcd, 0xb8, 0xbe, + 0x88, 0xca, 0xba, 0x09, 0x06, 0xe9, 0x76, 0xea, 0x78, 0xb0, 0x67, 0xd9, 0x96, 0xce, 0x3e, 0xc7, + 0x39, 0x50, 0x36, 0xe3, 0xf8, 0x17, 0x84, 0xe1, 0xbc, 0x10, 0x61, 0x51, 0xdd, 0xee, 0x10, 0x6a, + 0x5a, 0xb4, 0x89, 0x0b, 0x10, 0x8b, 0x26, 0x61, 0x01, 0x35, 0x71, 0x11, 0xd6, 0x8c, 0xe8, 0xb6, + 0xe5, 0x49, 0xbf, 0x2d, 0x96, 0xee, 0x16, 0x20, 0xb5, 0xde, 0x8e, 0xc3, 0x7c, 0x5c, 0x86, 0xb4, + 0xdb, 0x0e, 0x6d, 0xca, 0x5a, 0x70, 0x98, 0x49, 0x18, 0x46, 0xc0, 0x9d, 0xde, 0xe4, 0x0c, 0x5c, + 0xd5, 0x08, 0x2a, 0xc9, 0x9e, 0x04, 0x36, 0x34, 0x09, 0x35, 0x09, 0x9b, 0x37, 0xba, 0x41, 0x5a, + 0x16, 0xb5, 0x68, 0x9a, 0xad, 0x96, 0xee, 0x19, 0x81, 0x0d, 0xdb, 0x1c, 0x98, 0x40, 0x49, 0xe0, + 0x83, 0xb1, 0xda, 0xff, 0xa3, 0x02, 0x74, 0x25, 0x30, 0xba, 0xe5, 0x38, 0xe6, 0x8c, 0x88, 0x4b, + 0x08, 0x1b, 0x0e, 0x35, 0xd3, 0xc0, 0x76, 0xe0, 0x57, 0xac, 0x40, 0x72, 0x44, 0x19, 0xe9, 0x69, + 0x11, 0xc1, 0x9e, 0x9a, 0x56, 0x1a, 0xc8, 0x3c, 0x44, 0xda, 0xa2, 0x3e, 0x61, 0xcc, 0x69, 0x66, + 0xd9, 0xaf, 0xa2, 0x85, 0xdd, 0x40, 0xd6, 0x58, 0x11, 0x8a, 0xce, 0x0b, 0xb6, 0x77, 0xa1, 0xbc, + 0x81, 0x50, 0xd2, 0xee, 0xa2, 0x92, 0xec, 0x69, 0xe0, 0x07, 0x0d, 0x5a, 0xdb, 0xc7, 
0xfd, 0xf0, + 0x2c, 0xda, 0x0c, 0x6c, 0x9d, 0x61, 0x45, 0x5c, 0x3a, 0xec, 0x80, 0x89, 0x92, 0x2b, 0xa3, 0x82, + 0x19, 0xe8, 0x36, 0xce, 0x6b, 0x3e, 0x2a, 0xc9, 0x76, 0x06, 0x12, 0xe4, 0xa5, 0x64, 0x46, 0x42, + 0x05, 0x15, 0x1b, 0x16, 0xf3, 0x7c, 0x09, 0xf7, 0x08, 0xf8, 0x84, 0x73, 0x40, 0xf6, 0x77, 0x2c, + 0x66, 0xe2, 0x3c, 0x38, 0x3a, 0x2d, 0x98, 0xf4, 0x52, 0x53, 0xd0, 0xb6, 0x50, 0x49, 0x76, 0x37, + 0x21, 0x95, 0x39, 0xee, 0x9c, 0x5d, 0x60, 0x89, 0xa0, 0xc9, 0x90, 0x50, 0xc7, 0xef, 0xa4, 0xfb, + 0x9c, 0xb6, 0x8b, 0xaa, 0x33, 0xbd, 0x4c, 0x7d, 0x15, 0xbd, 0xc4, 0x88, 0x61, 0xb9, 0xcc, 0x31, + 0x2c, 0xff, 0xf3, 0xf9, 0x33, 0x95, 0xfd, 0x20, 0x4a, 0x0b, 0xfc, 0x77, 0x68, 0x67, 0x86, 0x96, + 0xd3, 0xc6, 0xa8, 0x28, 0xfa, 0x18, 0xc4, 0xd5, 0x27, 0x74, 0xee, 0x4c, 0xbe, 0x8c, 0x2e, 0xce, + 0x26, 0x48, 0xfc, 0x2c, 0xbd, 0x6c, 0x04, 0x7e, 0xc0, 0x88, 0x0c, 0x92, 0xab, 0x7b, 0x3e, 0xce, + 0x43, 0x12, 0x5c, 0x46, 0x3c, 0x79, 0x0b, 0x5b, 0x42, 0x95, 0x49, 0x2f, 0xc0, 0x45, 0xf9, 0x4d, + 0x10, 0x64, 0xfb, 0x92, 0xb6, 0x8d, 0x8a, 0xa2, 0xf1, 0x81, 0xd2, 0xb6, 0x63, 0x19, 0x64, 0xde, + 0x71, 0xdd, 0x98, 0x36, 0x01, 0x43, 0xcf, 0x7a, 0x42, 0x4e, 0xa8, 0xd0, 0xb3, 0x5e, 0xf2, 0x4d, + 0x19, 0x2d, 0xcf, 0xdf, 0x7c, 0xd4, 0x35, 0x84, 0xbf, 0xe4, 0x61, 0xaf, 0x93, 0xc0, 0x85, 0xae, + 0x33, 0x88, 0x7a, 0xfc, 0x89, 0xb8, 0x8e, 0x14, 0xd9, 0x32, 0xd0, 0xc5, 0x3d, 0xcf, 0x02, 0xaa, + 0x6a, 0xa2, 0xe2, 0x7e, 0xf8, 0x90, 0xef, 0xa7, 0x97, 0x8d, 0xfa, 0x59, 0x2f, 0x57, 0x75, 0x1b, + 0x50, 0x4c, 0x82, 0xb5, 0x5f, 0x2f, 0xa0, 0xa2, 0x20, 0x3c, 0x75, 0x73, 0xd5, 0xb7, 0xb7, 0x19, + 0x69, 0x63, 0x45, 0x74, 0x53, 0x38, 0xbf, 0xb2, 0x20, 0x74, 0xb3, 0x6d, 0xd8, 0xb2, 0x75, 0xe9, + 0x66, 0xbb, 0xe5, 0x98, 0xb8, 0x00, 0x11, 0xd4, 0x61, 0x55, 0x14, 0x0c, 0xae, 0xeb, 0xc0, 0xb9, + 0x05, 0xa2, 0xef, 0x33, 0xbc, 0x20, 0x9a, 0x7d, 0xb0, 0x27, 0x9b, 0x94, 0x1e, 0xec, 0x81, 0xff, + 0xb8, 0xa2, 0x96, 0x50, 0xce, 0x30, 0x30, 0x02, 0x88, 0x21, 0xc4, 0x57, 0x27, 0xc3, 0x40, 0x74, + 0x70, 0x03, 0x8e, 0x00, 
0x5e, 0x12, 0x01, 0x84, 0xa5, 0x80, 0x2d, 0xcb, 0x31, 0xe1, 0xe2, 0x95, + 0x6c, 0x5e, 0x60, 0x60, 0x30, 0x2d, 0xcf, 0x70, 0x02, 0xe6, 0x11, 0x7c, 0x51, 0xd4, 0xbc, 0xb3, + 0xbd, 0x8b, 0x55, 0x58, 0x91, 0x3d, 0xd7, 0xc6, 0x2f, 0x89, 0xde, 0xea, 0x10, 0xef, 0x81, 0xe5, + 0xef, 0xe0, 0x4b, 0x40, 0xb7, 0x80, 0xe3, 0x65, 0x58, 0xb5, 0x74, 0x76, 0x0f, 0xbf, 0x02, 0xd2, + 0x5a, 0x0f, 0x08, 0x7e, 0x55, 0x2e, 0xda, 0xb8, 0x26, 0x86, 0x0f, 0x69, 0xe2, 0xff, 0x02, 0x43, + 0x29, 0xc5, 0xab, 0x20, 0x84, 0xba, 0xa9, 0xcf, 0xaf, 0x81, 0x85, 0x54, 0x58, 0x78, 0x19, 0x0c, + 0xa0, 0x13, 0x0b, 0x5f, 0xcf, 0xa6, 0xd6, 0x1b, 0xa2, 0x85, 0x88, 0xb3, 0x8a, 0xdf, 0x84, 0xc9, + 0xe4, 0xe2, 0x2b, 0x69, 0x67, 0xd6, 0x7d, 0x7d, 0xcf, 0xf2, 0xf0, 0x5b, 0xb2, 0x1a, 0x98, 0x0f, + 0x12, 0x35, 0x31, 0xd1, 0x44, 0x20, 0xde, 0x16, 0x25, 0x09, 0x16, 0xbe, 0x23, 0x57, 0x9e, 0x87, + 0xaf, 0x0a, 0x5e, 0xc7, 0xf3, 0xc1, 0xa6, 0x6b, 0x69, 0xa5, 0x0a, 0xee, 0x77, 0x27, 0x1b, 0xba, + 0x8b, 0xd7, 0xe4, 0xa1, 0x23, 0x10, 0x99, 0xeb, 0x72, 0x6c, 0x92, 0x06, 0xbe, 0x91, 0xae, 0x5c, + 0xfc, 0x9e, 0xd0, 0xc2, 0x1c, 0x6a, 0xe3, 0xf7, 0xb3, 0x59, 0xfa, 0x01, 0x78, 0xe8, 0x7a, 0xb8, + 0x0e, 0x1e, 0xde, 0x0f, 0x74, 0x2a, 0xec, 0x59, 0x07, 0x4e, 0x66, 0xc0, 0xf2, 0x43, 0xf8, 0x41, + 0x2c, 0x19, 0xb1, 0xf1, 0x86, 0xf8, 0xc1, 0x64, 0x8e, 0x8b, 0x37, 0x41, 0x04, 0x28, 0xf8, 0x08, + 0x6c, 0x60, 0xa4, 0x45, 0x75, 0xea, 0xe3, 0xff, 0x96, 0x87, 0x16, 0xfc, 0xa4, 0x66, 0xd0, 0xc2, + 0xff, 0x03, 0xda, 0x99, 0xe3, 0xf8, 0xf8, 0x26, 0xac, 0x3c, 0x08, 0xce, 0xc7, 0x62, 0x15, 0x34, + 0x1a, 0x78, 0x0b, 0x56, 0x42, 0xe3, 0x27, 0xa2, 0xdf, 0x38, 0xae, 0x65, 0xe0, 0x5b, 0x62, 0xa6, + 0x03, 0xf1, 0xf6, 0xdc, 0x0c, 0xba, 0x03, 0x2c, 0x7b, 0xc2, 0xed, 0xff, 0x15, 0x9d, 0x2a, 0x10, + 0x63, 0xfe, 0x53, 0x81, 0xb4, 0x7c, 0x9b, 0xe0, 0xcf, 0xe4, 0x28, 0x6a, 0xbb, 0x3b, 0x80, 0xbe, + 0x9b, 0x96, 0x1c, 0x9c, 0x40, 0xac, 0x8b, 0xea, 0x0c, 0xf6, 0xda, 0x6d, 0xbc, 0x0d, 0x4b, 0x53, + 0x68, 0x35, 0x80, 0xa5, 0xe1, 0x30, 0x62, 0x35, 0x29, 0x36, 
0x21, 0x14, 0xf7, 0x1e, 0x60, 0x22, + 0x86, 0x8b, 0xe5, 0xf9, 0xb8, 0x21, 0xaf, 0x23, 0x2d, 0x03, 0x37, 0x45, 0x01, 0x38, 0x2d, 0x59, + 0x97, 0x3b, 0x30, 0x0c, 0xb2, 0x9d, 0x48, 0xbc, 0x25, 0x38, 0x83, 0x96, 0x81, 0x77, 0x21, 0x2c, + 0x86, 0xe3, 0xe2, 0x7b, 0x10, 0x09, 0xd3, 0xf2, 0xc4, 0xdc, 0x26, 0x26, 0xb6, 0xc5, 0x51, 0xf0, + 0x5c, 0xdc, 0x02, 0xde, 0x26, 0xa8, 0xa7, 0x62, 0x05, 0xb9, 0x76, 0xc0, 0x21, 0x8b, 0x36, 0x80, + 0xea, 0x8a, 0x32, 0x24, 0x1e, 0xbe, 0x2f, 0xea, 0x4c, 0x38, 0xcc, 0xb4, 0x7f, 0x28, 0x68, 0x69, + 0xee, 0xfb, 0xf7, 0x85, 0x3f, 0xf8, 0xee, 0xce, 0xbd, 0x0a, 0xbc, 0x7f, 0xc6, 0xcf, 0xed, 0xd9, + 0xc7, 0x81, 0xb9, 0xef, 0xed, 0xfc, 0x0b, 0xbd, 0x5c, 0x7c, 0x98, 0x7e, 0x32, 0x63, 0xb4, 0x98, + 0xbe, 0xc1, 0x9c, 0x34, 0x38, 0x10, 0x2a, 0x19, 0x4e, 0xab, 0x05, 0x5f, 0xcd, 0x5a, 0x13, 0x95, + 0x33, 0x47, 0xd4, 0xda, 0xf4, 0x8d, 0x48, 0x7e, 0xa0, 0x4f, 0x5e, 0x88, 0xde, 0x42, 0x8b, 0x0f, + 0x79, 0x7f, 0x10, 0x75, 0xe2, 0x47, 0x8f, 0xc6, 0x5c, 0x7e, 0x8c, 0x15, 0x59, 0x55, 0xd0, 0x1c, + 0x41, 0xd2, 0x7e, 0xa5, 0xa0, 0x57, 0xf5, 0x28, 0xdc, 0x3f, 0xfa, 0x9a, 0x4f, 0x4d, 0xe3, 0xff, + 0x77, 0xc8, 0xc7, 0x89, 0xfa, 0x29, 0x2a, 0xf7, 0xd2, 0x37, 0xa9, 0xe7, 0xc7, 0x35, 0x7b, 0xbd, + 0x62, 0x13, 0x8c, 0x7a, 0x0f, 0x2d, 0xf1, 0xa8, 0x1b, 0xf7, 0x06, 0x51, 0xbf, 0x33, 0x13, 0xe4, + 0x6b, 0xcf, 0x0a, 0xb2, 0x64, 0x17, 0xe1, 0x5d, 0xe4, 0x33, 0x3b, 0xed, 0x5b, 0x05, 0xd5, 0x9e, + 0x36, 0x74, 0x3c, 0x8c, 0x61, 0xdc, 0x31, 0xa4, 0x66, 0x5a, 0x3b, 0xd3, 0x64, 0x28, 0x67, 0x4f, + 0xc6, 0xc5, 0x0c, 0x3e, 0xfd, 0x8c, 0x9e, 0x7d, 0x34, 0xcb, 0xcd, 0x3f, 0x9a, 0xa9, 0x77, 0x65, + 0xce, 0x79, 0xd4, 0xe5, 0xe3, 0xf4, 0x45, 0x48, 0x7b, 0xb6, 0x1a, 0x60, 0x65, 0x53, 0x90, 0xf6, + 0x8d, 0x82, 0x5e, 0x4f, 0xdd, 0x91, 0x95, 0xf5, 0xfd, 0x8e, 0xfe, 0xd7, 0xe8, 0x8d, 0xd3, 0xac, + 0x4d, 0x53, 0x70, 0x07, 0x95, 0x81, 0x96, 0x0c, 0xf8, 0xb8, 0xa6, 0x88, 0x88, 0x5c, 0x79, 0xde, + 0x61, 0x62, 0x13, 0xc4, 0xb3, 0x82, 0xad, 0xfd, 0x52, 0x41, 0xaf, 0xcc, 0x2a, 0x1f, 0xf0, 0xf1, + 
0xf7, 0x32, 0x46, 0xe3, 0xc9, 0x49, 0x9a, 0x9a, 0xf9, 0x6f, 0x0f, 0xce, 0xcf, 0x15, 0x74, 0x29, + 0x3b, 0x16, 0x47, 0x51, 0x12, 0x3e, 0xf9, 0x5e, 0x86, 0xe6, 0x37, 0x0a, 0x7a, 0xf9, 0x98, 0x95, + 0x69, 0x64, 0xe6, 0x4e, 0x92, 0xf2, 0x02, 0x27, 0x49, 0xfd, 0x18, 0x95, 0xc4, 0x15, 0x71, 0x5c, + 0xcb, 0x09, 0xf8, 0x9b, 0xcf, 0xe8, 0xfd, 0xc0, 0xc7, 0x52, 0xf6, 0xb9, 0xb0, 0xe6, 0x8f, 0x85, + 0xf5, 0x0f, 0x79, 0xf4, 0x92, 0x2e, 0xdf, 0xf5, 0x39, 0x34, 0xda, 0xef, 0x2a, 0xaa, 0x0c, 0x95, + 0x1f, 0xf1, 0x30, 0x39, 0x1c, 0xf1, 0x71, 0xfa, 0xb2, 0x78, 0xf3, 0x74, 0xfc, 0x09, 0x06, 0xd4, + 0x1b, 0x29, 0x9a, 0x4d, 0xe4, 0x3c, 0x9d, 0xa9, 0xfc, 0x8b, 0x67, 0x6a, 0xf5, 0x4f, 0x0a, 0x2a, + 0x67, 0x3a, 0xd4, 0xab, 0x68, 0x99, 0x3f, 0x49, 0x46, 0x61, 0x37, 0xe9, 0x8c, 0x45, 0xda, 0x84, + 0xcf, 0x65, 0xb6, 0x94, 0x52, 0x65, 0x2e, 0xd5, 0xeb, 0x08, 0x67, 0x6c, 0x93, 0x2a, 0xcf, 0x09, + 0xc6, 0x95, 0x94, 0x9e, 0x1d, 0x08, 0xf5, 0x0e, 0x5a, 0xcd, 0x58, 0x4f, 0x68, 0xd8, 0x79, 0x01, + 0xaa, 0xa5, 0x1c, 0xe6, 0x53, 0x2d, 0x79, 0x0b, 0xd5, 0xe6, 0x14, 0x1d, 0xcd, 0x60, 0x0b, 0x02, + 0xfb, 0xca, 0xac, 0xc2, 0x69, 0x97, 0xd2, 0x7e, 0x9f, 0x83, 0x63, 0x32, 0x1b, 0xce, 0xff, 0x7c, + 0xfd, 0xcd, 0x36, 0x85, 0xfc, 0xb9, 0x9b, 0xc2, 0xc9, 0x23, 0xaf, 0xf0, 0x9d, 0x8d, 0xbc, 0xe2, + 0xfc, 0x89, 0xb8, 0xb1, 0x85, 0x16, 0x67, 0xcb, 0x46, 0x5e, 0x06, 0x29, 0xc1, 0x17, 0x60, 0x15, + 0xf8, 0x8d, 0x2d, 0xf9, 0x7d, 0x14, 0xf8, 0x8d, 0x8d, 0x9b, 0xf2, 0xfb, 0x28, 0xf0, 0x1b, 0x1f, + 0x6d, 0xe2, 0xfc, 0xe6, 0xb7, 0x25, 0xb4, 0x62, 0xa7, 0x62, 0x3c, 0xf9, 0xff, 0x9a, 0xfa, 0x0b, + 0x05, 0xe1, 0xe3, 0xd3, 0x5c, 0xdd, 0x78, 0xd6, 0x51, 0x38, 0xf1, 0x8a, 0xb2, 0xba, 0x79, 0x1e, + 0x88, 0x4c, 0xb9, 0x76, 0xfd, 0x07, 0x7f, 0xfc, 0xf3, 0x8f, 0x73, 0x6f, 0x6b, 0x6f, 0xac, 0x3f, + 0xde, 0x58, 0xcf, 0x82, 0x30, 0xbe, 0x15, 0x1e, 0xe3, 0xbf, 0xa5, 0xdc, 0x50, 0x7f, 0xa6, 0xa0, + 0x95, 0x63, 0x3d, 0x5d, 0xfd, 0xf0, 0xb9, 0x2a, 0x8f, 0x4d, 0xa9, 0xd5, 0x8d, 0x73, 0x20, 0x52, + 0x1b, 0xd7, 0x84, 0x8d, 0x9a, 0xf6, 
0xfa, 0x89, 0x36, 0x66, 0xec, 0x60, 0xe2, 0xef, 0x8e, 0x4d, + 0xc7, 0x69, 0xd1, 0xab, 0x1f, 0x9f, 0x4d, 0xef, 0x53, 0x57, 0x8f, 0xd5, 0xad, 0xf3, 0x03, 0x53, + 0xbb, 0xd7, 0x85, 0xdd, 0xd7, 0xb5, 0x77, 0x4e, 0xb7, 0xfb, 0x68, 0x2e, 0xc2, 0x3f, 0x55, 0xd0, + 0xd2, 0xdc, 0x64, 0x50, 0xeb, 0xcf, 0x4f, 0xe9, 0xec, 0xa0, 0x5b, 0x5d, 0x3f, 0x33, 0x7f, 0x6a, + 0xe3, 0x35, 0x61, 0xe3, 0x15, 0xed, 0xb5, 0x93, 0xf3, 0x2f, 0x98, 0xc1, 0xb4, 0x9f, 0x28, 0x68, + 0x71, 0xb6, 0x67, 0xa8, 0x1f, 0x9c, 0xab, 0x55, 0xaf, 0xd6, 0xcf, 0xca, 0x9e, 0xda, 0x75, 0x55, + 0xd8, 0xf5, 0xa6, 0xb6, 0x7a, 0xdc, 0xae, 0x29, 0xef, 0x2d, 0xe5, 0xc6, 0xf6, 0x13, 0x74, 0xb9, + 0x1b, 0x1f, 0x9c, 0x2a, 0x7b, 0xfb, 0xd2, 0xb1, 0xb3, 0xe6, 0x8e, 0xe2, 0x24, 0x76, 0x95, 0x2f, + 0xee, 0xa6, 0x88, 0x7e, 0x0c, 0xdc, 0xf5, 0x78, 0xd4, 0x5f, 0xef, 0xf3, 0x48, 0xfc, 0x61, 0x9d, + 0xfe, 0x31, 0x1e, 0x0e, 0x07, 0xe3, 0xa7, 0xff, 0x1c, 0xbf, 0x9d, 0xad, 0x1f, 0x96, 0x04, 0xf3, + 0x47, 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x2c, 0xad, 0x2c, 0xa0, 0x48, 0x1f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/language/v1beta1/language_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/language/v1beta1/language_service.pb.go new file mode 100644 index 000000000..4eba89507 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/language/v1beta1/language_service.pb.go @@ -0,0 +1,2311 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/language/v1beta1/language_service.proto + +/* +Package language is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/language/v1beta1/language_service.proto + +It has these top-level messages: + Document + Sentence + Entity + Token + Sentiment + PartOfSpeech + DependencyEdge + EntityMention + TextSpan + AnalyzeSentimentRequest + AnalyzeSentimentResponse + AnalyzeEntitiesRequest + AnalyzeEntitiesResponse + AnalyzeSyntaxRequest + AnalyzeSyntaxResponse + AnnotateTextRequest + AnnotateTextResponse +*/ +package language + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents the text encoding that the caller uses to process the output. +// Providing an `EncodingType` is recommended because the API provides the +// beginning offsets for various outputs, such as tokens and mentions, and +// languages that natively use different text encodings may access offsets +// differently. +type EncodingType int32 + +const ( + // If `EncodingType` is not specified, encoding-dependent information (such as + // `begin_offset`) will be set at `-1`. + EncodingType_NONE EncodingType = 0 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-8 encoding of the input. C++ and Go are examples of languages + // that use this encoding natively. 
+ EncodingType_UTF8 EncodingType = 1 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-16 encoding of the input. Java and Javascript are examples of + // languages that use this encoding natively. + EncodingType_UTF16 EncodingType = 2 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-32 encoding of the input. Python is an example of a language + // that uses this encoding natively. + EncodingType_UTF32 EncodingType = 3 +) + +var EncodingType_name = map[int32]string{ + 0: "NONE", + 1: "UTF8", + 2: "UTF16", + 3: "UTF32", +} +var EncodingType_value = map[string]int32{ + "NONE": 0, + "UTF8": 1, + "UTF16": 2, + "UTF32": 3, +} + +func (x EncodingType) String() string { + return proto.EnumName(EncodingType_name, int32(x)) +} +func (EncodingType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// The document types enum. +type Document_Type int32 + +const ( + // The content type is not specified. + Document_TYPE_UNSPECIFIED Document_Type = 0 + // Plain text + Document_PLAIN_TEXT Document_Type = 1 + // HTML + Document_HTML Document_Type = 2 +) + +var Document_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "PLAIN_TEXT", + 2: "HTML", +} +var Document_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "PLAIN_TEXT": 1, + "HTML": 2, +} + +func (x Document_Type) String() string { + return proto.EnumName(Document_Type_name, int32(x)) +} +func (Document_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// The type of the entity. 
+type Entity_Type int32 + +const ( + // Unknown + Entity_UNKNOWN Entity_Type = 0 + // Person + Entity_PERSON Entity_Type = 1 + // Location + Entity_LOCATION Entity_Type = 2 + // Organization + Entity_ORGANIZATION Entity_Type = 3 + // Event + Entity_EVENT Entity_Type = 4 + // Work of art + Entity_WORK_OF_ART Entity_Type = 5 + // Consumer goods + Entity_CONSUMER_GOOD Entity_Type = 6 + // Other types + Entity_OTHER Entity_Type = 7 +) + +var Entity_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PERSON", + 2: "LOCATION", + 3: "ORGANIZATION", + 4: "EVENT", + 5: "WORK_OF_ART", + 6: "CONSUMER_GOOD", + 7: "OTHER", +} +var Entity_Type_value = map[string]int32{ + "UNKNOWN": 0, + "PERSON": 1, + "LOCATION": 2, + "ORGANIZATION": 3, + "EVENT": 4, + "WORK_OF_ART": 5, + "CONSUMER_GOOD": 6, + "OTHER": 7, +} + +func (x Entity_Type) String() string { + return proto.EnumName(Entity_Type_name, int32(x)) +} +func (Entity_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// The part of speech tags enum. 
+type PartOfSpeech_Tag int32 + +const ( + // Unknown + PartOfSpeech_UNKNOWN PartOfSpeech_Tag = 0 + // Adjective + PartOfSpeech_ADJ PartOfSpeech_Tag = 1 + // Adposition (preposition and postposition) + PartOfSpeech_ADP PartOfSpeech_Tag = 2 + // Adverb + PartOfSpeech_ADV PartOfSpeech_Tag = 3 + // Conjunction + PartOfSpeech_CONJ PartOfSpeech_Tag = 4 + // Determiner + PartOfSpeech_DET PartOfSpeech_Tag = 5 + // Noun (common and proper) + PartOfSpeech_NOUN PartOfSpeech_Tag = 6 + // Cardinal number + PartOfSpeech_NUM PartOfSpeech_Tag = 7 + // Pronoun + PartOfSpeech_PRON PartOfSpeech_Tag = 8 + // Particle or other function word + PartOfSpeech_PRT PartOfSpeech_Tag = 9 + // Punctuation + PartOfSpeech_PUNCT PartOfSpeech_Tag = 10 + // Verb (all tenses and modes) + PartOfSpeech_VERB PartOfSpeech_Tag = 11 + // Other: foreign words, typos, abbreviations + PartOfSpeech_X PartOfSpeech_Tag = 12 + // Affix + PartOfSpeech_AFFIX PartOfSpeech_Tag = 13 +) + +var PartOfSpeech_Tag_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ADJ", + 2: "ADP", + 3: "ADV", + 4: "CONJ", + 5: "DET", + 6: "NOUN", + 7: "NUM", + 8: "PRON", + 9: "PRT", + 10: "PUNCT", + 11: "VERB", + 12: "X", + 13: "AFFIX", +} +var PartOfSpeech_Tag_value = map[string]int32{ + "UNKNOWN": 0, + "ADJ": 1, + "ADP": 2, + "ADV": 3, + "CONJ": 4, + "DET": 5, + "NOUN": 6, + "NUM": 7, + "PRON": 8, + "PRT": 9, + "PUNCT": 10, + "VERB": 11, + "X": 12, + "AFFIX": 13, +} + +func (x PartOfSpeech_Tag) String() string { + return proto.EnumName(PartOfSpeech_Tag_name, int32(x)) +} +func (PartOfSpeech_Tag) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 0} } + +// The characteristic of a verb that expresses time flow during an event. +type PartOfSpeech_Aspect int32 + +const ( + // Aspect is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_ASPECT_UNKNOWN PartOfSpeech_Aspect = 0 + // Perfective + PartOfSpeech_PERFECTIVE PartOfSpeech_Aspect = 1 + // Imperfective + PartOfSpeech_IMPERFECTIVE PartOfSpeech_Aspect = 2 + // Progressive + PartOfSpeech_PROGRESSIVE PartOfSpeech_Aspect = 3 +) + +var PartOfSpeech_Aspect_name = map[int32]string{ + 0: "ASPECT_UNKNOWN", + 1: "PERFECTIVE", + 2: "IMPERFECTIVE", + 3: "PROGRESSIVE", +} +var PartOfSpeech_Aspect_value = map[string]int32{ + "ASPECT_UNKNOWN": 0, + "PERFECTIVE": 1, + "IMPERFECTIVE": 2, + "PROGRESSIVE": 3, +} + +func (x PartOfSpeech_Aspect) String() string { + return proto.EnumName(PartOfSpeech_Aspect_name, int32(x)) +} +func (PartOfSpeech_Aspect) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 1} } + +// The grammatical function performed by a noun or pronoun in a phrase, +// clause, or sentence. In some languages, other parts of speech, such as +// adjective and determiner, take case inflection in agreement with the noun. +type PartOfSpeech_Case int32 + +const ( + // Case is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_CASE_UNKNOWN PartOfSpeech_Case = 0 + // Accusative + PartOfSpeech_ACCUSATIVE PartOfSpeech_Case = 1 + // Adverbial + PartOfSpeech_ADVERBIAL PartOfSpeech_Case = 2 + // Complementive + PartOfSpeech_COMPLEMENTIVE PartOfSpeech_Case = 3 + // Dative + PartOfSpeech_DATIVE PartOfSpeech_Case = 4 + // Genitive + PartOfSpeech_GENITIVE PartOfSpeech_Case = 5 + // Instrumental + PartOfSpeech_INSTRUMENTAL PartOfSpeech_Case = 6 + // Locative + PartOfSpeech_LOCATIVE PartOfSpeech_Case = 7 + // Nominative + PartOfSpeech_NOMINATIVE PartOfSpeech_Case = 8 + // Oblique + PartOfSpeech_OBLIQUE PartOfSpeech_Case = 9 + // Partitive + PartOfSpeech_PARTITIVE PartOfSpeech_Case = 10 + // Prepositional + PartOfSpeech_PREPOSITIONAL PartOfSpeech_Case = 11 + // Reflexive + PartOfSpeech_REFLEXIVE_CASE PartOfSpeech_Case = 12 + // Relative + PartOfSpeech_RELATIVE_CASE PartOfSpeech_Case = 13 + // Vocative + PartOfSpeech_VOCATIVE PartOfSpeech_Case = 14 +) + +var PartOfSpeech_Case_name = map[int32]string{ + 0: "CASE_UNKNOWN", + 1: "ACCUSATIVE", + 2: "ADVERBIAL", + 3: "COMPLEMENTIVE", + 4: "DATIVE", + 5: "GENITIVE", + 6: "INSTRUMENTAL", + 7: "LOCATIVE", + 8: "NOMINATIVE", + 9: "OBLIQUE", + 10: "PARTITIVE", + 11: "PREPOSITIONAL", + 12: "REFLEXIVE_CASE", + 13: "RELATIVE_CASE", + 14: "VOCATIVE", +} +var PartOfSpeech_Case_value = map[string]int32{ + "CASE_UNKNOWN": 0, + "ACCUSATIVE": 1, + "ADVERBIAL": 2, + "COMPLEMENTIVE": 3, + "DATIVE": 4, + "GENITIVE": 5, + "INSTRUMENTAL": 6, + "LOCATIVE": 7, + "NOMINATIVE": 8, + "OBLIQUE": 9, + "PARTITIVE": 10, + "PREPOSITIONAL": 11, + "REFLEXIVE_CASE": 12, + "RELATIVE_CASE": 13, + "VOCATIVE": 14, +} + +func (x PartOfSpeech_Case) String() string { + return proto.EnumName(PartOfSpeech_Case_name, int32(x)) +} +func (PartOfSpeech_Case) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 2} } + +// Depending on the language, Form can be categorizing different forms of +// verbs, adjectives, adverbs, etc. 
For example, categorizing inflected +// endings of verbs and adjectives or distinguishing between short and long +// forms of adjectives and participles +type PartOfSpeech_Form int32 + +const ( + // Form is not applicable in the analyzed language or is not predicted. + PartOfSpeech_FORM_UNKNOWN PartOfSpeech_Form = 0 + // Adnomial + PartOfSpeech_ADNOMIAL PartOfSpeech_Form = 1 + // Auxiliary + PartOfSpeech_AUXILIARY PartOfSpeech_Form = 2 + // Complementizer + PartOfSpeech_COMPLEMENTIZER PartOfSpeech_Form = 3 + // Final ending + PartOfSpeech_FINAL_ENDING PartOfSpeech_Form = 4 + // Gerund + PartOfSpeech_GERUND PartOfSpeech_Form = 5 + // Realis + PartOfSpeech_REALIS PartOfSpeech_Form = 6 + // Irrealis + PartOfSpeech_IRREALIS PartOfSpeech_Form = 7 + // Short form + PartOfSpeech_SHORT PartOfSpeech_Form = 8 + // Long form + PartOfSpeech_LONG PartOfSpeech_Form = 9 + // Order form + PartOfSpeech_ORDER PartOfSpeech_Form = 10 + // Specific form + PartOfSpeech_SPECIFIC PartOfSpeech_Form = 11 +) + +var PartOfSpeech_Form_name = map[int32]string{ + 0: "FORM_UNKNOWN", + 1: "ADNOMIAL", + 2: "AUXILIARY", + 3: "COMPLEMENTIZER", + 4: "FINAL_ENDING", + 5: "GERUND", + 6: "REALIS", + 7: "IRREALIS", + 8: "SHORT", + 9: "LONG", + 10: "ORDER", + 11: "SPECIFIC", +} +var PartOfSpeech_Form_value = map[string]int32{ + "FORM_UNKNOWN": 0, + "ADNOMIAL": 1, + "AUXILIARY": 2, + "COMPLEMENTIZER": 3, + "FINAL_ENDING": 4, + "GERUND": 5, + "REALIS": 6, + "IRREALIS": 7, + "SHORT": 8, + "LONG": 9, + "ORDER": 10, + "SPECIFIC": 11, +} + +func (x PartOfSpeech_Form) String() string { + return proto.EnumName(PartOfSpeech_Form_name, int32(x)) +} +func (PartOfSpeech_Form) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 3} } + +// Gender classes of nouns reflected in the behaviour of associated words. +type PartOfSpeech_Gender int32 + +const ( + // Gender is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_GENDER_UNKNOWN PartOfSpeech_Gender = 0 + // Feminine + PartOfSpeech_FEMININE PartOfSpeech_Gender = 1 + // Masculine + PartOfSpeech_MASCULINE PartOfSpeech_Gender = 2 + // Neuter + PartOfSpeech_NEUTER PartOfSpeech_Gender = 3 +) + +var PartOfSpeech_Gender_name = map[int32]string{ + 0: "GENDER_UNKNOWN", + 1: "FEMININE", + 2: "MASCULINE", + 3: "NEUTER", +} +var PartOfSpeech_Gender_value = map[string]int32{ + "GENDER_UNKNOWN": 0, + "FEMININE": 1, + "MASCULINE": 2, + "NEUTER": 3, +} + +func (x PartOfSpeech_Gender) String() string { + return proto.EnumName(PartOfSpeech_Gender_name, int32(x)) +} +func (PartOfSpeech_Gender) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 4} } + +// The grammatical feature of verbs, used for showing modality and attitude. +type PartOfSpeech_Mood int32 + +const ( + // Mood is not applicable in the analyzed language or is not predicted. + PartOfSpeech_MOOD_UNKNOWN PartOfSpeech_Mood = 0 + // Conditional + PartOfSpeech_CONDITIONAL_MOOD PartOfSpeech_Mood = 1 + // Imperative + PartOfSpeech_IMPERATIVE PartOfSpeech_Mood = 2 + // Indicative + PartOfSpeech_INDICATIVE PartOfSpeech_Mood = 3 + // Interrogative + PartOfSpeech_INTERROGATIVE PartOfSpeech_Mood = 4 + // Jussive + PartOfSpeech_JUSSIVE PartOfSpeech_Mood = 5 + // Subjunctive + PartOfSpeech_SUBJUNCTIVE PartOfSpeech_Mood = 6 +) + +var PartOfSpeech_Mood_name = map[int32]string{ + 0: "MOOD_UNKNOWN", + 1: "CONDITIONAL_MOOD", + 2: "IMPERATIVE", + 3: "INDICATIVE", + 4: "INTERROGATIVE", + 5: "JUSSIVE", + 6: "SUBJUNCTIVE", +} +var PartOfSpeech_Mood_value = map[string]int32{ + "MOOD_UNKNOWN": 0, + "CONDITIONAL_MOOD": 1, + "IMPERATIVE": 2, + "INDICATIVE": 3, + "INTERROGATIVE": 4, + "JUSSIVE": 5, + "SUBJUNCTIVE": 6, +} + +func (x PartOfSpeech_Mood) String() string { + return proto.EnumName(PartOfSpeech_Mood_name, int32(x)) +} +func (PartOfSpeech_Mood) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 5} } + +// Count distinctions. 
+type PartOfSpeech_Number int32 + +const ( + // Number is not applicable in the analyzed language or is not predicted. + PartOfSpeech_NUMBER_UNKNOWN PartOfSpeech_Number = 0 + // Singular + PartOfSpeech_SINGULAR PartOfSpeech_Number = 1 + // Plural + PartOfSpeech_PLURAL PartOfSpeech_Number = 2 + // Dual + PartOfSpeech_DUAL PartOfSpeech_Number = 3 +) + +var PartOfSpeech_Number_name = map[int32]string{ + 0: "NUMBER_UNKNOWN", + 1: "SINGULAR", + 2: "PLURAL", + 3: "DUAL", +} +var PartOfSpeech_Number_value = map[string]int32{ + "NUMBER_UNKNOWN": 0, + "SINGULAR": 1, + "PLURAL": 2, + "DUAL": 3, +} + +func (x PartOfSpeech_Number) String() string { + return proto.EnumName(PartOfSpeech_Number_name, int32(x)) +} +func (PartOfSpeech_Number) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 6} } + +// The distinction between the speaker, second person, third person, etc. +type PartOfSpeech_Person int32 + +const ( + // Person is not applicable in the analyzed language or is not predicted. + PartOfSpeech_PERSON_UNKNOWN PartOfSpeech_Person = 0 + // First + PartOfSpeech_FIRST PartOfSpeech_Person = 1 + // Second + PartOfSpeech_SECOND PartOfSpeech_Person = 2 + // Third + PartOfSpeech_THIRD PartOfSpeech_Person = 3 + // Reflexive + PartOfSpeech_REFLEXIVE_PERSON PartOfSpeech_Person = 4 +) + +var PartOfSpeech_Person_name = map[int32]string{ + 0: "PERSON_UNKNOWN", + 1: "FIRST", + 2: "SECOND", + 3: "THIRD", + 4: "REFLEXIVE_PERSON", +} +var PartOfSpeech_Person_value = map[string]int32{ + "PERSON_UNKNOWN": 0, + "FIRST": 1, + "SECOND": 2, + "THIRD": 3, + "REFLEXIVE_PERSON": 4, +} + +func (x PartOfSpeech_Person) String() string { + return proto.EnumName(PartOfSpeech_Person_name, int32(x)) +} +func (PartOfSpeech_Person) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 7} } + +// This category shows if the token is part of a proper name. +type PartOfSpeech_Proper int32 + +const ( + // Proper is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_PROPER_UNKNOWN PartOfSpeech_Proper = 0 + // Proper + PartOfSpeech_PROPER PartOfSpeech_Proper = 1 + // Not proper + PartOfSpeech_NOT_PROPER PartOfSpeech_Proper = 2 +) + +var PartOfSpeech_Proper_name = map[int32]string{ + 0: "PROPER_UNKNOWN", + 1: "PROPER", + 2: "NOT_PROPER", +} +var PartOfSpeech_Proper_value = map[string]int32{ + "PROPER_UNKNOWN": 0, + "PROPER": 1, + "NOT_PROPER": 2, +} + +func (x PartOfSpeech_Proper) String() string { + return proto.EnumName(PartOfSpeech_Proper_name, int32(x)) +} +func (PartOfSpeech_Proper) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 8} } + +// Reciprocal features of a pronoun. +type PartOfSpeech_Reciprocity int32 + +const ( + // Reciprocity is not applicable in the analyzed language or is not + // predicted. + PartOfSpeech_RECIPROCITY_UNKNOWN PartOfSpeech_Reciprocity = 0 + // Reciprocal + PartOfSpeech_RECIPROCAL PartOfSpeech_Reciprocity = 1 + // Non-reciprocal + PartOfSpeech_NON_RECIPROCAL PartOfSpeech_Reciprocity = 2 +) + +var PartOfSpeech_Reciprocity_name = map[int32]string{ + 0: "RECIPROCITY_UNKNOWN", + 1: "RECIPROCAL", + 2: "NON_RECIPROCAL", +} +var PartOfSpeech_Reciprocity_value = map[string]int32{ + "RECIPROCITY_UNKNOWN": 0, + "RECIPROCAL": 1, + "NON_RECIPROCAL": 2, +} + +func (x PartOfSpeech_Reciprocity) String() string { + return proto.EnumName(PartOfSpeech_Reciprocity_name, int32(x)) +} +func (PartOfSpeech_Reciprocity) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 9} } + +// Time reference. +type PartOfSpeech_Tense int32 + +const ( + // Tense is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_TENSE_UNKNOWN PartOfSpeech_Tense = 0 + // Conditional + PartOfSpeech_CONDITIONAL_TENSE PartOfSpeech_Tense = 1 + // Future + PartOfSpeech_FUTURE PartOfSpeech_Tense = 2 + // Past + PartOfSpeech_PAST PartOfSpeech_Tense = 3 + // Present + PartOfSpeech_PRESENT PartOfSpeech_Tense = 4 + // Imperfect + PartOfSpeech_IMPERFECT PartOfSpeech_Tense = 5 + // Pluperfect + PartOfSpeech_PLUPERFECT PartOfSpeech_Tense = 6 +) + +var PartOfSpeech_Tense_name = map[int32]string{ + 0: "TENSE_UNKNOWN", + 1: "CONDITIONAL_TENSE", + 2: "FUTURE", + 3: "PAST", + 4: "PRESENT", + 5: "IMPERFECT", + 6: "PLUPERFECT", +} +var PartOfSpeech_Tense_value = map[string]int32{ + "TENSE_UNKNOWN": 0, + "CONDITIONAL_TENSE": 1, + "FUTURE": 2, + "PAST": 3, + "PRESENT": 4, + "IMPERFECT": 5, + "PLUPERFECT": 6, +} + +func (x PartOfSpeech_Tense) String() string { + return proto.EnumName(PartOfSpeech_Tense_name, int32(x)) +} +func (PartOfSpeech_Tense) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 10} } + +// The relationship between the action that a verb expresses and the +// participants identified by its arguments. +type PartOfSpeech_Voice int32 + +const ( + // Voice is not applicable in the analyzed language or is not predicted. + PartOfSpeech_VOICE_UNKNOWN PartOfSpeech_Voice = 0 + // Active + PartOfSpeech_ACTIVE PartOfSpeech_Voice = 1 + // Causative + PartOfSpeech_CAUSATIVE PartOfSpeech_Voice = 2 + // Passive + PartOfSpeech_PASSIVE PartOfSpeech_Voice = 3 +) + +var PartOfSpeech_Voice_name = map[int32]string{ + 0: "VOICE_UNKNOWN", + 1: "ACTIVE", + 2: "CAUSATIVE", + 3: "PASSIVE", +} +var PartOfSpeech_Voice_value = map[string]int32{ + "VOICE_UNKNOWN": 0, + "ACTIVE": 1, + "CAUSATIVE": 2, + "PASSIVE": 3, +} + +func (x PartOfSpeech_Voice) String() string { + return proto.EnumName(PartOfSpeech_Voice_name, int32(x)) +} +func (PartOfSpeech_Voice) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 11} } + +// The parse label enum for the token. 
+type DependencyEdge_Label int32 + +const ( + // Unknown + DependencyEdge_UNKNOWN DependencyEdge_Label = 0 + // Abbreviation modifier + DependencyEdge_ABBREV DependencyEdge_Label = 1 + // Adjectival complement + DependencyEdge_ACOMP DependencyEdge_Label = 2 + // Adverbial clause modifier + DependencyEdge_ADVCL DependencyEdge_Label = 3 + // Adverbial modifier + DependencyEdge_ADVMOD DependencyEdge_Label = 4 + // Adjectival modifier of an NP + DependencyEdge_AMOD DependencyEdge_Label = 5 + // Appositional modifier of an NP + DependencyEdge_APPOS DependencyEdge_Label = 6 + // Attribute dependent of a copular verb + DependencyEdge_ATTR DependencyEdge_Label = 7 + // Auxiliary (non-main) verb + DependencyEdge_AUX DependencyEdge_Label = 8 + // Passive auxiliary + DependencyEdge_AUXPASS DependencyEdge_Label = 9 + // Coordinating conjunction + DependencyEdge_CC DependencyEdge_Label = 10 + // Clausal complement of a verb or adjective + DependencyEdge_CCOMP DependencyEdge_Label = 11 + // Conjunct + DependencyEdge_CONJ DependencyEdge_Label = 12 + // Clausal subject + DependencyEdge_CSUBJ DependencyEdge_Label = 13 + // Clausal passive subject + DependencyEdge_CSUBJPASS DependencyEdge_Label = 14 + // Dependency (unable to determine) + DependencyEdge_DEP DependencyEdge_Label = 15 + // Determiner + DependencyEdge_DET DependencyEdge_Label = 16 + // Discourse + DependencyEdge_DISCOURSE DependencyEdge_Label = 17 + // Direct object + DependencyEdge_DOBJ DependencyEdge_Label = 18 + // Expletive + DependencyEdge_EXPL DependencyEdge_Label = 19 + // Goes with (part of a word in a text not well edited) + DependencyEdge_GOESWITH DependencyEdge_Label = 20 + // Indirect object + DependencyEdge_IOBJ DependencyEdge_Label = 21 + // Marker (word introducing a subordinate clause) + DependencyEdge_MARK DependencyEdge_Label = 22 + // Multi-word expression + DependencyEdge_MWE DependencyEdge_Label = 23 + // Multi-word verbal expression + DependencyEdge_MWV DependencyEdge_Label = 24 + // Negation 
modifier + DependencyEdge_NEG DependencyEdge_Label = 25 + // Noun compound modifier + DependencyEdge_NN DependencyEdge_Label = 26 + // Noun phrase used as an adverbial modifier + DependencyEdge_NPADVMOD DependencyEdge_Label = 27 + // Nominal subject + DependencyEdge_NSUBJ DependencyEdge_Label = 28 + // Passive nominal subject + DependencyEdge_NSUBJPASS DependencyEdge_Label = 29 + // Numeric modifier of a noun + DependencyEdge_NUM DependencyEdge_Label = 30 + // Element of compound number + DependencyEdge_NUMBER DependencyEdge_Label = 31 + // Punctuation mark + DependencyEdge_P DependencyEdge_Label = 32 + // Parataxis relation + DependencyEdge_PARATAXIS DependencyEdge_Label = 33 + // Participial modifier + DependencyEdge_PARTMOD DependencyEdge_Label = 34 + // The complement of a preposition is a clause + DependencyEdge_PCOMP DependencyEdge_Label = 35 + // Object of a preposition + DependencyEdge_POBJ DependencyEdge_Label = 36 + // Possession modifier + DependencyEdge_POSS DependencyEdge_Label = 37 + // Postverbal negative particle + DependencyEdge_POSTNEG DependencyEdge_Label = 38 + // Predicate complement + DependencyEdge_PRECOMP DependencyEdge_Label = 39 + // Preconjunt + DependencyEdge_PRECONJ DependencyEdge_Label = 40 + // Predeterminer + DependencyEdge_PREDET DependencyEdge_Label = 41 + // Prefix + DependencyEdge_PREF DependencyEdge_Label = 42 + // Prepositional modifier + DependencyEdge_PREP DependencyEdge_Label = 43 + // The relationship between a verb and verbal morpheme + DependencyEdge_PRONL DependencyEdge_Label = 44 + // Particle + DependencyEdge_PRT DependencyEdge_Label = 45 + // Associative or possessive marker + DependencyEdge_PS DependencyEdge_Label = 46 + // Quantifier phrase modifier + DependencyEdge_QUANTMOD DependencyEdge_Label = 47 + // Relative clause modifier + DependencyEdge_RCMOD DependencyEdge_Label = 48 + // Complementizer in relative clause + DependencyEdge_RCMODREL DependencyEdge_Label = 49 + // Ellipsis without a preceding predicate + 
DependencyEdge_RDROP DependencyEdge_Label = 50 + // Referent + DependencyEdge_REF DependencyEdge_Label = 51 + // Remnant + DependencyEdge_REMNANT DependencyEdge_Label = 52 + // Reparandum + DependencyEdge_REPARANDUM DependencyEdge_Label = 53 + // Root + DependencyEdge_ROOT DependencyEdge_Label = 54 + // Suffix specifying a unit of number + DependencyEdge_SNUM DependencyEdge_Label = 55 + // Suffix + DependencyEdge_SUFF DependencyEdge_Label = 56 + // Temporal modifier + DependencyEdge_TMOD DependencyEdge_Label = 57 + // Topic marker + DependencyEdge_TOPIC DependencyEdge_Label = 58 + // Clause headed by an infinite form of the verb that modifies a noun + DependencyEdge_VMOD DependencyEdge_Label = 59 + // Vocative + DependencyEdge_VOCATIVE DependencyEdge_Label = 60 + // Open clausal complement + DependencyEdge_XCOMP DependencyEdge_Label = 61 + // Name suffix + DependencyEdge_SUFFIX DependencyEdge_Label = 62 + // Name title + DependencyEdge_TITLE DependencyEdge_Label = 63 + // Adverbial phrase modifier + DependencyEdge_ADVPHMOD DependencyEdge_Label = 64 + // Causative auxiliary + DependencyEdge_AUXCAUS DependencyEdge_Label = 65 + // Helper auxiliary + DependencyEdge_AUXVV DependencyEdge_Label = 66 + // Rentaishi (Prenominal modifier) + DependencyEdge_DTMOD DependencyEdge_Label = 67 + // Foreign words + DependencyEdge_FOREIGN DependencyEdge_Label = 68 + // Keyword + DependencyEdge_KW DependencyEdge_Label = 69 + // List for chains of comparable items + DependencyEdge_LIST DependencyEdge_Label = 70 + // Nominalized clause + DependencyEdge_NOMC DependencyEdge_Label = 71 + // Nominalized clausal subject + DependencyEdge_NOMCSUBJ DependencyEdge_Label = 72 + // Nominalized clausal passive + DependencyEdge_NOMCSUBJPASS DependencyEdge_Label = 73 + // Compound of numeric modifier + DependencyEdge_NUMC DependencyEdge_Label = 74 + // Copula + DependencyEdge_COP DependencyEdge_Label = 75 + // Dislocated relation (for fronted/topicalized elements) + DependencyEdge_DISLOCATED 
DependencyEdge_Label = 76 +) + +var DependencyEdge_Label_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ABBREV", + 2: "ACOMP", + 3: "ADVCL", + 4: "ADVMOD", + 5: "AMOD", + 6: "APPOS", + 7: "ATTR", + 8: "AUX", + 9: "AUXPASS", + 10: "CC", + 11: "CCOMP", + 12: "CONJ", + 13: "CSUBJ", + 14: "CSUBJPASS", + 15: "DEP", + 16: "DET", + 17: "DISCOURSE", + 18: "DOBJ", + 19: "EXPL", + 20: "GOESWITH", + 21: "IOBJ", + 22: "MARK", + 23: "MWE", + 24: "MWV", + 25: "NEG", + 26: "NN", + 27: "NPADVMOD", + 28: "NSUBJ", + 29: "NSUBJPASS", + 30: "NUM", + 31: "NUMBER", + 32: "P", + 33: "PARATAXIS", + 34: "PARTMOD", + 35: "PCOMP", + 36: "POBJ", + 37: "POSS", + 38: "POSTNEG", + 39: "PRECOMP", + 40: "PRECONJ", + 41: "PREDET", + 42: "PREF", + 43: "PREP", + 44: "PRONL", + 45: "PRT", + 46: "PS", + 47: "QUANTMOD", + 48: "RCMOD", + 49: "RCMODREL", + 50: "RDROP", + 51: "REF", + 52: "REMNANT", + 53: "REPARANDUM", + 54: "ROOT", + 55: "SNUM", + 56: "SUFF", + 57: "TMOD", + 58: "TOPIC", + 59: "VMOD", + 60: "VOCATIVE", + 61: "XCOMP", + 62: "SUFFIX", + 63: "TITLE", + 64: "ADVPHMOD", + 65: "AUXCAUS", + 66: "AUXVV", + 67: "DTMOD", + 68: "FOREIGN", + 69: "KW", + 70: "LIST", + 71: "NOMC", + 72: "NOMCSUBJ", + 73: "NOMCSUBJPASS", + 74: "NUMC", + 75: "COP", + 76: "DISLOCATED", +} +var DependencyEdge_Label_value = map[string]int32{ + "UNKNOWN": 0, + "ABBREV": 1, + "ACOMP": 2, + "ADVCL": 3, + "ADVMOD": 4, + "AMOD": 5, + "APPOS": 6, + "ATTR": 7, + "AUX": 8, + "AUXPASS": 9, + "CC": 10, + "CCOMP": 11, + "CONJ": 12, + "CSUBJ": 13, + "CSUBJPASS": 14, + "DEP": 15, + "DET": 16, + "DISCOURSE": 17, + "DOBJ": 18, + "EXPL": 19, + "GOESWITH": 20, + "IOBJ": 21, + "MARK": 22, + "MWE": 23, + "MWV": 24, + "NEG": 25, + "NN": 26, + "NPADVMOD": 27, + "NSUBJ": 28, + "NSUBJPASS": 29, + "NUM": 30, + "NUMBER": 31, + "P": 32, + "PARATAXIS": 33, + "PARTMOD": 34, + "PCOMP": 35, + "POBJ": 36, + "POSS": 37, + "POSTNEG": 38, + "PRECOMP": 39, + "PRECONJ": 40, + "PREDET": 41, + "PREF": 42, + "PREP": 43, + "PRONL": 44, + "PRT": 45, + "PS": 46, + 
"QUANTMOD": 47, + "RCMOD": 48, + "RCMODREL": 49, + "RDROP": 50, + "REF": 51, + "REMNANT": 52, + "REPARANDUM": 53, + "ROOT": 54, + "SNUM": 55, + "SUFF": 56, + "TMOD": 57, + "TOPIC": 58, + "VMOD": 59, + "VOCATIVE": 60, + "XCOMP": 61, + "SUFFIX": 62, + "TITLE": 63, + "ADVPHMOD": 64, + "AUXCAUS": 65, + "AUXVV": 66, + "DTMOD": 67, + "FOREIGN": 68, + "KW": 69, + "LIST": 70, + "NOMC": 71, + "NOMCSUBJ": 72, + "NOMCSUBJPASS": 73, + "NUMC": 74, + "COP": 75, + "DISLOCATED": 76, +} + +func (x DependencyEdge_Label) String() string { + return proto.EnumName(DependencyEdge_Label_name, int32(x)) +} +func (DependencyEdge_Label) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} } + +// The supported types of mentions. +type EntityMention_Type int32 + +const ( + // Unknown + EntityMention_TYPE_UNKNOWN EntityMention_Type = 0 + // Proper name + EntityMention_PROPER EntityMention_Type = 1 + // Common noun (or noun compound) + EntityMention_COMMON EntityMention_Type = 2 +) + +var EntityMention_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "PROPER", + 2: "COMMON", +} +var EntityMention_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "PROPER": 1, + "COMMON": 2, +} + +func (x EntityMention_Type) String() string { + return proto.EnumName(EntityMention_Type_name, int32(x)) +} +func (EntityMention_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// ################################################################ # +// +// Represents the input to API methods. +type Document struct { + // Required. If the type is not set or is `TYPE_UNSPECIFIED`, + // returns an `INVALID_ARGUMENT` error. + Type Document_Type `protobuf:"varint,1,opt,name=type,enum=google.cloud.language.v1beta1.Document_Type" json:"type,omitempty"` + // The source of the document: a string containing the content or a + // Google Cloud Storage URI. 
+ // + // Types that are valid to be assigned to Source: + // *Document_Content + // *Document_GcsContentUri + Source isDocument_Source `protobuf_oneof:"source"` + // The language of the document (if not specified, the language is + // automatically detected). Both ISO and BCP-47 language codes are + // accepted.
+ // [Language Support](https://cloud.google.com/natural-language/docs/languages) + // lists currently supported languages for each API method. + // If the language (either specified by the caller or automatically detected) + // is not supported by the called API method, an `INVALID_ARGUMENT` error + // is returned. + Language string `protobuf:"bytes,4,opt,name=language" json:"language,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isDocument_Source interface { + isDocument_Source() +} + +type Document_Content struct { + Content string `protobuf:"bytes,2,opt,name=content,oneof"` +} +type Document_GcsContentUri struct { + GcsContentUri string `protobuf:"bytes,3,opt,name=gcs_content_uri,json=gcsContentUri,oneof"` +} + +func (*Document_Content) isDocument_Source() {} +func (*Document_GcsContentUri) isDocument_Source() {} + +func (m *Document) GetSource() isDocument_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Document) GetType() Document_Type { + if m != nil { + return m.Type + } + return Document_TYPE_UNSPECIFIED +} + +func (m *Document) GetContent() string { + if x, ok := m.GetSource().(*Document_Content); ok { + return x.Content + } + return "" +} + +func (m *Document) GetGcsContentUri() string { + if x, ok := m.GetSource().(*Document_GcsContentUri); ok { + return x.GcsContentUri + } + return "" +} + +func (m *Document) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Document) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Document_OneofMarshaler, _Document_OneofUnmarshaler, _Document_OneofSizer, []interface{}{ + (*Document_Content)(nil), + (*Document_GcsContentUri)(nil), + } +} + +func _Document_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Document) + // source + switch x := m.Source.(type) { + case *Document_Content: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Content) + case *Document_GcsContentUri: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.GcsContentUri) + case nil: + default: + return fmt.Errorf("Document.Source has unexpected type %T", x) + } + return nil +} + +func _Document_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Document) + switch tag { + case 2: // source.content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Document_Content{x} + return true, err + case 3: // source.gcs_content_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Document_GcsContentUri{x} + return true, err + default: + return false, nil + } +} + +func _Document_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Document) + // source + switch x := m.Source.(type) { + case *Document_Content: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Content))) + n += len(x.Content) + case *Document_GcsContentUri: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.GcsContentUri))) + n += len(x.GcsContentUri) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents a sentence in the input 
document. +type Sentence struct { + // The sentence text. + Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // For calls to [AnalyzeSentiment][] or if + // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_document_sentiment] is set to + // true, this field will contain the sentiment for the sentence. + Sentiment *Sentiment `protobuf:"bytes,2,opt,name=sentiment" json:"sentiment,omitempty"` +} + +func (m *Sentence) Reset() { *m = Sentence{} } +func (m *Sentence) String() string { return proto.CompactTextString(m) } +func (*Sentence) ProtoMessage() {} +func (*Sentence) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Sentence) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *Sentence) GetSentiment() *Sentiment { + if m != nil { + return m.Sentiment + } + return nil +} + +// Represents a phrase in the text that is a known entity, such as +// a person, an organization, or location. The API associates information, such +// as salience and mentions, with entities. +type Entity struct { + // The representative name for the entity. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The entity type. + Type Entity_Type `protobuf:"varint,2,opt,name=type,enum=google.cloud.language.v1beta1.Entity_Type" json:"type,omitempty"` + // Metadata associated with the entity. + // + // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if + // available. The associated keys are "wikipedia_url" and "mid", respectively. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The salience score associated with the entity in the [0, 1.0] range. 
+ // + // The salience score for an entity provides information about the + // importance or centrality of that entity to the entire document text. + // Scores closer to 0 are less salient, while scores closer to 1.0 are highly + // salient. + Salience float32 `protobuf:"fixed32,4,opt,name=salience" json:"salience,omitempty"` + // The mentions of this entity in the input document. The API currently + // supports proper noun mentions. + Mentions []*EntityMention `protobuf:"bytes,5,rep,name=mentions" json:"mentions,omitempty"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Entity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Entity) GetType() Entity_Type { + if m != nil { + return m.Type + } + return Entity_UNKNOWN +} + +func (m *Entity) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Entity) GetSalience() float32 { + if m != nil { + return m.Salience + } + return 0 +} + +func (m *Entity) GetMentions() []*EntityMention { + if m != nil { + return m.Mentions + } + return nil +} + +// Represents the smallest syntactic building block of the text. +type Token struct { + // The token text. + Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // Parts of speech tag for this token. + PartOfSpeech *PartOfSpeech `protobuf:"bytes,2,opt,name=part_of_speech,json=partOfSpeech" json:"part_of_speech,omitempty"` + // Dependency tree parse for this token. + DependencyEdge *DependencyEdge `protobuf:"bytes,3,opt,name=dependency_edge,json=dependencyEdge" json:"dependency_edge,omitempty"` + // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token. 
+ Lemma string `protobuf:"bytes,4,opt,name=lemma" json:"lemma,omitempty"` +} + +func (m *Token) Reset() { *m = Token{} } +func (m *Token) String() string { return proto.CompactTextString(m) } +func (*Token) ProtoMessage() {} +func (*Token) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Token) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *Token) GetPartOfSpeech() *PartOfSpeech { + if m != nil { + return m.PartOfSpeech + } + return nil +} + +func (m *Token) GetDependencyEdge() *DependencyEdge { + if m != nil { + return m.DependencyEdge + } + return nil +} + +func (m *Token) GetLemma() string { + if m != nil { + return m.Lemma + } + return "" +} + +// Represents the feeling associated with the entire text or entities in +// the text. +type Sentiment struct { + // DEPRECATED FIELD - This field is being deprecated in + // favor of score. Please refer to our documentation at + // https://cloud.google.com/natural-language/docs for more information. + Polarity float32 `protobuf:"fixed32,1,opt,name=polarity" json:"polarity,omitempty"` + // A non-negative number in the [0, +inf) range, which represents + // the absolute magnitude of sentiment regardless of score (positive or + // negative). + Magnitude float32 `protobuf:"fixed32,2,opt,name=magnitude" json:"magnitude,omitempty"` + // Sentiment score between -1.0 (negative sentiment) and 1.0 + // (positive sentiment). 
+ Score float32 `protobuf:"fixed32,3,opt,name=score" json:"score,omitempty"` +} + +func (m *Sentiment) Reset() { *m = Sentiment{} } +func (m *Sentiment) String() string { return proto.CompactTextString(m) } +func (*Sentiment) ProtoMessage() {} +func (*Sentiment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Sentiment) GetPolarity() float32 { + if m != nil { + return m.Polarity + } + return 0 +} + +func (m *Sentiment) GetMagnitude() float32 { + if m != nil { + return m.Magnitude + } + return 0 +} + +func (m *Sentiment) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +// Represents part of speech information for a token. +type PartOfSpeech struct { + // The part of speech tag. + Tag PartOfSpeech_Tag `protobuf:"varint,1,opt,name=tag,enum=google.cloud.language.v1beta1.PartOfSpeech_Tag" json:"tag,omitempty"` + // The grammatical aspect. + Aspect PartOfSpeech_Aspect `protobuf:"varint,2,opt,name=aspect,enum=google.cloud.language.v1beta1.PartOfSpeech_Aspect" json:"aspect,omitempty"` + // The grammatical case. + Case PartOfSpeech_Case `protobuf:"varint,3,opt,name=case,enum=google.cloud.language.v1beta1.PartOfSpeech_Case" json:"case,omitempty"` + // The grammatical form. + Form PartOfSpeech_Form `protobuf:"varint,4,opt,name=form,enum=google.cloud.language.v1beta1.PartOfSpeech_Form" json:"form,omitempty"` + // The grammatical gender. + Gender PartOfSpeech_Gender `protobuf:"varint,5,opt,name=gender,enum=google.cloud.language.v1beta1.PartOfSpeech_Gender" json:"gender,omitempty"` + // The grammatical mood. + Mood PartOfSpeech_Mood `protobuf:"varint,6,opt,name=mood,enum=google.cloud.language.v1beta1.PartOfSpeech_Mood" json:"mood,omitempty"` + // The grammatical number. + Number PartOfSpeech_Number `protobuf:"varint,7,opt,name=number,enum=google.cloud.language.v1beta1.PartOfSpeech_Number" json:"number,omitempty"` + // The grammatical person. 
+ Person PartOfSpeech_Person `protobuf:"varint,8,opt,name=person,enum=google.cloud.language.v1beta1.PartOfSpeech_Person" json:"person,omitempty"` + // The grammatical properness. + Proper PartOfSpeech_Proper `protobuf:"varint,9,opt,name=proper,enum=google.cloud.language.v1beta1.PartOfSpeech_Proper" json:"proper,omitempty"` + // The grammatical reciprocity. + Reciprocity PartOfSpeech_Reciprocity `protobuf:"varint,10,opt,name=reciprocity,enum=google.cloud.language.v1beta1.PartOfSpeech_Reciprocity" json:"reciprocity,omitempty"` + // The grammatical tense. + Tense PartOfSpeech_Tense `protobuf:"varint,11,opt,name=tense,enum=google.cloud.language.v1beta1.PartOfSpeech_Tense" json:"tense,omitempty"` + // The grammatical voice. + Voice PartOfSpeech_Voice `protobuf:"varint,12,opt,name=voice,enum=google.cloud.language.v1beta1.PartOfSpeech_Voice" json:"voice,omitempty"` +} + +func (m *PartOfSpeech) Reset() { *m = PartOfSpeech{} } +func (m *PartOfSpeech) String() string { return proto.CompactTextString(m) } +func (*PartOfSpeech) ProtoMessage() {} +func (*PartOfSpeech) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *PartOfSpeech) GetTag() PartOfSpeech_Tag { + if m != nil { + return m.Tag + } + return PartOfSpeech_UNKNOWN +} + +func (m *PartOfSpeech) GetAspect() PartOfSpeech_Aspect { + if m != nil { + return m.Aspect + } + return PartOfSpeech_ASPECT_UNKNOWN +} + +func (m *PartOfSpeech) GetCase() PartOfSpeech_Case { + if m != nil { + return m.Case + } + return PartOfSpeech_CASE_UNKNOWN +} + +func (m *PartOfSpeech) GetForm() PartOfSpeech_Form { + if m != nil { + return m.Form + } + return PartOfSpeech_FORM_UNKNOWN +} + +func (m *PartOfSpeech) GetGender() PartOfSpeech_Gender { + if m != nil { + return m.Gender + } + return PartOfSpeech_GENDER_UNKNOWN +} + +func (m *PartOfSpeech) GetMood() PartOfSpeech_Mood { + if m != nil { + return m.Mood + } + return PartOfSpeech_MOOD_UNKNOWN +} + +func (m *PartOfSpeech) GetNumber() PartOfSpeech_Number { + if m != nil 
{ + return m.Number + } + return PartOfSpeech_NUMBER_UNKNOWN +} + +func (m *PartOfSpeech) GetPerson() PartOfSpeech_Person { + if m != nil { + return m.Person + } + return PartOfSpeech_PERSON_UNKNOWN +} + +func (m *PartOfSpeech) GetProper() PartOfSpeech_Proper { + if m != nil { + return m.Proper + } + return PartOfSpeech_PROPER_UNKNOWN +} + +func (m *PartOfSpeech) GetReciprocity() PartOfSpeech_Reciprocity { + if m != nil { + return m.Reciprocity + } + return PartOfSpeech_RECIPROCITY_UNKNOWN +} + +func (m *PartOfSpeech) GetTense() PartOfSpeech_Tense { + if m != nil { + return m.Tense + } + return PartOfSpeech_TENSE_UNKNOWN +} + +func (m *PartOfSpeech) GetVoice() PartOfSpeech_Voice { + if m != nil { + return m.Voice + } + return PartOfSpeech_VOICE_UNKNOWN +} + +// Represents dependency parse tree information for a token. +type DependencyEdge struct { + // Represents the head of this token in the dependency tree. + // This is the index of the token which has an arc going to this token. + // The index is the position of the token in the array of tokens returned + // by the API method. If this token is a root token, then the + // `head_token_index` is its own index. + HeadTokenIndex int32 `protobuf:"varint,1,opt,name=head_token_index,json=headTokenIndex" json:"head_token_index,omitempty"` + // The parse label for the token. 
+ Label DependencyEdge_Label `protobuf:"varint,2,opt,name=label,enum=google.cloud.language.v1beta1.DependencyEdge_Label" json:"label,omitempty"` +} + +func (m *DependencyEdge) Reset() { *m = DependencyEdge{} } +func (m *DependencyEdge) String() string { return proto.CompactTextString(m) } +func (*DependencyEdge) ProtoMessage() {} +func (*DependencyEdge) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DependencyEdge) GetHeadTokenIndex() int32 { + if m != nil { + return m.HeadTokenIndex + } + return 0 +} + +func (m *DependencyEdge) GetLabel() DependencyEdge_Label { + if m != nil { + return m.Label + } + return DependencyEdge_UNKNOWN +} + +// Represents a mention for an entity in the text. Currently, proper noun +// mentions are supported. +type EntityMention struct { + // The mention text. + Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // The type of the entity mention. + Type EntityMention_Type `protobuf:"varint,2,opt,name=type,enum=google.cloud.language.v1beta1.EntityMention_Type" json:"type,omitempty"` +} + +func (m *EntityMention) Reset() { *m = EntityMention{} } +func (m *EntityMention) String() string { return proto.CompactTextString(m) } +func (*EntityMention) ProtoMessage() {} +func (*EntityMention) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *EntityMention) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *EntityMention) GetType() EntityMention_Type { + if m != nil { + return m.Type + } + return EntityMention_TYPE_UNKNOWN +} + +// Represents an output piece of text. +type TextSpan struct { + // The content of the output text. + Content string `protobuf:"bytes,1,opt,name=content" json:"content,omitempty"` + // The API calculates the beginning offset of the content in the original + // document according to the [EncodingType][google.cloud.language.v1beta1.EncodingType] specified in the API request. 
+ BeginOffset int32 `protobuf:"varint,2,opt,name=begin_offset,json=beginOffset" json:"begin_offset,omitempty"` +} + +func (m *TextSpan) Reset() { *m = TextSpan{} } +func (m *TextSpan) String() string { return proto.CompactTextString(m) } +func (*TextSpan) ProtoMessage() {} +func (*TextSpan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *TextSpan) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *TextSpan) GetBeginOffset() int32 { + if m != nil { + return m.BeginOffset + } + return 0 +} + +// The sentiment analysis request message. +type AnalyzeSentimentRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate sentence offsets for the + // sentence sentiment. + EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeSentimentRequest) Reset() { *m = AnalyzeSentimentRequest{} } +func (m *AnalyzeSentimentRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSentimentRequest) ProtoMessage() {} +func (*AnalyzeSentimentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *AnalyzeSentimentRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeSentimentRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The sentiment analysis response message. +type AnalyzeSentimentResponse struct { + // The overall sentiment of the input document. 
+ DocumentSentiment *Sentiment `protobuf:"bytes,1,opt,name=document_sentiment,json=documentSentiment" json:"document_sentiment,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details. + Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` + // The sentiment for all the sentences in the document. + Sentences []*Sentence `protobuf:"bytes,3,rep,name=sentences" json:"sentences,omitempty"` +} + +func (m *AnalyzeSentimentResponse) Reset() { *m = AnalyzeSentimentResponse{} } +func (m *AnalyzeSentimentResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSentimentResponse) ProtoMessage() {} +func (*AnalyzeSentimentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *AnalyzeSentimentResponse) GetDocumentSentiment() *Sentiment { + if m != nil { + return m.DocumentSentiment + } + return nil +} + +func (m *AnalyzeSentimentResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +func (m *AnalyzeSentimentResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +// The entity analysis request message. +type AnalyzeEntitiesRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate offsets. 
+ EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeEntitiesRequest) Reset() { *m = AnalyzeEntitiesRequest{} } +func (m *AnalyzeEntitiesRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitiesRequest) ProtoMessage() {} +func (*AnalyzeEntitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *AnalyzeEntitiesRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeEntitiesRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The entity analysis response message. +type AnalyzeEntitiesResponse struct { + // The recognized entities in the input document. + Entities []*Entity `protobuf:"bytes,1,rep,name=entities" json:"entities,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details. + Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` +} + +func (m *AnalyzeEntitiesResponse) Reset() { *m = AnalyzeEntitiesResponse{} } +func (m *AnalyzeEntitiesResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitiesResponse) ProtoMessage() {} +func (*AnalyzeEntitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *AnalyzeEntitiesResponse) GetEntities() []*Entity { + if m != nil { + return m.Entities + } + return nil +} + +func (m *AnalyzeEntitiesResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// The syntax analysis request message. +type AnalyzeSyntaxRequest struct { + // Input document. 
+ Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate offsets. + EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeSyntaxRequest) Reset() { *m = AnalyzeSyntaxRequest{} } +func (m *AnalyzeSyntaxRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSyntaxRequest) ProtoMessage() {} +func (*AnalyzeSyntaxRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *AnalyzeSyntaxRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeSyntaxRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The syntax analysis response message. +type AnalyzeSyntaxResponse struct { + // Sentences in the input document. + Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences" json:"sentences,omitempty"` + // Tokens, along with their syntactic information, in the input document. + Tokens []*Token `protobuf:"bytes,2,rep,name=tokens" json:"tokens,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details. 
+ Language string `protobuf:"bytes,3,opt,name=language" json:"language,omitempty"` +} + +func (m *AnalyzeSyntaxResponse) Reset() { *m = AnalyzeSyntaxResponse{} } +func (m *AnalyzeSyntaxResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSyntaxResponse) ProtoMessage() {} +func (*AnalyzeSyntaxResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *AnalyzeSyntaxResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +func (m *AnalyzeSyntaxResponse) GetTokens() []*Token { + if m != nil { + return m.Tokens + } + return nil +} + +func (m *AnalyzeSyntaxResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// The request message for the text annotation API, which can perform multiple +// analysis types (sentiment, entities, and syntax) in one call. +type AnnotateTextRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The enabled features. + Features *AnnotateTextRequest_Features `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // The encoding type used by the API to calculate offsets. 
+ EncodingType EncodingType `protobuf:"varint,3,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta1.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnnotateTextRequest) Reset() { *m = AnnotateTextRequest{} } +func (m *AnnotateTextRequest) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextRequest) ProtoMessage() {} +func (*AnnotateTextRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *AnnotateTextRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnnotateTextRequest) GetFeatures() *AnnotateTextRequest_Features { + if m != nil { + return m.Features + } + return nil +} + +func (m *AnnotateTextRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// All available features for sentiment, syntax, and semantic analysis. +// Setting each one to true will enable that specific analysis for the input. +type AnnotateTextRequest_Features struct { + // Extract syntax information. + ExtractSyntax bool `protobuf:"varint,1,opt,name=extract_syntax,json=extractSyntax" json:"extract_syntax,omitempty"` + // Extract entities. + ExtractEntities bool `protobuf:"varint,2,opt,name=extract_entities,json=extractEntities" json:"extract_entities,omitempty"` + // Extract document-level sentiment. 
+ ExtractDocumentSentiment bool `protobuf:"varint,3,opt,name=extract_document_sentiment,json=extractDocumentSentiment" json:"extract_document_sentiment,omitempty"` +} + +func (m *AnnotateTextRequest_Features) Reset() { *m = AnnotateTextRequest_Features{} } +func (m *AnnotateTextRequest_Features) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextRequest_Features) ProtoMessage() {} +func (*AnnotateTextRequest_Features) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{15, 0} +} + +func (m *AnnotateTextRequest_Features) GetExtractSyntax() bool { + if m != nil { + return m.ExtractSyntax + } + return false +} + +func (m *AnnotateTextRequest_Features) GetExtractEntities() bool { + if m != nil { + return m.ExtractEntities + } + return false +} + +func (m *AnnotateTextRequest_Features) GetExtractDocumentSentiment() bool { + if m != nil { + return m.ExtractDocumentSentiment + } + return false +} + +// The text annotations response message. +type AnnotateTextResponse struct { + // Sentences in the input document. Populated if the user enables + // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_syntax]. + Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences" json:"sentences,omitempty"` + // Tokens, along with their syntactic information, in the input document. + // Populated if the user enables + // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_syntax]. + Tokens []*Token `protobuf:"bytes,2,rep,name=tokens" json:"tokens,omitempty"` + // Entities, along with their semantic information, in the input document. + // Populated if the user enables + // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_entities]. + Entities []*Entity `protobuf:"bytes,3,rep,name=entities" json:"entities,omitempty"` + // The overall sentiment for the document. 
Populated if the user enables + // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_document_sentiment]. + DocumentSentiment *Sentiment `protobuf:"bytes,4,opt,name=document_sentiment,json=documentSentiment" json:"document_sentiment,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details. + Language string `protobuf:"bytes,5,opt,name=language" json:"language,omitempty"` +} + +func (m *AnnotateTextResponse) Reset() { *m = AnnotateTextResponse{} } +func (m *AnnotateTextResponse) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextResponse) ProtoMessage() {} +func (*AnnotateTextResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *AnnotateTextResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +func (m *AnnotateTextResponse) GetTokens() []*Token { + if m != nil { + return m.Tokens + } + return nil +} + +func (m *AnnotateTextResponse) GetEntities() []*Entity { + if m != nil { + return m.Entities + } + return nil +} + +func (m *AnnotateTextResponse) GetDocumentSentiment() *Sentiment { + if m != nil { + return m.DocumentSentiment + } + return nil +} + +func (m *AnnotateTextResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +func init() { + proto.RegisterType((*Document)(nil), "google.cloud.language.v1beta1.Document") + proto.RegisterType((*Sentence)(nil), "google.cloud.language.v1beta1.Sentence") + proto.RegisterType((*Entity)(nil), "google.cloud.language.v1beta1.Entity") + proto.RegisterType((*Token)(nil), "google.cloud.language.v1beta1.Token") + proto.RegisterType((*Sentiment)(nil), "google.cloud.language.v1beta1.Sentiment") + 
proto.RegisterType((*PartOfSpeech)(nil), "google.cloud.language.v1beta1.PartOfSpeech") + proto.RegisterType((*DependencyEdge)(nil), "google.cloud.language.v1beta1.DependencyEdge") + proto.RegisterType((*EntityMention)(nil), "google.cloud.language.v1beta1.EntityMention") + proto.RegisterType((*TextSpan)(nil), "google.cloud.language.v1beta1.TextSpan") + proto.RegisterType((*AnalyzeSentimentRequest)(nil), "google.cloud.language.v1beta1.AnalyzeSentimentRequest") + proto.RegisterType((*AnalyzeSentimentResponse)(nil), "google.cloud.language.v1beta1.AnalyzeSentimentResponse") + proto.RegisterType((*AnalyzeEntitiesRequest)(nil), "google.cloud.language.v1beta1.AnalyzeEntitiesRequest") + proto.RegisterType((*AnalyzeEntitiesResponse)(nil), "google.cloud.language.v1beta1.AnalyzeEntitiesResponse") + proto.RegisterType((*AnalyzeSyntaxRequest)(nil), "google.cloud.language.v1beta1.AnalyzeSyntaxRequest") + proto.RegisterType((*AnalyzeSyntaxResponse)(nil), "google.cloud.language.v1beta1.AnalyzeSyntaxResponse") + proto.RegisterType((*AnnotateTextRequest)(nil), "google.cloud.language.v1beta1.AnnotateTextRequest") + proto.RegisterType((*AnnotateTextRequest_Features)(nil), "google.cloud.language.v1beta1.AnnotateTextRequest.Features") + proto.RegisterType((*AnnotateTextResponse)(nil), "google.cloud.language.v1beta1.AnnotateTextResponse") + proto.RegisterEnum("google.cloud.language.v1beta1.EncodingType", EncodingType_name, EncodingType_value) + proto.RegisterEnum("google.cloud.language.v1beta1.Document_Type", Document_Type_name, Document_Type_value) + proto.RegisterEnum("google.cloud.language.v1beta1.Entity_Type", Entity_Type_name, Entity_Type_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Tag", PartOfSpeech_Tag_name, PartOfSpeech_Tag_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Aspect", PartOfSpeech_Aspect_name, PartOfSpeech_Aspect_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Case", PartOfSpeech_Case_name, 
PartOfSpeech_Case_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Form", PartOfSpeech_Form_name, PartOfSpeech_Form_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Gender", PartOfSpeech_Gender_name, PartOfSpeech_Gender_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Mood", PartOfSpeech_Mood_name, PartOfSpeech_Mood_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Number", PartOfSpeech_Number_name, PartOfSpeech_Number_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Person", PartOfSpeech_Person_name, PartOfSpeech_Person_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Proper", PartOfSpeech_Proper_name, PartOfSpeech_Proper_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Reciprocity", PartOfSpeech_Reciprocity_name, PartOfSpeech_Reciprocity_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Tense", PartOfSpeech_Tense_name, PartOfSpeech_Tense_value) + proto.RegisterEnum("google.cloud.language.v1beta1.PartOfSpeech_Voice", PartOfSpeech_Voice_name, PartOfSpeech_Voice_value) + proto.RegisterEnum("google.cloud.language.v1beta1.DependencyEdge_Label", DependencyEdge_Label_name, DependencyEdge_Label_value) + proto.RegisterEnum("google.cloud.language.v1beta1.EntityMention_Type", EntityMention_Type_name, EntityMention_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for LanguageService service + +type LanguageServiceClient interface { + // Analyzes the sentiment of the provided text. 
+ AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error) + // Finds named entities (currently proper names and common nouns) in the text + // along with entity types, salience, mentions for each entity, and + // other properties. + AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error) + // Analyzes the syntax of the text and provides sentence boundaries and + // tokenization along with part of speech tags, dependency trees, and other + // properties. + AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error) + // A convenience method that provides all the features that analyzeSentiment, + // analyzeEntities, and analyzeSyntax provide in one call. + AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error) +} + +type languageServiceClient struct { + cc *grpc.ClientConn +} + +func NewLanguageServiceClient(cc *grpc.ClientConn) LanguageServiceClient { + return &languageServiceClient{cc} +} + +func (c *languageServiceClient) AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error) { + out := new(AnalyzeSentimentResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta1.LanguageService/AnalyzeSentiment", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error) { + out := new(AnalyzeEntitiesResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta1.LanguageService/AnalyzeEntities", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error) { + out := new(AnalyzeSyntaxResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta1.LanguageService/AnalyzeSyntax", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error) { + out := new(AnnotateTextResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta1.LanguageService/AnnotateText", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for LanguageService service + +type LanguageServiceServer interface { + // Analyzes the sentiment of the provided text. + AnalyzeSentiment(context.Context, *AnalyzeSentimentRequest) (*AnalyzeSentimentResponse, error) + // Finds named entities (currently proper names and common nouns) in the text + // along with entity types, salience, mentions for each entity, and + // other properties. + AnalyzeEntities(context.Context, *AnalyzeEntitiesRequest) (*AnalyzeEntitiesResponse, error) + // Analyzes the syntax of the text and provides sentence boundaries and + // tokenization along with part of speech tags, dependency trees, and other + // properties. + AnalyzeSyntax(context.Context, *AnalyzeSyntaxRequest) (*AnalyzeSyntaxResponse, error) + // A convenience method that provides all the features that analyzeSentiment, + // analyzeEntities, and analyzeSyntax provide in one call. 
+ AnnotateText(context.Context, *AnnotateTextRequest) (*AnnotateTextResponse, error) +} + +func RegisterLanguageServiceServer(s *grpc.Server, srv LanguageServiceServer) { + s.RegisterService(&_LanguageService_serviceDesc, srv) +} + +func _LanguageService_AnalyzeSentiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeSentimentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeSentiment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta1.LanguageService/AnalyzeSentiment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeSentiment(ctx, req.(*AnalyzeSentimentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnalyzeEntities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeEntitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeEntities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta1.LanguageService/AnalyzeEntities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeEntities(ctx, req.(*AnalyzeEntitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnalyzeSyntax_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeSyntaxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(LanguageServiceServer).AnalyzeSyntax(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta1.LanguageService/AnalyzeSyntax", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeSyntax(ctx, req.(*AnalyzeSyntaxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnnotateText_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnnotateTextRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnnotateText(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta1.LanguageService/AnnotateText", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnnotateText(ctx, req.(*AnnotateTextRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LanguageService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.language.v1beta1.LanguageService", + HandlerType: (*LanguageServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AnalyzeSentiment", + Handler: _LanguageService_AnalyzeSentiment_Handler, + }, + { + MethodName: "AnalyzeEntities", + Handler: _LanguageService_AnalyzeEntities_Handler, + }, + { + MethodName: "AnalyzeSyntax", + Handler: _LanguageService_AnalyzeSyntax_Handler, + }, + { + MethodName: "AnnotateText", + Handler: _LanguageService_AnnotateText_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/language/v1beta1/language_service.proto", +} + +func init() { + proto.RegisterFile("google/cloud/language/v1beta1/language_service.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 2755 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x73, 0xdb, 0xc6, + 0x15, 0x37, 0xf8, 0x4f, 0xe4, 0x92, 0x92, 0xd6, 0x88, 0x93, 0xb0, 0x6a, 0xd2, 0x38, 0x48, 0x5c, + 0x2b, 0x76, 0x42, 0xc5, 0x52, 0xe2, 0xb8, 0x76, 0x9a, 0x06, 0x02, 0x96, 0x14, 0x64, 0x10, 0x40, + 0x16, 0x00, 0x25, 0xa7, 0x07, 0x0e, 0x4c, 0xae, 0x19, 0x4e, 0x24, 0x80, 0x25, 0x21, 0x8f, 0xd5, + 0x4b, 0x67, 0x32, 0xd3, 0x63, 0xa7, 0x87, 0xde, 0x7a, 0x6c, 0x0f, 0x3d, 0x75, 0xd2, 0x99, 0x5e, + 0xda, 0x0f, 0xd0, 0x43, 0xa7, 0xc7, 0xcc, 0xf4, 0x13, 0xf4, 0xd8, 0x43, 0x0f, 0x3d, 0xf4, 0xd8, + 0x79, 0xbb, 0x0b, 0xfe, 0x51, 0x1c, 0x4b, 0x4c, 0x72, 0xc8, 0x6d, 0xf7, 0xf1, 0xfd, 0x7e, 0xef, + 0xed, 0xdb, 0xb7, 0xef, 0x2d, 0x96, 0xe8, 0x9d, 0x41, 0x92, 0x0c, 0x8e, 0xd8, 0x56, 0xef, 0x28, + 0x39, 0xe9, 0x6f, 0x1d, 0x45, 0xf1, 0xe0, 0x24, 0x1a, 0xb0, 0xad, 0xc7, 0xb7, 0x1e, 0xb2, 0x34, + 0xba, 0x35, 0x15, 0x74, 0x27, 0x6c, 0xfc, 0x78, 0xd8, 0x63, 0x8d, 0xd1, 0x38, 0x49, 0x13, 0xf5, + 0x65, 0x81, 0x6a, 0x70, 0x54, 0x23, 0x53, 0x6a, 0x48, 0xd4, 0xc6, 0x4b, 0x92, 0x34, 0x1a, 0x0d, + 0xb7, 0xa2, 0x38, 0x4e, 0xd2, 0x28, 0x1d, 0x26, 0xf1, 0x44, 0x80, 0xb5, 0xff, 0x28, 0xa8, 0x6c, + 0x26, 0xbd, 0x93, 0x63, 0x16, 0xa7, 0xea, 0x87, 0xa8, 0x90, 0x9e, 0x8e, 0x58, 0x5d, 0xb9, 0xaa, + 0x6c, 0xae, 0x6d, 0xbf, 0xd9, 0x78, 0x26, 0x71, 0x23, 0x83, 0x35, 0x82, 0xd3, 0x11, 0xa3, 0x1c, + 0xa9, 0x6e, 0xa0, 0x95, 0x5e, 0x12, 0xa7, 0x2c, 0x4e, 0xeb, 0xb9, 0xab, 0xca, 0x66, 0x65, 0xef, + 0x12, 0xcd, 0x04, 0xea, 0x26, 0x5a, 0x1f, 0xf4, 0x26, 0x5d, 0x39, 0xed, 0x9e, 0x8c, 0x87, 0xf5, + 0xbc, 0xd4, 0x59, 0x1d, 0xf4, 0x26, 0x86, 0x90, 0x87, 0xe3, 0xa1, 0xba, 0x81, 0xca, 0x99, 0xb5, + 0x7a, 0x01, 0x54, 0xe8, 0x74, 0xae, 0xdd, 0x46, 0x05, 0xb0, 0xa7, 0x5e, 0x41, 0x38, 0x78, 0xe0, + 0x91, 0x6e, 0xe8, 0xf8, 0x1e, 0x31, 0xac, 0xa6, 0x45, 0x4c, 0x7c, 0x49, 0x5d, 0x43, 0xc8, 0xb3, + 0x75, 0xcb, 0xe9, 0x06, 0xe4, 0x30, 0xc0, 0x8a, 0x5a, 0x46, 0x85, 0xbd, 0xa0, 0x6d, 0xe3, 0xdc, + 0x6e, 0x19, 0x95, 0x26, 0xc9, 0xc9, 0xb8, 0xc7, 0xb4, 0x5f, 
0x2b, 0xa8, 0xec, 0x33, 0x30, 0xd6, + 0x63, 0xea, 0x3d, 0x54, 0x48, 0xd9, 0x93, 0x94, 0x2f, 0xb9, 0xba, 0x7d, 0xfd, 0x9c, 0x25, 0x07, + 0xec, 0x49, 0xea, 0x8f, 0xa2, 0x98, 0x72, 0x90, 0xda, 0x44, 0x95, 0x09, 0x8b, 0xd3, 0xe1, 0x71, + 0xb6, 0xde, 0xea, 0xf6, 0xe6, 0x39, 0x0c, 0x7e, 0xa6, 0x4f, 0x67, 0x50, 0xed, 0x1f, 0x79, 0x54, + 0x22, 0x71, 0x3a, 0x4c, 0x4f, 0x55, 0x15, 0x15, 0xe2, 0xe8, 0x58, 0x6c, 0x41, 0x85, 0xf2, 0xb1, + 0xfa, 0x81, 0xdc, 0x96, 0x1c, 0xdf, 0x96, 0x1b, 0xe7, 0x58, 0x10, 0x44, 0xf3, 0x9b, 0xe2, 0xa2, + 0xf2, 0x31, 0x4b, 0xa3, 0x7e, 0x94, 0x46, 0xf5, 0xfc, 0xd5, 0xfc, 0x66, 0x75, 0x7b, 0xe7, 0x62, + 0x1c, 0x6d, 0x89, 0x22, 0x71, 0x3a, 0x3e, 0xa5, 0x53, 0x12, 0xd8, 0x9f, 0x49, 0x74, 0x34, 0x84, + 0x00, 0xf2, 0xfd, 0xc9, 0xd1, 0xe9, 0x5c, 0xdd, 0x03, 0x63, 0x31, 0x4f, 0xb1, 0x7a, 0x91, 0x1b, + 0x7b, 0xf3, 0x42, 0xc6, 0xda, 0x02, 0x44, 0xa7, 0xe8, 0x8d, 0x7b, 0x68, 0x75, 0xc1, 0x01, 0x15, + 0xa3, 0xfc, 0xa7, 0xec, 0x54, 0x86, 0x06, 0x86, 0xea, 0x15, 0x54, 0x7c, 0x1c, 0x1d, 0x9d, 0x88, + 0xd0, 0x54, 0xa8, 0x98, 0xdc, 0xcd, 0xdd, 0x51, 0xb4, 0x53, 0x99, 0x26, 0x55, 0xb4, 0x12, 0x3a, + 0xf7, 0x1d, 0xf7, 0xc0, 0xc1, 0x97, 0x54, 0x84, 0x4a, 0x1e, 0xa1, 0xbe, 0xeb, 0x60, 0x45, 0xad, + 0xa1, 0xb2, 0xed, 0x1a, 0x7a, 0x60, 0xb9, 0x0e, 0xce, 0xa9, 0x18, 0xd5, 0x5c, 0xda, 0xd2, 0x1d, + 0xeb, 0x63, 0x21, 0xc9, 0xab, 0x15, 0x54, 0x24, 0x1d, 0xe2, 0x04, 0xb8, 0xa0, 0xae, 0xa3, 0xea, + 0x81, 0x4b, 0xef, 0x77, 0xdd, 0x66, 0x57, 0xa7, 0x01, 0x2e, 0xaa, 0x97, 0xd1, 0xaa, 0xe1, 0x3a, + 0x7e, 0xd8, 0x26, 0xb4, 0xdb, 0x72, 0x5d, 0x13, 0x97, 0x40, 0xdd, 0x0d, 0xf6, 0x08, 0xc5, 0x2b, + 0xda, 0x2f, 0x73, 0xa8, 0x18, 0x24, 0x9f, 0xb2, 0xf8, 0x9b, 0x25, 0xd7, 0x47, 0x68, 0x6d, 0x14, + 0x8d, 0xd3, 0x6e, 0xf2, 0xa8, 0x3b, 0x19, 0x31, 0xd6, 0xfb, 0x44, 0x66, 0xd8, 0xcd, 0x73, 0x68, + 0xbc, 0x68, 0x9c, 0xba, 0x8f, 0x7c, 0x0e, 0xa1, 0xb5, 0xd1, 0xdc, 0x4c, 0xed, 0xa0, 0xf5, 0x3e, + 0x1b, 0xb1, 0xb8, 0xcf, 0xe2, 0xde, 0x69, 0x97, 0xf5, 0x07, 0x8c, 0x9f, 0xc0, 0xea, 0xf6, 0x5b, + 
0xe7, 0x1d, 0xf5, 0x29, 0x8a, 0xf4, 0x07, 0x8c, 0xae, 0xf5, 0x17, 0xe6, 0xb0, 0x0d, 0x47, 0xec, + 0xf8, 0x38, 0x92, 0x87, 0x55, 0x4c, 0xb4, 0x9f, 0xa2, 0xca, 0x34, 0xdb, 0x21, 0x65, 0x46, 0xc9, + 0x51, 0x34, 0x1e, 0xa6, 0x62, 0x03, 0x73, 0x74, 0x3a, 0x57, 0x5f, 0x42, 0x95, 0xe3, 0x68, 0x10, + 0x0f, 0xd3, 0x93, 0xbe, 0xd8, 0xc9, 0x1c, 0x9d, 0x09, 0x80, 0x7c, 0xd2, 0x4b, 0xc6, 0xc2, 0xd5, + 0x1c, 0x15, 0x13, 0xed, 0xaf, 0x97, 0x51, 0x6d, 0x7e, 0xa5, 0xaa, 0x8e, 0xf2, 0x69, 0x34, 0x90, + 0xa5, 0x6b, 0x6b, 0x89, 0x18, 0x35, 0x82, 0x68, 0x40, 0x01, 0xab, 0xee, 0xa3, 0x52, 0x34, 0x19, + 0xb1, 0x5e, 0x2a, 0x4f, 0xda, 0xf6, 0x32, 0x2c, 0x3a, 0x47, 0x52, 0xc9, 0xa0, 0x9a, 0xa8, 0xd0, + 0x8b, 0x26, 0xc2, 0xe9, 0xb5, 0xed, 0xb7, 0x97, 0x61, 0x32, 0xa2, 0x09, 0xa3, 0x1c, 0x0d, 0x2c, + 0x8f, 0x92, 0xf1, 0x31, 0x8f, 0xeb, 0x92, 0x2c, 0xcd, 0x64, 0x7c, 0x4c, 0x39, 0x1a, 0xd6, 0x35, + 0x80, 0xed, 0x1a, 0xd7, 0x8b, 0xcb, 0xaf, 0xab, 0xc5, 0x91, 0x54, 0x32, 0x80, 0x47, 0xc7, 0x49, + 0xd2, 0xaf, 0x97, 0x96, 0xf7, 0xa8, 0x9d, 0x24, 0x7d, 0xca, 0xd1, 0xe0, 0x51, 0x7c, 0x72, 0xfc, + 0x90, 0x8d, 0xeb, 0x2b, 0xcb, 0x7b, 0xe4, 0x70, 0x24, 0x95, 0x0c, 0xc0, 0x35, 0x62, 0xe3, 0x49, + 0x12, 0xd7, 0xcb, 0xcb, 0x73, 0x79, 0x1c, 0x49, 0x25, 0x03, 0xe7, 0x1a, 0x27, 0x23, 0x36, 0xae, + 0x57, 0xbe, 0x06, 0x17, 0x47, 0x52, 0xc9, 0xa0, 0x3e, 0x40, 0xd5, 0x31, 0xeb, 0x0d, 0x47, 0xe3, + 0xa4, 0x07, 0x49, 0x8f, 0x38, 0xe1, 0x7b, 0xcb, 0x10, 0xd2, 0x19, 0x9c, 0xce, 0x73, 0xa9, 0x2d, + 0x54, 0x4c, 0x59, 0x3c, 0x61, 0xf5, 0x2a, 0x27, 0xbd, 0xb5, 0x54, 0xb6, 0x03, 0x90, 0x0a, 0x3c, + 0x10, 0x3d, 0x4e, 0x86, 0x3d, 0x56, 0xaf, 0x2d, 0x4f, 0xd4, 0x01, 0x20, 0x15, 0x78, 0xed, 0x57, + 0x0a, 0xca, 0x07, 0xd1, 0x60, 0xb1, 0xdc, 0xae, 0xa0, 0xbc, 0x6e, 0xee, 0x63, 0x45, 0x0c, 0x3c, + 0x9c, 0x13, 0x83, 0x0e, 0xce, 0x43, 0x5f, 0x36, 0x5c, 0x67, 0x1f, 0x17, 0x40, 0x64, 0x12, 0x28, + 0xaa, 0x65, 0x54, 0x70, 0xdc, 0xd0, 0xc1, 0x25, 0x10, 0x39, 0x61, 0x1b, 0xaf, 0x80, 0xc8, 0xa3, + 0xae, 0x83, 0xcb, 0x20, 0xf2, 0x68, 
0x80, 0x2b, 0x50, 0x67, 0xbd, 0xd0, 0x31, 0x02, 0x8c, 0xe0, + 0xd7, 0x0e, 0xa1, 0xbb, 0xb8, 0xaa, 0x16, 0x91, 0x72, 0x88, 0x6b, 0xf0, 0x9b, 0xde, 0x6c, 0x5a, + 0x87, 0x78, 0x55, 0x73, 0x51, 0x49, 0x1c, 0x48, 0x55, 0x45, 0x6b, 0x3a, 0xdc, 0x10, 0x82, 0xee, + 0xcc, 0x31, 0xb8, 0x25, 0x10, 0xda, 0x24, 0x46, 0x60, 0x75, 0x08, 0x56, 0xa0, 0xfa, 0x5b, 0xed, + 0x39, 0x49, 0x0e, 0x4a, 0xbe, 0x47, 0xdd, 0x16, 0x25, 0xbe, 0x0f, 0x82, 0xbc, 0xf6, 0x3f, 0x05, + 0x15, 0xe0, 0x60, 0x82, 0xae, 0xa1, 0xfb, 0x64, 0x91, 0x4d, 0x37, 0x8c, 0xd0, 0xd7, 0x25, 0xdb, + 0x2a, 0xaa, 0xe8, 0x26, 0x78, 0x66, 0xe9, 0x36, 0xce, 0x89, 0x66, 0xd1, 0xf6, 0x6c, 0xd2, 0x26, + 0x0e, 0xd7, 0xc8, 0x43, 0x1f, 0x32, 0x85, 0x76, 0x01, 0xfa, 0x50, 0x8b, 0x38, 0x16, 0x9f, 0x15, + 0xb9, 0x27, 0x8e, 0x1f, 0xd0, 0x10, 0x94, 0x75, 0x1b, 0x97, 0x66, 0x7d, 0xaa, 0x43, 0xf0, 0x0a, + 0xd8, 0x72, 0xdc, 0xb6, 0xe5, 0x88, 0x79, 0x19, 0xe2, 0xed, 0xee, 0xda, 0xd6, 0x47, 0x21, 0xc1, + 0x15, 0x30, 0xec, 0xe9, 0x34, 0x10, 0x5c, 0x08, 0x0c, 0x7b, 0x94, 0x78, 0xae, 0x6f, 0x41, 0x4b, + 0xd3, 0x6d, 0x5c, 0x85, 0x60, 0x50, 0xd2, 0xb4, 0xc9, 0xa1, 0xd5, 0x21, 0x5d, 0x58, 0x06, 0xae, + 0x81, 0x1a, 0x25, 0x36, 0x27, 0x14, 0xa2, 0x55, 0xb0, 0xd9, 0xc9, 0x6c, 0xae, 0x69, 0x9f, 0x2b, + 0xa8, 0x00, 0xd5, 0x04, 0x9c, 0x6b, 0xba, 0xb4, 0x3d, 0xb7, 0xf4, 0x1a, 0x2a, 0xeb, 0x26, 0x38, + 0xa4, 0xdb, 0x72, 0xe1, 0xe1, 0xa1, 0x65, 0x5b, 0x3a, 0x7d, 0x80, 0x73, 0x60, 0x6c, 0x6e, 0xe1, + 0x1f, 0x13, 0x8a, 0xf3, 0x9c, 0xc2, 0x72, 0x74, 0xbb, 0x4b, 0x1c, 0xd3, 0x72, 0x5a, 0xb8, 0x00, + 0xb1, 0x68, 0x11, 0x1a, 0x3a, 0x26, 0x2e, 0xc2, 0x98, 0x12, 0xdd, 0xb6, 0x7c, 0xb1, 0x6e, 0x8b, + 0xca, 0xd9, 0x0a, 0x6c, 0xad, 0xbf, 0xe7, 0xd2, 0x00, 0x97, 0x61, 0xdb, 0x6d, 0xd7, 0x69, 0x89, + 0x5c, 0x70, 0xa9, 0x49, 0x28, 0x46, 0xa0, 0x2d, 0xaf, 0x81, 0x06, 0xae, 0x6a, 0x04, 0x95, 0x44, + 0xd9, 0x02, 0x1f, 0x5a, 0xc4, 0x31, 0x09, 0x5d, 0x74, 0xba, 0x49, 0xda, 0x96, 0x63, 0x39, 0x72, + 0xb7, 0xda, 0xba, 0x6f, 0x84, 0x36, 0x4c, 0x73, 0xe0, 0x82, 0x43, 0xc2, 
0x00, 0x9c, 0xd5, 0x7e, + 0x81, 0x0a, 0x50, 0xb3, 0xc0, 0xe9, 0xb6, 0xeb, 0x9a, 0x73, 0x14, 0x57, 0x10, 0x36, 0x5c, 0xc7, + 0x94, 0x81, 0xed, 0xc2, 0xaf, 0x58, 0x81, 0xcd, 0xe1, 0x69, 0xa4, 0xcb, 0x24, 0x82, 0xb9, 0x63, + 0x5a, 0x32, 0x90, 0x79, 0x88, 0xb4, 0xe5, 0x04, 0x84, 0x52, 0xb7, 0x95, 0xed, 0x7e, 0x15, 0xad, + 0xec, 0x87, 0x22, 0xc7, 0x8a, 0x90, 0x74, 0x7e, 0xb8, 0xbb, 0x0f, 0xe9, 0x0d, 0x82, 0x92, 0xf6, + 0x21, 0x2a, 0x89, 0x62, 0x07, 0xeb, 0x70, 0xc2, 0xf6, 0xee, 0xd9, 0x75, 0xf8, 0x96, 0xd3, 0x0a, + 0x6d, 0x9d, 0x62, 0x85, 0xdf, 0x6d, 0xec, 0x90, 0xf2, 0x94, 0x2b, 0xa3, 0x82, 0x19, 0xea, 0x36, + 0xce, 0x6b, 0x01, 0x2a, 0x89, 0x12, 0x07, 0x0c, 0xe2, 0xee, 0x33, 0xc7, 0x50, 0x41, 0xc5, 0xa6, + 0x45, 0xfd, 0x40, 0xc0, 0x7d, 0x02, 0x6b, 0xc2, 0x39, 0x10, 0x07, 0x7b, 0x16, 0x35, 0x71, 0x1e, + 0x16, 0x3a, 0x4b, 0x18, 0x79, 0x77, 0x2a, 0x68, 0x77, 0x50, 0x49, 0x14, 0x3b, 0xce, 0x4a, 0x5d, + 0x6f, 0xc1, 0x2f, 0xf0, 0x84, 0xcb, 0x44, 0x48, 0x1c, 0x37, 0xe8, 0xca, 0x79, 0x4e, 0xdb, 0x47, + 0xd5, 0xb9, 0xaa, 0xa6, 0xbe, 0x88, 0x9e, 0xa3, 0xc4, 0xb0, 0x3c, 0xea, 0x1a, 0x56, 0xf0, 0x60, + 0xf1, 0x4c, 0x65, 0x3f, 0xf0, 0xd4, 0x82, 0xf5, 0xbb, 0x4e, 0x77, 0x4e, 0x96, 0xd3, 0x26, 0xa8, + 0xc8, 0x8b, 0x19, 0xc4, 0x35, 0x20, 0xce, 0xc2, 0x99, 0x7c, 0x1e, 0x5d, 0x9e, 0xdf, 0x20, 0xfe, + 0xb3, 0x58, 0x65, 0x33, 0x0c, 0x42, 0x4a, 0x44, 0x90, 0x3c, 0xdd, 0x0f, 0x70, 0x1e, 0x36, 0xc1, + 0xa3, 0xc4, 0x17, 0x97, 0xbd, 0x55, 0x54, 0x99, 0xd6, 0x02, 0x5c, 0x14, 0x1f, 0x14, 0x61, 0x36, + 0x2f, 0x69, 0xbb, 0xa8, 0xc8, 0x0b, 0x1f, 0x18, 0xed, 0xb8, 0x96, 0x41, 0x16, 0x17, 0xae, 0x1b, + 0xb3, 0x22, 0x60, 0xe8, 0x59, 0x4d, 0xc8, 0x71, 0x13, 0x7a, 0x56, 0x4b, 0xfe, 0xbb, 0x82, 0xd6, + 0x16, 0x6f, 0x54, 0xea, 0x26, 0xc2, 0x9f, 0xb0, 0xa8, 0xdf, 0x4d, 0xe1, 0xde, 0xd8, 0x1d, 0xc6, + 0x7d, 0xf6, 0x84, 0x5f, 0x65, 0x8a, 0x74, 0x0d, 0xe4, 0xfc, 0x3a, 0x69, 0x81, 0x54, 0xb5, 0x50, + 0xf1, 0x28, 0x7a, 0xc8, 0x8e, 0xe4, 0x1d, 0x65, 0x67, 0xa9, 0x9b, 0x5b, 0xc3, 0x06, 0x28, 0x15, + 0x0c, 0xda, 
0xbf, 0x4b, 0xa8, 0xc8, 0x05, 0x5f, 0xba, 0x25, 0xeb, 0xbb, 0xbb, 0x94, 0x74, 0xb0, + 0xc2, 0x4b, 0x2a, 0x1c, 0x62, 0x91, 0x15, 0xba, 0xd9, 0x31, 0x6c, 0x51, 0xbf, 0x74, 0xb3, 0xd3, + 0x76, 0x4d, 0x5c, 0x80, 0x30, 0xea, 0x30, 0x2a, 0x72, 0x05, 0xcf, 0x73, 0xe1, 0xf0, 0x82, 0x30, + 0x08, 0x28, 0x5e, 0xe1, 0x15, 0x3f, 0x3c, 0x14, 0x95, 0x4a, 0x0f, 0x0f, 0x21, 0x08, 0xb8, 0xa2, + 0x96, 0x50, 0xce, 0x30, 0x30, 0x02, 0x88, 0xc1, 0xe9, 0xab, 0xd3, 0x8e, 0xc0, 0xcb, 0xb8, 0x01, + 0xe7, 0x00, 0xaf, 0xf2, 0x28, 0xc2, 0x90, 0xc3, 0xd6, 0x44, 0xaf, 0xf0, 0xf0, 0x7a, 0xd6, 0x34, + 0x30, 0x28, 0x98, 0x96, 0x6f, 0xb8, 0x21, 0xf5, 0x09, 0xbe, 0xcc, 0x13, 0xdf, 0xdd, 0xdd, 0xc7, + 0x2a, 0x8c, 0xc8, 0xa1, 0x67, 0xe3, 0xe7, 0x78, 0x81, 0x75, 0x89, 0x7f, 0x60, 0x05, 0x7b, 0xf8, + 0x0a, 0xc8, 0x2d, 0xd0, 0x78, 0x1e, 0x46, 0x6d, 0x9d, 0xde, 0xc7, 0x2f, 0x00, 0x5b, 0xfb, 0x80, + 0xe0, 0x17, 0xc5, 0xa0, 0x83, 0xeb, 0xbc, 0x03, 0x91, 0x16, 0xfe, 0x1e, 0x38, 0xea, 0x38, 0x78, + 0x03, 0x48, 0x1c, 0x4f, 0xae, 0xf9, 0xfb, 0xe0, 0xa1, 0xc3, 0x3d, 0x7c, 0x09, 0x1c, 0x70, 0xa6, + 0x1e, 0xbe, 0x9c, 0xb5, 0xae, 0x1f, 0xf0, 0x3a, 0xc2, 0x0f, 0x2c, 0x7e, 0x05, 0xda, 0x93, 0x87, + 0xaf, 0xca, 0xf2, 0xac, 0x07, 0xfa, 0xa1, 0xe5, 0xe3, 0x57, 0x45, 0x4a, 0xd0, 0x00, 0x18, 0x35, + 0xde, 0xd6, 0x78, 0x20, 0x5e, 0xe3, 0x79, 0x09, 0x1e, 0xbe, 0x2e, 0x46, 0xbe, 0x8f, 0xaf, 0x71, + 0x5d, 0xd7, 0x0f, 0xc0, 0xa7, 0x1f, 0xca, 0x74, 0xe5, 0xda, 0xd7, 0xa7, 0x13, 0x67, 0x1f, 0x6f, + 0x8a, 0x93, 0x47, 0x20, 0x32, 0x6f, 0x88, 0xde, 0x49, 0x9a, 0xf8, 0x86, 0x1c, 0x79, 0xf8, 0x26, + 0xb7, 0x42, 0x5d, 0xc7, 0xc6, 0x6f, 0x66, 0x0d, 0xf5, 0x2d, 0x58, 0xa1, 0xe7, 0xe3, 0x06, 0xac, + 0xf0, 0xa3, 0x50, 0x77, 0xb8, 0x3f, 0x5b, 0xa0, 0x49, 0x0d, 0x18, 0xbe, 0x0d, 0x3f, 0xf0, 0x21, + 0x25, 0x36, 0xbe, 0xc5, 0x7f, 0x30, 0xa9, 0xeb, 0xe1, 0x6d, 0xa0, 0x00, 0x03, 0x3b, 0xe0, 0x03, + 0x25, 0x6d, 0x47, 0x77, 0x02, 0xfc, 0x8e, 0x38, 0xb9, 0xb0, 0x4e, 0xc7, 0x0c, 0xdb, 0xf8, 0x5d, + 0xb0, 0x4e, 0x5d, 0x37, 0xc0, 0xb7, 0x61, 0xe4, 
0x43, 0x70, 0xde, 0xe3, 0xa3, 0xb0, 0xd9, 0xc4, + 0x77, 0x60, 0xc4, 0x2d, 0xfe, 0x88, 0x17, 0x1d, 0xd7, 0xb3, 0x0c, 0x7c, 0x97, 0x37, 0x76, 0x10, + 0xde, 0x5b, 0x68, 0x44, 0xef, 0x83, 0xca, 0x21, 0x5f, 0xf6, 0x8f, 0x79, 0xb9, 0x0a, 0x79, 0xaf, + 0xff, 0x80, 0x23, 0xad, 0xc0, 0x26, 0xf8, 0x27, 0xa2, 0x1f, 0x75, 0xbc, 0x3d, 0x40, 0x7f, 0x28, + 0x53, 0x0e, 0x8e, 0x21, 0xd6, 0x79, 0x76, 0x86, 0x87, 0x9d, 0x0e, 0xde, 0x85, 0xa1, 0xc9, 0xad, + 0x1a, 0xa0, 0xd2, 0x74, 0x29, 0xb1, 0x5a, 0x0e, 0x36, 0x21, 0x14, 0xf7, 0x0f, 0x30, 0xe1, 0x1d, + 0xc6, 0xf2, 0x03, 0xdc, 0x14, 0x77, 0x92, 0xb6, 0x81, 0x5b, 0x3c, 0x01, 0xdc, 0xb6, 0xc8, 0xcb, + 0x3d, 0xe8, 0x08, 0xd9, 0x8c, 0x6f, 0xbc, 0xc5, 0x35, 0xc3, 0xb6, 0x81, 0xf7, 0x21, 0x2c, 0x86, + 0xeb, 0xe1, 0xfb, 0x10, 0x09, 0xd3, 0xf2, 0x79, 0xf3, 0x26, 0x26, 0xb6, 0xb5, 0xbf, 0x29, 0x68, + 0x75, 0xe1, 0x5b, 0xf7, 0x9b, 0x7d, 0x1f, 0x92, 0x85, 0x57, 0x81, 0x5b, 0xcb, 0x7c, 0x64, 0xcf, + 0x3d, 0x0e, 0x68, 0x6f, 0xcb, 0x0f, 0x65, 0x8c, 0x6a, 0xf2, 0x3d, 0xe5, 0x69, 0x75, 0x1c, 0xa1, + 0x92, 0xe1, 0xb6, 0xdb, 0xf0, 0xad, 0xac, 0xb5, 0x50, 0x39, 0x73, 0x45, 0xad, 0xcf, 0xde, 0x7b, + 0xc4, 0x67, 0xf9, 0xf4, 0xb5, 0xe7, 0x55, 0x54, 0x7b, 0xc8, 0x06, 0xc3, 0xb8, 0x9b, 0x3c, 0x7a, + 0x34, 0x61, 0xe2, 0x93, 0xaa, 0x48, 0xab, 0x5c, 0xe6, 0x72, 0x91, 0xf6, 0x27, 0x05, 0xbd, 0xa8, + 0xc7, 0xd1, 0xd1, 0xe9, 0xcf, 0xd9, 0xec, 0x59, 0x84, 0xfd, 0xec, 0x84, 0x4d, 0x52, 0xd5, 0x40, + 0xe5, 0xbe, 0x7c, 0x5f, 0xba, 0x60, 0x78, 0xb2, 0xe7, 0x28, 0x3a, 0x05, 0xaa, 0x1e, 0x5a, 0x65, + 0x71, 0x2f, 0xe9, 0x0f, 0xe3, 0x41, 0x77, 0x2e, 0x56, 0x37, 0xcf, 0x8d, 0x95, 0xc0, 0xf0, 0x28, + 0xd5, 0xd8, 0xdc, 0x4c, 0xfb, 0xa7, 0x82, 0xea, 0x5f, 0x76, 0x79, 0x32, 0x4a, 0xa0, 0x0f, 0x1d, + 0x20, 0x35, 0x33, 0xdd, 0x9d, 0xbd, 0x0b, 0x29, 0x4b, 0xbe, 0x0b, 0x5d, 0xce, 0x38, 0x16, 0x3e, + 0x9e, 0xa7, 0xef, 0x61, 0xb9, 0xc5, 0xf7, 0x30, 0x95, 0x88, 0x37, 0x28, 0x16, 0xf7, 0xd8, 0x44, + 0xbe, 0xee, 0x5c, 0xbf, 0x80, 0x2d, 0xd0, 0xa7, 0x33, 0x24, 0x5c, 0xf2, 0x5e, 0x90, 
0x0b, 0xe3, + 0xa9, 0x32, 0x64, 0x93, 0xef, 0xf8, 0x56, 0x3c, 0x99, 0x26, 0xcf, 0xcc, 0x61, 0xb9, 0x11, 0x3a, + 0x2a, 0x33, 0x29, 0xab, 0x2b, 0x3c, 0x24, 0xd7, 0x2e, 0x74, 0x3c, 0xe8, 0x14, 0xf6, 0xac, 0x90, + 0x6b, 0x7f, 0x54, 0xd0, 0x95, 0x2c, 0x09, 0x4e, 0xe3, 0x34, 0x7a, 0xf2, 0x1d, 0x8f, 0xd4, 0x5f, + 0x14, 0xf4, 0xfc, 0x19, 0x7f, 0x65, 0xa0, 0x16, 0x92, 0x47, 0xf9, 0xba, 0xc9, 0xa3, 0xbe, 0x8f, + 0x4a, 0xfc, 0xe2, 0x32, 0xa9, 0xe7, 0x38, 0xc7, 0xeb, 0xe7, 0x55, 0x32, 0x50, 0xa6, 0x12, 0xb3, + 0x10, 0xea, 0xfc, 0x99, 0x50, 0xff, 0x2e, 0x8f, 0x9e, 0xd3, 0xc5, 0xa3, 0x35, 0x83, 0xa2, 0xf3, + 0xad, 0x46, 0xfa, 0x00, 0x95, 0x1f, 0xb1, 0x28, 0x3d, 0x19, 0xb3, 0x89, 0x7c, 0x5b, 0xbb, 0x77, + 0x0e, 0xc9, 0x53, 0x5c, 0x69, 0x34, 0x25, 0x05, 0x9d, 0x92, 0x7d, 0x79, 0x0b, 0xf3, 0xdf, 0x70, + 0x0b, 0x37, 0x7e, 0xab, 0xa0, 0x72, 0x66, 0x48, 0xbd, 0x86, 0xd6, 0xd8, 0x93, 0x74, 0x1c, 0xf5, + 0xd2, 0xee, 0x84, 0xef, 0x27, 0x0f, 0x41, 0x99, 0xae, 0x4a, 0xa9, 0xd8, 0x64, 0xf5, 0x0d, 0x84, + 0x33, 0xb5, 0xe9, 0x69, 0xc8, 0x71, 0xc5, 0x75, 0x29, 0xcf, 0x0e, 0x8e, 0xfa, 0x3e, 0xda, 0xc8, + 0x54, 0x9f, 0x52, 0xc1, 0xf2, 0x1c, 0x54, 0x97, 0x1a, 0xe6, 0xd9, 0xf2, 0xa4, 0x7d, 0x91, 0x83, + 0xf3, 0x30, 0x1f, 0x99, 0xef, 0x52, 0x7a, 0xcd, 0x17, 0x83, 0xfc, 0xd7, 0x2b, 0x06, 0x4f, 0x2f, + 0xec, 0x85, 0x6f, 0xb7, 0xb0, 0x17, 0x17, 0x53, 0xff, 0xc6, 0x1d, 0x54, 0x9b, 0x4f, 0x08, 0x71, + 0x17, 0x71, 0x08, 0xbe, 0x04, 0xa3, 0x30, 0x68, 0xde, 0x11, 0xd7, 0xf3, 0x30, 0x68, 0xde, 0xba, + 0x2d, 0xae, 0xe7, 0x61, 0xd0, 0xdc, 0xd9, 0xc6, 0xf9, 0xed, 0xbf, 0x17, 0xd1, 0xba, 0x2d, 0x69, + 0x7c, 0xf1, 0x57, 0x91, 0xfa, 0x67, 0x05, 0xe1, 0xb3, 0x8d, 0x4b, 0xbd, 0x7d, 0x6e, 0xba, 0x3f, + 0xb5, 0x39, 0x6f, 0xbc, 0xb7, 0x34, 0x4e, 0x24, 0x84, 0xd6, 0xf8, 0xec, 0x8b, 0x7f, 0xfd, 0x26, + 0xb7, 0xa9, 0xbd, 0x36, 0xfd, 0x4f, 0x2b, 0x8b, 0xc9, 0xe4, 0x6e, 0x74, 0x06, 0x74, 0x57, 0xb9, + 0xa1, 0x7e, 0xae, 0xa0, 0xf5, 0x33, 0x45, 0x5e, 0x7d, 0xf7, 0x62, 0xc6, 0xcf, 0x74, 0xb1, 0x8d, + 0xdb, 0xcb, 0xc2, 0xa4, 
0xcb, 0x6f, 0x71, 0x97, 0xaf, 0x6b, 0xda, 0x57, 0xbb, 0x9c, 0x61, 0xc0, + 0xe3, 0x3f, 0x28, 0x68, 0x75, 0xa1, 0xd6, 0xaa, 0x3b, 0x17, 0x0c, 0xd6, 0x7c, 0x27, 0xd9, 0x78, + 0x67, 0x39, 0x90, 0xf4, 0xf5, 0x26, 0xf7, 0xf5, 0x9a, 0x76, 0xf5, 0x19, 0xe1, 0xe5, 0x08, 0xf0, + 0xf4, 0xf7, 0x0a, 0xaa, 0xcd, 0x9f, 0x5a, 0x75, 0x7b, 0xf9, 0xe2, 0xb7, 0xb1, 0xb3, 0x14, 0x46, + 0xba, 0x79, 0x83, 0xbb, 0xf9, 0xba, 0xf6, 0xca, 0x53, 0xdd, 0x9c, 0x01, 0xee, 0x2a, 0x37, 0x76, + 0x3f, 0x53, 0xd0, 0xab, 0xbd, 0xe4, 0xf8, 0xd9, 0x66, 0x76, 0xaf, 0x9c, 0x49, 0x77, 0x6f, 0x9c, + 0xa4, 0x89, 0xa7, 0x7c, 0x4c, 0x24, 0x6c, 0x90, 0x00, 0xa4, 0x91, 0x8c, 0x07, 0x5b, 0x03, 0x16, + 0xf3, 0x7f, 0x3e, 0xb7, 0xc4, 0x4f, 0xd1, 0x68, 0x38, 0xf9, 0x8a, 0xff, 0x5b, 0xef, 0x65, 0x82, + 0x87, 0x25, 0x8e, 0xd8, 0xf9, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xf6, 0xe5, 0xb1, 0xa0, + 0x1d, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/language/v1beta2/language_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/language/v1beta2/language_service.pb.go new file mode 100644 index 000000000..a1fea0695 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/language/v1beta2/language_service.pb.go @@ -0,0 +1,2596 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/language/v1beta2/language_service.proto + +/* +Package language is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/language/v1beta2/language_service.proto + +It has these top-level messages: + Document + Sentence + Entity + Token + Sentiment + PartOfSpeech + DependencyEdge + EntityMention + TextSpan + ClassificationCategory + AnalyzeSentimentRequest + AnalyzeSentimentResponse + AnalyzeEntitySentimentRequest + AnalyzeEntitySentimentResponse + AnalyzeEntitiesRequest + AnalyzeEntitiesResponse + AnalyzeSyntaxRequest + AnalyzeSyntaxResponse + ClassifyTextRequest + ClassifyTextResponse + AnnotateTextRequest + AnnotateTextResponse +*/ +package language + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/timestamp" +import _ "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents the text encoding that the caller uses to process the output. +// Providing an `EncodingType` is recommended because the API provides the +// beginning offsets for various outputs, such as tokens and mentions, and +// languages that natively use different text encodings may access offsets +// differently. +type EncodingType int32 + +const ( + // If `EncodingType` is not specified, encoding-dependent information (such as + // `begin_offset`) will be set at `-1`. 
+ EncodingType_NONE EncodingType = 0 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-8 encoding of the input. C++ and Go are examples of languages + // that use this encoding natively. + EncodingType_UTF8 EncodingType = 1 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-16 encoding of the input. Java and Javascript are examples of + // languages that use this encoding natively. + EncodingType_UTF16 EncodingType = 2 + // Encoding-dependent information (such as `begin_offset`) is calculated based + // on the UTF-32 encoding of the input. Python is an example of a language + // that uses this encoding natively. + EncodingType_UTF32 EncodingType = 3 +) + +var EncodingType_name = map[int32]string{ + 0: "NONE", + 1: "UTF8", + 2: "UTF16", + 3: "UTF32", +} +var EncodingType_value = map[string]int32{ + "NONE": 0, + "UTF8": 1, + "UTF16": 2, + "UTF32": 3, +} + +func (x EncodingType) String() string { + return proto.EnumName(EncodingType_name, int32(x)) +} +func (EncodingType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// The document types enum. +type Document_Type int32 + +const ( + // The content type is not specified. + Document_TYPE_UNSPECIFIED Document_Type = 0 + // Plain text + Document_PLAIN_TEXT Document_Type = 1 + // HTML + Document_HTML Document_Type = 2 +) + +var Document_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "PLAIN_TEXT", + 2: "HTML", +} +var Document_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "PLAIN_TEXT": 1, + "HTML": 2, +} + +func (x Document_Type) String() string { + return proto.EnumName(Document_Type_name, int32(x)) +} +func (Document_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// The type of the entity. 
+type Entity_Type int32 + +const ( + // Unknown + Entity_UNKNOWN Entity_Type = 0 + // Person + Entity_PERSON Entity_Type = 1 + // Location + Entity_LOCATION Entity_Type = 2 + // Organization + Entity_ORGANIZATION Entity_Type = 3 + // Event + Entity_EVENT Entity_Type = 4 + // Work of art + Entity_WORK_OF_ART Entity_Type = 5 + // Consumer goods + Entity_CONSUMER_GOOD Entity_Type = 6 + // Other types + Entity_OTHER Entity_Type = 7 +) + +var Entity_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PERSON", + 2: "LOCATION", + 3: "ORGANIZATION", + 4: "EVENT", + 5: "WORK_OF_ART", + 6: "CONSUMER_GOOD", + 7: "OTHER", +} +var Entity_Type_value = map[string]int32{ + "UNKNOWN": 0, + "PERSON": 1, + "LOCATION": 2, + "ORGANIZATION": 3, + "EVENT": 4, + "WORK_OF_ART": 5, + "CONSUMER_GOOD": 6, + "OTHER": 7, +} + +func (x Entity_Type) String() string { + return proto.EnumName(Entity_Type_name, int32(x)) +} +func (Entity_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// The part of speech tags enum. 
+type PartOfSpeech_Tag int32 + +const ( + // Unknown + PartOfSpeech_UNKNOWN PartOfSpeech_Tag = 0 + // Adjective + PartOfSpeech_ADJ PartOfSpeech_Tag = 1 + // Adposition (preposition and postposition) + PartOfSpeech_ADP PartOfSpeech_Tag = 2 + // Adverb + PartOfSpeech_ADV PartOfSpeech_Tag = 3 + // Conjunction + PartOfSpeech_CONJ PartOfSpeech_Tag = 4 + // Determiner + PartOfSpeech_DET PartOfSpeech_Tag = 5 + // Noun (common and proper) + PartOfSpeech_NOUN PartOfSpeech_Tag = 6 + // Cardinal number + PartOfSpeech_NUM PartOfSpeech_Tag = 7 + // Pronoun + PartOfSpeech_PRON PartOfSpeech_Tag = 8 + // Particle or other function word + PartOfSpeech_PRT PartOfSpeech_Tag = 9 + // Punctuation + PartOfSpeech_PUNCT PartOfSpeech_Tag = 10 + // Verb (all tenses and modes) + PartOfSpeech_VERB PartOfSpeech_Tag = 11 + // Other: foreign words, typos, abbreviations + PartOfSpeech_X PartOfSpeech_Tag = 12 + // Affix + PartOfSpeech_AFFIX PartOfSpeech_Tag = 13 +) + +var PartOfSpeech_Tag_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ADJ", + 2: "ADP", + 3: "ADV", + 4: "CONJ", + 5: "DET", + 6: "NOUN", + 7: "NUM", + 8: "PRON", + 9: "PRT", + 10: "PUNCT", + 11: "VERB", + 12: "X", + 13: "AFFIX", +} +var PartOfSpeech_Tag_value = map[string]int32{ + "UNKNOWN": 0, + "ADJ": 1, + "ADP": 2, + "ADV": 3, + "CONJ": 4, + "DET": 5, + "NOUN": 6, + "NUM": 7, + "PRON": 8, + "PRT": 9, + "PUNCT": 10, + "VERB": 11, + "X": 12, + "AFFIX": 13, +} + +func (x PartOfSpeech_Tag) String() string { + return proto.EnumName(PartOfSpeech_Tag_name, int32(x)) +} +func (PartOfSpeech_Tag) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 0} } + +// The characteristic of a verb that expresses time flow during an event. +type PartOfSpeech_Aspect int32 + +const ( + // Aspect is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_ASPECT_UNKNOWN PartOfSpeech_Aspect = 0 + // Perfective + PartOfSpeech_PERFECTIVE PartOfSpeech_Aspect = 1 + // Imperfective + PartOfSpeech_IMPERFECTIVE PartOfSpeech_Aspect = 2 + // Progressive + PartOfSpeech_PROGRESSIVE PartOfSpeech_Aspect = 3 +) + +var PartOfSpeech_Aspect_name = map[int32]string{ + 0: "ASPECT_UNKNOWN", + 1: "PERFECTIVE", + 2: "IMPERFECTIVE", + 3: "PROGRESSIVE", +} +var PartOfSpeech_Aspect_value = map[string]int32{ + "ASPECT_UNKNOWN": 0, + "PERFECTIVE": 1, + "IMPERFECTIVE": 2, + "PROGRESSIVE": 3, +} + +func (x PartOfSpeech_Aspect) String() string { + return proto.EnumName(PartOfSpeech_Aspect_name, int32(x)) +} +func (PartOfSpeech_Aspect) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 1} } + +// The grammatical function performed by a noun or pronoun in a phrase, +// clause, or sentence. In some languages, other parts of speech, such as +// adjective and determiner, take case inflection in agreement with the noun. +type PartOfSpeech_Case int32 + +const ( + // Case is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_CASE_UNKNOWN PartOfSpeech_Case = 0 + // Accusative + PartOfSpeech_ACCUSATIVE PartOfSpeech_Case = 1 + // Adverbial + PartOfSpeech_ADVERBIAL PartOfSpeech_Case = 2 + // Complementive + PartOfSpeech_COMPLEMENTIVE PartOfSpeech_Case = 3 + // Dative + PartOfSpeech_DATIVE PartOfSpeech_Case = 4 + // Genitive + PartOfSpeech_GENITIVE PartOfSpeech_Case = 5 + // Instrumental + PartOfSpeech_INSTRUMENTAL PartOfSpeech_Case = 6 + // Locative + PartOfSpeech_LOCATIVE PartOfSpeech_Case = 7 + // Nominative + PartOfSpeech_NOMINATIVE PartOfSpeech_Case = 8 + // Oblique + PartOfSpeech_OBLIQUE PartOfSpeech_Case = 9 + // Partitive + PartOfSpeech_PARTITIVE PartOfSpeech_Case = 10 + // Prepositional + PartOfSpeech_PREPOSITIONAL PartOfSpeech_Case = 11 + // Reflexive + PartOfSpeech_REFLEXIVE_CASE PartOfSpeech_Case = 12 + // Relative + PartOfSpeech_RELATIVE_CASE PartOfSpeech_Case = 13 + // Vocative + PartOfSpeech_VOCATIVE PartOfSpeech_Case = 14 +) + +var PartOfSpeech_Case_name = map[int32]string{ + 0: "CASE_UNKNOWN", + 1: "ACCUSATIVE", + 2: "ADVERBIAL", + 3: "COMPLEMENTIVE", + 4: "DATIVE", + 5: "GENITIVE", + 6: "INSTRUMENTAL", + 7: "LOCATIVE", + 8: "NOMINATIVE", + 9: "OBLIQUE", + 10: "PARTITIVE", + 11: "PREPOSITIONAL", + 12: "REFLEXIVE_CASE", + 13: "RELATIVE_CASE", + 14: "VOCATIVE", +} +var PartOfSpeech_Case_value = map[string]int32{ + "CASE_UNKNOWN": 0, + "ACCUSATIVE": 1, + "ADVERBIAL": 2, + "COMPLEMENTIVE": 3, + "DATIVE": 4, + "GENITIVE": 5, + "INSTRUMENTAL": 6, + "LOCATIVE": 7, + "NOMINATIVE": 8, + "OBLIQUE": 9, + "PARTITIVE": 10, + "PREPOSITIONAL": 11, + "REFLEXIVE_CASE": 12, + "RELATIVE_CASE": 13, + "VOCATIVE": 14, +} + +func (x PartOfSpeech_Case) String() string { + return proto.EnumName(PartOfSpeech_Case_name, int32(x)) +} +func (PartOfSpeech_Case) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 2} } + +// Depending on the language, Form can be categorizing different forms of +// verbs, adjectives, adverbs, etc. 
For example, categorizing inflected +// endings of verbs and adjectives or distinguishing between short and long +// forms of adjectives and participles +type PartOfSpeech_Form int32 + +const ( + // Form is not applicable in the analyzed language or is not predicted. + PartOfSpeech_FORM_UNKNOWN PartOfSpeech_Form = 0 + // Adnomial + PartOfSpeech_ADNOMIAL PartOfSpeech_Form = 1 + // Auxiliary + PartOfSpeech_AUXILIARY PartOfSpeech_Form = 2 + // Complementizer + PartOfSpeech_COMPLEMENTIZER PartOfSpeech_Form = 3 + // Final ending + PartOfSpeech_FINAL_ENDING PartOfSpeech_Form = 4 + // Gerund + PartOfSpeech_GERUND PartOfSpeech_Form = 5 + // Realis + PartOfSpeech_REALIS PartOfSpeech_Form = 6 + // Irrealis + PartOfSpeech_IRREALIS PartOfSpeech_Form = 7 + // Short form + PartOfSpeech_SHORT PartOfSpeech_Form = 8 + // Long form + PartOfSpeech_LONG PartOfSpeech_Form = 9 + // Order form + PartOfSpeech_ORDER PartOfSpeech_Form = 10 + // Specific form + PartOfSpeech_SPECIFIC PartOfSpeech_Form = 11 +) + +var PartOfSpeech_Form_name = map[int32]string{ + 0: "FORM_UNKNOWN", + 1: "ADNOMIAL", + 2: "AUXILIARY", + 3: "COMPLEMENTIZER", + 4: "FINAL_ENDING", + 5: "GERUND", + 6: "REALIS", + 7: "IRREALIS", + 8: "SHORT", + 9: "LONG", + 10: "ORDER", + 11: "SPECIFIC", +} +var PartOfSpeech_Form_value = map[string]int32{ + "FORM_UNKNOWN": 0, + "ADNOMIAL": 1, + "AUXILIARY": 2, + "COMPLEMENTIZER": 3, + "FINAL_ENDING": 4, + "GERUND": 5, + "REALIS": 6, + "IRREALIS": 7, + "SHORT": 8, + "LONG": 9, + "ORDER": 10, + "SPECIFIC": 11, +} + +func (x PartOfSpeech_Form) String() string { + return proto.EnumName(PartOfSpeech_Form_name, int32(x)) +} +func (PartOfSpeech_Form) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 3} } + +// Gender classes of nouns reflected in the behaviour of associated words. +type PartOfSpeech_Gender int32 + +const ( + // Gender is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_GENDER_UNKNOWN PartOfSpeech_Gender = 0 + // Feminine + PartOfSpeech_FEMININE PartOfSpeech_Gender = 1 + // Masculine + PartOfSpeech_MASCULINE PartOfSpeech_Gender = 2 + // Neuter + PartOfSpeech_NEUTER PartOfSpeech_Gender = 3 +) + +var PartOfSpeech_Gender_name = map[int32]string{ + 0: "GENDER_UNKNOWN", + 1: "FEMININE", + 2: "MASCULINE", + 3: "NEUTER", +} +var PartOfSpeech_Gender_value = map[string]int32{ + "GENDER_UNKNOWN": 0, + "FEMININE": 1, + "MASCULINE": 2, + "NEUTER": 3, +} + +func (x PartOfSpeech_Gender) String() string { + return proto.EnumName(PartOfSpeech_Gender_name, int32(x)) +} +func (PartOfSpeech_Gender) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 4} } + +// The grammatical feature of verbs, used for showing modality and attitude. +type PartOfSpeech_Mood int32 + +const ( + // Mood is not applicable in the analyzed language or is not predicted. + PartOfSpeech_MOOD_UNKNOWN PartOfSpeech_Mood = 0 + // Conditional + PartOfSpeech_CONDITIONAL_MOOD PartOfSpeech_Mood = 1 + // Imperative + PartOfSpeech_IMPERATIVE PartOfSpeech_Mood = 2 + // Indicative + PartOfSpeech_INDICATIVE PartOfSpeech_Mood = 3 + // Interrogative + PartOfSpeech_INTERROGATIVE PartOfSpeech_Mood = 4 + // Jussive + PartOfSpeech_JUSSIVE PartOfSpeech_Mood = 5 + // Subjunctive + PartOfSpeech_SUBJUNCTIVE PartOfSpeech_Mood = 6 +) + +var PartOfSpeech_Mood_name = map[int32]string{ + 0: "MOOD_UNKNOWN", + 1: "CONDITIONAL_MOOD", + 2: "IMPERATIVE", + 3: "INDICATIVE", + 4: "INTERROGATIVE", + 5: "JUSSIVE", + 6: "SUBJUNCTIVE", +} +var PartOfSpeech_Mood_value = map[string]int32{ + "MOOD_UNKNOWN": 0, + "CONDITIONAL_MOOD": 1, + "IMPERATIVE": 2, + "INDICATIVE": 3, + "INTERROGATIVE": 4, + "JUSSIVE": 5, + "SUBJUNCTIVE": 6, +} + +func (x PartOfSpeech_Mood) String() string { + return proto.EnumName(PartOfSpeech_Mood_name, int32(x)) +} +func (PartOfSpeech_Mood) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 5} } + +// Count distinctions. 
+type PartOfSpeech_Number int32 + +const ( + // Number is not applicable in the analyzed language or is not predicted. + PartOfSpeech_NUMBER_UNKNOWN PartOfSpeech_Number = 0 + // Singular + PartOfSpeech_SINGULAR PartOfSpeech_Number = 1 + // Plural + PartOfSpeech_PLURAL PartOfSpeech_Number = 2 + // Dual + PartOfSpeech_DUAL PartOfSpeech_Number = 3 +) + +var PartOfSpeech_Number_name = map[int32]string{ + 0: "NUMBER_UNKNOWN", + 1: "SINGULAR", + 2: "PLURAL", + 3: "DUAL", +} +var PartOfSpeech_Number_value = map[string]int32{ + "NUMBER_UNKNOWN": 0, + "SINGULAR": 1, + "PLURAL": 2, + "DUAL": 3, +} + +func (x PartOfSpeech_Number) String() string { + return proto.EnumName(PartOfSpeech_Number_name, int32(x)) +} +func (PartOfSpeech_Number) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 6} } + +// The distinction between the speaker, second person, third person, etc. +type PartOfSpeech_Person int32 + +const ( + // Person is not applicable in the analyzed language or is not predicted. + PartOfSpeech_PERSON_UNKNOWN PartOfSpeech_Person = 0 + // First + PartOfSpeech_FIRST PartOfSpeech_Person = 1 + // Second + PartOfSpeech_SECOND PartOfSpeech_Person = 2 + // Third + PartOfSpeech_THIRD PartOfSpeech_Person = 3 + // Reflexive + PartOfSpeech_REFLEXIVE_PERSON PartOfSpeech_Person = 4 +) + +var PartOfSpeech_Person_name = map[int32]string{ + 0: "PERSON_UNKNOWN", + 1: "FIRST", + 2: "SECOND", + 3: "THIRD", + 4: "REFLEXIVE_PERSON", +} +var PartOfSpeech_Person_value = map[string]int32{ + "PERSON_UNKNOWN": 0, + "FIRST": 1, + "SECOND": 2, + "THIRD": 3, + "REFLEXIVE_PERSON": 4, +} + +func (x PartOfSpeech_Person) String() string { + return proto.EnumName(PartOfSpeech_Person_name, int32(x)) +} +func (PartOfSpeech_Person) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 7} } + +// This category shows if the token is part of a proper name. +type PartOfSpeech_Proper int32 + +const ( + // Proper is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_PROPER_UNKNOWN PartOfSpeech_Proper = 0 + // Proper + PartOfSpeech_PROPER PartOfSpeech_Proper = 1 + // Not proper + PartOfSpeech_NOT_PROPER PartOfSpeech_Proper = 2 +) + +var PartOfSpeech_Proper_name = map[int32]string{ + 0: "PROPER_UNKNOWN", + 1: "PROPER", + 2: "NOT_PROPER", +} +var PartOfSpeech_Proper_value = map[string]int32{ + "PROPER_UNKNOWN": 0, + "PROPER": 1, + "NOT_PROPER": 2, +} + +func (x PartOfSpeech_Proper) String() string { + return proto.EnumName(PartOfSpeech_Proper_name, int32(x)) +} +func (PartOfSpeech_Proper) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 8} } + +// Reciprocal features of a pronoun. +type PartOfSpeech_Reciprocity int32 + +const ( + // Reciprocity is not applicable in the analyzed language or is not + // predicted. + PartOfSpeech_RECIPROCITY_UNKNOWN PartOfSpeech_Reciprocity = 0 + // Reciprocal + PartOfSpeech_RECIPROCAL PartOfSpeech_Reciprocity = 1 + // Non-reciprocal + PartOfSpeech_NON_RECIPROCAL PartOfSpeech_Reciprocity = 2 +) + +var PartOfSpeech_Reciprocity_name = map[int32]string{ + 0: "RECIPROCITY_UNKNOWN", + 1: "RECIPROCAL", + 2: "NON_RECIPROCAL", +} +var PartOfSpeech_Reciprocity_value = map[string]int32{ + "RECIPROCITY_UNKNOWN": 0, + "RECIPROCAL": 1, + "NON_RECIPROCAL": 2, +} + +func (x PartOfSpeech_Reciprocity) String() string { + return proto.EnumName(PartOfSpeech_Reciprocity_name, int32(x)) +} +func (PartOfSpeech_Reciprocity) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 9} } + +// Time reference. +type PartOfSpeech_Tense int32 + +const ( + // Tense is not applicable in the analyzed language or is not predicted. 
+ PartOfSpeech_TENSE_UNKNOWN PartOfSpeech_Tense = 0 + // Conditional + PartOfSpeech_CONDITIONAL_TENSE PartOfSpeech_Tense = 1 + // Future + PartOfSpeech_FUTURE PartOfSpeech_Tense = 2 + // Past + PartOfSpeech_PAST PartOfSpeech_Tense = 3 + // Present + PartOfSpeech_PRESENT PartOfSpeech_Tense = 4 + // Imperfect + PartOfSpeech_IMPERFECT PartOfSpeech_Tense = 5 + // Pluperfect + PartOfSpeech_PLUPERFECT PartOfSpeech_Tense = 6 +) + +var PartOfSpeech_Tense_name = map[int32]string{ + 0: "TENSE_UNKNOWN", + 1: "CONDITIONAL_TENSE", + 2: "FUTURE", + 3: "PAST", + 4: "PRESENT", + 5: "IMPERFECT", + 6: "PLUPERFECT", +} +var PartOfSpeech_Tense_value = map[string]int32{ + "TENSE_UNKNOWN": 0, + "CONDITIONAL_TENSE": 1, + "FUTURE": 2, + "PAST": 3, + "PRESENT": 4, + "IMPERFECT": 5, + "PLUPERFECT": 6, +} + +func (x PartOfSpeech_Tense) String() string { + return proto.EnumName(PartOfSpeech_Tense_name, int32(x)) +} +func (PartOfSpeech_Tense) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 10} } + +// The relationship between the action that a verb expresses and the +// participants identified by its arguments. +type PartOfSpeech_Voice int32 + +const ( + // Voice is not applicable in the analyzed language or is not predicted. + PartOfSpeech_VOICE_UNKNOWN PartOfSpeech_Voice = 0 + // Active + PartOfSpeech_ACTIVE PartOfSpeech_Voice = 1 + // Causative + PartOfSpeech_CAUSATIVE PartOfSpeech_Voice = 2 + // Passive + PartOfSpeech_PASSIVE PartOfSpeech_Voice = 3 +) + +var PartOfSpeech_Voice_name = map[int32]string{ + 0: "VOICE_UNKNOWN", + 1: "ACTIVE", + 2: "CAUSATIVE", + 3: "PASSIVE", +} +var PartOfSpeech_Voice_value = map[string]int32{ + "VOICE_UNKNOWN": 0, + "ACTIVE": 1, + "CAUSATIVE": 2, + "PASSIVE": 3, +} + +func (x PartOfSpeech_Voice) String() string { + return proto.EnumName(PartOfSpeech_Voice_name, int32(x)) +} +func (PartOfSpeech_Voice) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 11} } + +// The parse label enum for the token. 
+type DependencyEdge_Label int32 + +const ( + // Unknown + DependencyEdge_UNKNOWN DependencyEdge_Label = 0 + // Abbreviation modifier + DependencyEdge_ABBREV DependencyEdge_Label = 1 + // Adjectival complement + DependencyEdge_ACOMP DependencyEdge_Label = 2 + // Adverbial clause modifier + DependencyEdge_ADVCL DependencyEdge_Label = 3 + // Adverbial modifier + DependencyEdge_ADVMOD DependencyEdge_Label = 4 + // Adjectival modifier of an NP + DependencyEdge_AMOD DependencyEdge_Label = 5 + // Appositional modifier of an NP + DependencyEdge_APPOS DependencyEdge_Label = 6 + // Attribute dependent of a copular verb + DependencyEdge_ATTR DependencyEdge_Label = 7 + // Auxiliary (non-main) verb + DependencyEdge_AUX DependencyEdge_Label = 8 + // Passive auxiliary + DependencyEdge_AUXPASS DependencyEdge_Label = 9 + // Coordinating conjunction + DependencyEdge_CC DependencyEdge_Label = 10 + // Clausal complement of a verb or adjective + DependencyEdge_CCOMP DependencyEdge_Label = 11 + // Conjunct + DependencyEdge_CONJ DependencyEdge_Label = 12 + // Clausal subject + DependencyEdge_CSUBJ DependencyEdge_Label = 13 + // Clausal passive subject + DependencyEdge_CSUBJPASS DependencyEdge_Label = 14 + // Dependency (unable to determine) + DependencyEdge_DEP DependencyEdge_Label = 15 + // Determiner + DependencyEdge_DET DependencyEdge_Label = 16 + // Discourse + DependencyEdge_DISCOURSE DependencyEdge_Label = 17 + // Direct object + DependencyEdge_DOBJ DependencyEdge_Label = 18 + // Expletive + DependencyEdge_EXPL DependencyEdge_Label = 19 + // Goes with (part of a word in a text not well edited) + DependencyEdge_GOESWITH DependencyEdge_Label = 20 + // Indirect object + DependencyEdge_IOBJ DependencyEdge_Label = 21 + // Marker (word introducing a subordinate clause) + DependencyEdge_MARK DependencyEdge_Label = 22 + // Multi-word expression + DependencyEdge_MWE DependencyEdge_Label = 23 + // Multi-word verbal expression + DependencyEdge_MWV DependencyEdge_Label = 24 + // Negation 
modifier + DependencyEdge_NEG DependencyEdge_Label = 25 + // Noun compound modifier + DependencyEdge_NN DependencyEdge_Label = 26 + // Noun phrase used as an adverbial modifier + DependencyEdge_NPADVMOD DependencyEdge_Label = 27 + // Nominal subject + DependencyEdge_NSUBJ DependencyEdge_Label = 28 + // Passive nominal subject + DependencyEdge_NSUBJPASS DependencyEdge_Label = 29 + // Numeric modifier of a noun + DependencyEdge_NUM DependencyEdge_Label = 30 + // Element of compound number + DependencyEdge_NUMBER DependencyEdge_Label = 31 + // Punctuation mark + DependencyEdge_P DependencyEdge_Label = 32 + // Parataxis relation + DependencyEdge_PARATAXIS DependencyEdge_Label = 33 + // Participial modifier + DependencyEdge_PARTMOD DependencyEdge_Label = 34 + // The complement of a preposition is a clause + DependencyEdge_PCOMP DependencyEdge_Label = 35 + // Object of a preposition + DependencyEdge_POBJ DependencyEdge_Label = 36 + // Possession modifier + DependencyEdge_POSS DependencyEdge_Label = 37 + // Postverbal negative particle + DependencyEdge_POSTNEG DependencyEdge_Label = 38 + // Predicate complement + DependencyEdge_PRECOMP DependencyEdge_Label = 39 + // Preconjunt + DependencyEdge_PRECONJ DependencyEdge_Label = 40 + // Predeterminer + DependencyEdge_PREDET DependencyEdge_Label = 41 + // Prefix + DependencyEdge_PREF DependencyEdge_Label = 42 + // Prepositional modifier + DependencyEdge_PREP DependencyEdge_Label = 43 + // The relationship between a verb and verbal morpheme + DependencyEdge_PRONL DependencyEdge_Label = 44 + // Particle + DependencyEdge_PRT DependencyEdge_Label = 45 + // Associative or possessive marker + DependencyEdge_PS DependencyEdge_Label = 46 + // Quantifier phrase modifier + DependencyEdge_QUANTMOD DependencyEdge_Label = 47 + // Relative clause modifier + DependencyEdge_RCMOD DependencyEdge_Label = 48 + // Complementizer in relative clause + DependencyEdge_RCMODREL DependencyEdge_Label = 49 + // Ellipsis without a preceding predicate + 
DependencyEdge_RDROP DependencyEdge_Label = 50 + // Referent + DependencyEdge_REF DependencyEdge_Label = 51 + // Remnant + DependencyEdge_REMNANT DependencyEdge_Label = 52 + // Reparandum + DependencyEdge_REPARANDUM DependencyEdge_Label = 53 + // Root + DependencyEdge_ROOT DependencyEdge_Label = 54 + // Suffix specifying a unit of number + DependencyEdge_SNUM DependencyEdge_Label = 55 + // Suffix + DependencyEdge_SUFF DependencyEdge_Label = 56 + // Temporal modifier + DependencyEdge_TMOD DependencyEdge_Label = 57 + // Topic marker + DependencyEdge_TOPIC DependencyEdge_Label = 58 + // Clause headed by an infinite form of the verb that modifies a noun + DependencyEdge_VMOD DependencyEdge_Label = 59 + // Vocative + DependencyEdge_VOCATIVE DependencyEdge_Label = 60 + // Open clausal complement + DependencyEdge_XCOMP DependencyEdge_Label = 61 + // Name suffix + DependencyEdge_SUFFIX DependencyEdge_Label = 62 + // Name title + DependencyEdge_TITLE DependencyEdge_Label = 63 + // Adverbial phrase modifier + DependencyEdge_ADVPHMOD DependencyEdge_Label = 64 + // Causative auxiliary + DependencyEdge_AUXCAUS DependencyEdge_Label = 65 + // Helper auxiliary + DependencyEdge_AUXVV DependencyEdge_Label = 66 + // Rentaishi (Prenominal modifier) + DependencyEdge_DTMOD DependencyEdge_Label = 67 + // Foreign words + DependencyEdge_FOREIGN DependencyEdge_Label = 68 + // Keyword + DependencyEdge_KW DependencyEdge_Label = 69 + // List for chains of comparable items + DependencyEdge_LIST DependencyEdge_Label = 70 + // Nominalized clause + DependencyEdge_NOMC DependencyEdge_Label = 71 + // Nominalized clausal subject + DependencyEdge_NOMCSUBJ DependencyEdge_Label = 72 + // Nominalized clausal passive + DependencyEdge_NOMCSUBJPASS DependencyEdge_Label = 73 + // Compound of numeric modifier + DependencyEdge_NUMC DependencyEdge_Label = 74 + // Copula + DependencyEdge_COP DependencyEdge_Label = 75 + // Dislocated relation (for fronted/topicalized elements) + DependencyEdge_DISLOCATED 
DependencyEdge_Label = 76 + // Aspect marker + DependencyEdge_ASP DependencyEdge_Label = 77 + // Genitive modifier + DependencyEdge_GMOD DependencyEdge_Label = 78 + // Genitive object + DependencyEdge_GOBJ DependencyEdge_Label = 79 + // Infinitival modifier + DependencyEdge_INFMOD DependencyEdge_Label = 80 + // Measure + DependencyEdge_MES DependencyEdge_Label = 81 + // Nominal complement of a noun + DependencyEdge_NCOMP DependencyEdge_Label = 82 +) + +var DependencyEdge_Label_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ABBREV", + 2: "ACOMP", + 3: "ADVCL", + 4: "ADVMOD", + 5: "AMOD", + 6: "APPOS", + 7: "ATTR", + 8: "AUX", + 9: "AUXPASS", + 10: "CC", + 11: "CCOMP", + 12: "CONJ", + 13: "CSUBJ", + 14: "CSUBJPASS", + 15: "DEP", + 16: "DET", + 17: "DISCOURSE", + 18: "DOBJ", + 19: "EXPL", + 20: "GOESWITH", + 21: "IOBJ", + 22: "MARK", + 23: "MWE", + 24: "MWV", + 25: "NEG", + 26: "NN", + 27: "NPADVMOD", + 28: "NSUBJ", + 29: "NSUBJPASS", + 30: "NUM", + 31: "NUMBER", + 32: "P", + 33: "PARATAXIS", + 34: "PARTMOD", + 35: "PCOMP", + 36: "POBJ", + 37: "POSS", + 38: "POSTNEG", + 39: "PRECOMP", + 40: "PRECONJ", + 41: "PREDET", + 42: "PREF", + 43: "PREP", + 44: "PRONL", + 45: "PRT", + 46: "PS", + 47: "QUANTMOD", + 48: "RCMOD", + 49: "RCMODREL", + 50: "RDROP", + 51: "REF", + 52: "REMNANT", + 53: "REPARANDUM", + 54: "ROOT", + 55: "SNUM", + 56: "SUFF", + 57: "TMOD", + 58: "TOPIC", + 59: "VMOD", + 60: "VOCATIVE", + 61: "XCOMP", + 62: "SUFFIX", + 63: "TITLE", + 64: "ADVPHMOD", + 65: "AUXCAUS", + 66: "AUXVV", + 67: "DTMOD", + 68: "FOREIGN", + 69: "KW", + 70: "LIST", + 71: "NOMC", + 72: "NOMCSUBJ", + 73: "NOMCSUBJPASS", + 74: "NUMC", + 75: "COP", + 76: "DISLOCATED", + 77: "ASP", + 78: "GMOD", + 79: "GOBJ", + 80: "INFMOD", + 81: "MES", + 82: "NCOMP", +} +var DependencyEdge_Label_value = map[string]int32{ + "UNKNOWN": 0, + "ABBREV": 1, + "ACOMP": 2, + "ADVCL": 3, + "ADVMOD": 4, + "AMOD": 5, + "APPOS": 6, + "ATTR": 7, + "AUX": 8, + "AUXPASS": 9, + "CC": 10, + "CCOMP": 11, + "CONJ": 12, + 
"CSUBJ": 13, + "CSUBJPASS": 14, + "DEP": 15, + "DET": 16, + "DISCOURSE": 17, + "DOBJ": 18, + "EXPL": 19, + "GOESWITH": 20, + "IOBJ": 21, + "MARK": 22, + "MWE": 23, + "MWV": 24, + "NEG": 25, + "NN": 26, + "NPADVMOD": 27, + "NSUBJ": 28, + "NSUBJPASS": 29, + "NUM": 30, + "NUMBER": 31, + "P": 32, + "PARATAXIS": 33, + "PARTMOD": 34, + "PCOMP": 35, + "POBJ": 36, + "POSS": 37, + "POSTNEG": 38, + "PRECOMP": 39, + "PRECONJ": 40, + "PREDET": 41, + "PREF": 42, + "PREP": 43, + "PRONL": 44, + "PRT": 45, + "PS": 46, + "QUANTMOD": 47, + "RCMOD": 48, + "RCMODREL": 49, + "RDROP": 50, + "REF": 51, + "REMNANT": 52, + "REPARANDUM": 53, + "ROOT": 54, + "SNUM": 55, + "SUFF": 56, + "TMOD": 57, + "TOPIC": 58, + "VMOD": 59, + "VOCATIVE": 60, + "XCOMP": 61, + "SUFFIX": 62, + "TITLE": 63, + "ADVPHMOD": 64, + "AUXCAUS": 65, + "AUXVV": 66, + "DTMOD": 67, + "FOREIGN": 68, + "KW": 69, + "LIST": 70, + "NOMC": 71, + "NOMCSUBJ": 72, + "NOMCSUBJPASS": 73, + "NUMC": 74, + "COP": 75, + "DISLOCATED": 76, + "ASP": 77, + "GMOD": 78, + "GOBJ": 79, + "INFMOD": 80, + "MES": 81, + "NCOMP": 82, +} + +func (x DependencyEdge_Label) String() string { + return proto.EnumName(DependencyEdge_Label_name, int32(x)) +} +func (DependencyEdge_Label) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} } + +// The supported types of mentions. 
+type EntityMention_Type int32 + +const ( + // Unknown + EntityMention_TYPE_UNKNOWN EntityMention_Type = 0 + // Proper name + EntityMention_PROPER EntityMention_Type = 1 + // Common noun (or noun compound) + EntityMention_COMMON EntityMention_Type = 2 +) + +var EntityMention_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "PROPER", + 2: "COMMON", +} +var EntityMention_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "PROPER": 1, + "COMMON": 2, +} + +func (x EntityMention_Type) String() string { + return proto.EnumName(EntityMention_Type_name, int32(x)) +} +func (EntityMention_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// ################################################################ # +// +// Represents the input to API methods. +type Document struct { + // Required. If the type is not set or is `TYPE_UNSPECIFIED`, + // returns an `INVALID_ARGUMENT` error. + Type Document_Type `protobuf:"varint,1,opt,name=type,enum=google.cloud.language.v1beta2.Document_Type" json:"type,omitempty"` + // The source of the document: a string containing the content or a + // Google Cloud Storage URI. + // + // Types that are valid to be assigned to Source: + // *Document_Content + // *Document_GcsContentUri + Source isDocument_Source `protobuf_oneof:"source"` + // The language of the document (if not specified, the language is + // automatically detected). Both ISO and BCP-47 language codes are + // accepted.
+ // [Language Support](/natural-language/docs/languages) + // lists currently supported languages for each API method. + // If the language (either specified by the caller or automatically detected) + // is not supported by the called API method, an `INVALID_ARGUMENT` error + // is returned. + Language string `protobuf:"bytes,4,opt,name=language" json:"language,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isDocument_Source interface { + isDocument_Source() +} + +type Document_Content struct { + Content string `protobuf:"bytes,2,opt,name=content,oneof"` +} +type Document_GcsContentUri struct { + GcsContentUri string `protobuf:"bytes,3,opt,name=gcs_content_uri,json=gcsContentUri,oneof"` +} + +func (*Document_Content) isDocument_Source() {} +func (*Document_GcsContentUri) isDocument_Source() {} + +func (m *Document) GetSource() isDocument_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Document) GetType() Document_Type { + if m != nil { + return m.Type + } + return Document_TYPE_UNSPECIFIED +} + +func (m *Document) GetContent() string { + if x, ok := m.GetSource().(*Document_Content); ok { + return x.Content + } + return "" +} + +func (m *Document) GetGcsContentUri() string { + if x, ok := m.GetSource().(*Document_GcsContentUri); ok { + return x.GcsContentUri + } + return "" +} + +func (m *Document) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Document) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Document_OneofMarshaler, _Document_OneofUnmarshaler, _Document_OneofSizer, []interface{}{ + (*Document_Content)(nil), + (*Document_GcsContentUri)(nil), + } +} + +func _Document_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Document) + // source + switch x := m.Source.(type) { + case *Document_Content: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Content) + case *Document_GcsContentUri: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.GcsContentUri) + case nil: + default: + return fmt.Errorf("Document.Source has unexpected type %T", x) + } + return nil +} + +func _Document_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Document) + switch tag { + case 2: // source.content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Document_Content{x} + return true, err + case 3: // source.gcs_content_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Document_GcsContentUri{x} + return true, err + default: + return false, nil + } +} + +func _Document_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Document) + // source + switch x := m.Source.(type) { + case *Document_Content: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Content))) + n += len(x.Content) + case *Document_GcsContentUri: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.GcsContentUri))) + n += len(x.GcsContentUri) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents a sentence in the input 
document. +type Sentence struct { + // The sentence text. + Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // For calls to [AnalyzeSentiment][] or if + // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment] is set to + // true, this field will contain the sentiment for the sentence. + Sentiment *Sentiment `protobuf:"bytes,2,opt,name=sentiment" json:"sentiment,omitempty"` +} + +func (m *Sentence) Reset() { *m = Sentence{} } +func (m *Sentence) String() string { return proto.CompactTextString(m) } +func (*Sentence) ProtoMessage() {} +func (*Sentence) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Sentence) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *Sentence) GetSentiment() *Sentiment { + if m != nil { + return m.Sentiment + } + return nil +} + +// Represents a phrase in the text that is a known entity, such as +// a person, an organization, or location. The API associates information, such +// as salience and mentions, with entities. +type Entity struct { + // The representative name for the entity. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The entity type. + Type Entity_Type `protobuf:"varint,2,opt,name=type,enum=google.cloud.language.v1beta2.Entity_Type" json:"type,omitempty"` + // Metadata associated with the entity. + // + // Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if + // available. The associated keys are "wikipedia_url" and "mid", respectively. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The salience score associated with the entity in the [0, 1.0] range. 
+ // + // The salience score for an entity provides information about the + // importance or centrality of that entity to the entire document text. + // Scores closer to 0 are less salient, while scores closer to 1.0 are highly + // salient. + Salience float32 `protobuf:"fixed32,4,opt,name=salience" json:"salience,omitempty"` + // The mentions of this entity in the input document. The API currently + // supports proper noun mentions. + Mentions []*EntityMention `protobuf:"bytes,5,rep,name=mentions" json:"mentions,omitempty"` + // For calls to [AnalyzeEntitySentiment][] or if + // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to + // true, this field will contain the aggregate sentiment expressed for this + // entity in the provided document. + Sentiment *Sentiment `protobuf:"bytes,6,opt,name=sentiment" json:"sentiment,omitempty"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Entity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Entity) GetType() Entity_Type { + if m != nil { + return m.Type + } + return Entity_UNKNOWN +} + +func (m *Entity) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Entity) GetSalience() float32 { + if m != nil { + return m.Salience + } + return 0 +} + +func (m *Entity) GetMentions() []*EntityMention { + if m != nil { + return m.Mentions + } + return nil +} + +func (m *Entity) GetSentiment() *Sentiment { + if m != nil { + return m.Sentiment + } + return nil +} + +// Represents the smallest syntactic building block of the text. +type Token struct { + // The token text. 
+ Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // Parts of speech tag for this token. + PartOfSpeech *PartOfSpeech `protobuf:"bytes,2,opt,name=part_of_speech,json=partOfSpeech" json:"part_of_speech,omitempty"` + // Dependency tree parse for this token. + DependencyEdge *DependencyEdge `protobuf:"bytes,3,opt,name=dependency_edge,json=dependencyEdge" json:"dependency_edge,omitempty"` + // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token. + Lemma string `protobuf:"bytes,4,opt,name=lemma" json:"lemma,omitempty"` +} + +func (m *Token) Reset() { *m = Token{} } +func (m *Token) String() string { return proto.CompactTextString(m) } +func (*Token) ProtoMessage() {} +func (*Token) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Token) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *Token) GetPartOfSpeech() *PartOfSpeech { + if m != nil { + return m.PartOfSpeech + } + return nil +} + +func (m *Token) GetDependencyEdge() *DependencyEdge { + if m != nil { + return m.DependencyEdge + } + return nil +} + +func (m *Token) GetLemma() string { + if m != nil { + return m.Lemma + } + return "" +} + +// Represents the feeling associated with the entire text or entities in +// the text. +type Sentiment struct { + // A non-negative number in the [0, +inf) range, which represents + // the absolute magnitude of sentiment regardless of score (positive or + // negative). + Magnitude float32 `protobuf:"fixed32,2,opt,name=magnitude" json:"magnitude,omitempty"` + // Sentiment score between -1.0 (negative sentiment) and 1.0 + // (positive sentiment). 
+ Score float32 `protobuf:"fixed32,3,opt,name=score" json:"score,omitempty"` +} + +func (m *Sentiment) Reset() { *m = Sentiment{} } +func (m *Sentiment) String() string { return proto.CompactTextString(m) } +func (*Sentiment) ProtoMessage() {} +func (*Sentiment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Sentiment) GetMagnitude() float32 { + if m != nil { + return m.Magnitude + } + return 0 +} + +func (m *Sentiment) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +// Represents part of speech information for a token. +type PartOfSpeech struct { + // The part of speech tag. + Tag PartOfSpeech_Tag `protobuf:"varint,1,opt,name=tag,enum=google.cloud.language.v1beta2.PartOfSpeech_Tag" json:"tag,omitempty"` + // The grammatical aspect. + Aspect PartOfSpeech_Aspect `protobuf:"varint,2,opt,name=aspect,enum=google.cloud.language.v1beta2.PartOfSpeech_Aspect" json:"aspect,omitempty"` + // The grammatical case. + Case PartOfSpeech_Case `protobuf:"varint,3,opt,name=case,enum=google.cloud.language.v1beta2.PartOfSpeech_Case" json:"case,omitempty"` + // The grammatical form. + Form PartOfSpeech_Form `protobuf:"varint,4,opt,name=form,enum=google.cloud.language.v1beta2.PartOfSpeech_Form" json:"form,omitempty"` + // The grammatical gender. + Gender PartOfSpeech_Gender `protobuf:"varint,5,opt,name=gender,enum=google.cloud.language.v1beta2.PartOfSpeech_Gender" json:"gender,omitempty"` + // The grammatical mood. + Mood PartOfSpeech_Mood `protobuf:"varint,6,opt,name=mood,enum=google.cloud.language.v1beta2.PartOfSpeech_Mood" json:"mood,omitempty"` + // The grammatical number. + Number PartOfSpeech_Number `protobuf:"varint,7,opt,name=number,enum=google.cloud.language.v1beta2.PartOfSpeech_Number" json:"number,omitempty"` + // The grammatical person. + Person PartOfSpeech_Person `protobuf:"varint,8,opt,name=person,enum=google.cloud.language.v1beta2.PartOfSpeech_Person" json:"person,omitempty"` + // The grammatical properness. 
+ Proper PartOfSpeech_Proper `protobuf:"varint,9,opt,name=proper,enum=google.cloud.language.v1beta2.PartOfSpeech_Proper" json:"proper,omitempty"` + // The grammatical reciprocity. + Reciprocity PartOfSpeech_Reciprocity `protobuf:"varint,10,opt,name=reciprocity,enum=google.cloud.language.v1beta2.PartOfSpeech_Reciprocity" json:"reciprocity,omitempty"` + // The grammatical tense. + Tense PartOfSpeech_Tense `protobuf:"varint,11,opt,name=tense,enum=google.cloud.language.v1beta2.PartOfSpeech_Tense" json:"tense,omitempty"` + // The grammatical voice. + Voice PartOfSpeech_Voice `protobuf:"varint,12,opt,name=voice,enum=google.cloud.language.v1beta2.PartOfSpeech_Voice" json:"voice,omitempty"` +} + +func (m *PartOfSpeech) Reset() { *m = PartOfSpeech{} } +func (m *PartOfSpeech) String() string { return proto.CompactTextString(m) } +func (*PartOfSpeech) ProtoMessage() {} +func (*PartOfSpeech) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *PartOfSpeech) GetTag() PartOfSpeech_Tag { + if m != nil { + return m.Tag + } + return PartOfSpeech_UNKNOWN +} + +func (m *PartOfSpeech) GetAspect() PartOfSpeech_Aspect { + if m != nil { + return m.Aspect + } + return PartOfSpeech_ASPECT_UNKNOWN +} + +func (m *PartOfSpeech) GetCase() PartOfSpeech_Case { + if m != nil { + return m.Case + } + return PartOfSpeech_CASE_UNKNOWN +} + +func (m *PartOfSpeech) GetForm() PartOfSpeech_Form { + if m != nil { + return m.Form + } + return PartOfSpeech_FORM_UNKNOWN +} + +func (m *PartOfSpeech) GetGender() PartOfSpeech_Gender { + if m != nil { + return m.Gender + } + return PartOfSpeech_GENDER_UNKNOWN +} + +func (m *PartOfSpeech) GetMood() PartOfSpeech_Mood { + if m != nil { + return m.Mood + } + return PartOfSpeech_MOOD_UNKNOWN +} + +func (m *PartOfSpeech) GetNumber() PartOfSpeech_Number { + if m != nil { + return m.Number + } + return PartOfSpeech_NUMBER_UNKNOWN +} + +func (m *PartOfSpeech) GetPerson() PartOfSpeech_Person { + if m != nil { + return m.Person + } + return 
PartOfSpeech_PERSON_UNKNOWN +} + +func (m *PartOfSpeech) GetProper() PartOfSpeech_Proper { + if m != nil { + return m.Proper + } + return PartOfSpeech_PROPER_UNKNOWN +} + +func (m *PartOfSpeech) GetReciprocity() PartOfSpeech_Reciprocity { + if m != nil { + return m.Reciprocity + } + return PartOfSpeech_RECIPROCITY_UNKNOWN +} + +func (m *PartOfSpeech) GetTense() PartOfSpeech_Tense { + if m != nil { + return m.Tense + } + return PartOfSpeech_TENSE_UNKNOWN +} + +func (m *PartOfSpeech) GetVoice() PartOfSpeech_Voice { + if m != nil { + return m.Voice + } + return PartOfSpeech_VOICE_UNKNOWN +} + +// Represents dependency parse tree information for a token. +type DependencyEdge struct { + // Represents the head of this token in the dependency tree. + // This is the index of the token which has an arc going to this token. + // The index is the position of the token in the array of tokens returned + // by the API method. If this token is a root token, then the + // `head_token_index` is its own index. + HeadTokenIndex int32 `protobuf:"varint,1,opt,name=head_token_index,json=headTokenIndex" json:"head_token_index,omitempty"` + // The parse label for the token. + Label DependencyEdge_Label `protobuf:"varint,2,opt,name=label,enum=google.cloud.language.v1beta2.DependencyEdge_Label" json:"label,omitempty"` +} + +func (m *DependencyEdge) Reset() { *m = DependencyEdge{} } +func (m *DependencyEdge) String() string { return proto.CompactTextString(m) } +func (*DependencyEdge) ProtoMessage() {} +func (*DependencyEdge) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DependencyEdge) GetHeadTokenIndex() int32 { + if m != nil { + return m.HeadTokenIndex + } + return 0 +} + +func (m *DependencyEdge) GetLabel() DependencyEdge_Label { + if m != nil { + return m.Label + } + return DependencyEdge_UNKNOWN +} + +// Represents a mention for an entity in the text. Currently, proper noun +// mentions are supported. +type EntityMention struct { + // The mention text. 
+ Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // The type of the entity mention. + Type EntityMention_Type `protobuf:"varint,2,opt,name=type,enum=google.cloud.language.v1beta2.EntityMention_Type" json:"type,omitempty"` + // For calls to [AnalyzeEntitySentiment][] or if + // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to + // true, this field will contain the sentiment expressed for this mention of + // the entity in the provided document. + Sentiment *Sentiment `protobuf:"bytes,3,opt,name=sentiment" json:"sentiment,omitempty"` +} + +func (m *EntityMention) Reset() { *m = EntityMention{} } +func (m *EntityMention) String() string { return proto.CompactTextString(m) } +func (*EntityMention) ProtoMessage() {} +func (*EntityMention) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *EntityMention) GetText() *TextSpan { + if m != nil { + return m.Text + } + return nil +} + +func (m *EntityMention) GetType() EntityMention_Type { + if m != nil { + return m.Type + } + return EntityMention_TYPE_UNKNOWN +} + +func (m *EntityMention) GetSentiment() *Sentiment { + if m != nil { + return m.Sentiment + } + return nil +} + +// Represents an output piece of text. +type TextSpan struct { + // The content of the output text. + Content string `protobuf:"bytes,1,opt,name=content" json:"content,omitempty"` + // The API calculates the beginning offset of the content in the original + // document according to the [EncodingType][google.cloud.language.v1beta2.EncodingType] specified in the API request. 
+ BeginOffset int32 `protobuf:"varint,2,opt,name=begin_offset,json=beginOffset" json:"begin_offset,omitempty"` +} + +func (m *TextSpan) Reset() { *m = TextSpan{} } +func (m *TextSpan) String() string { return proto.CompactTextString(m) } +func (*TextSpan) ProtoMessage() {} +func (*TextSpan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *TextSpan) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *TextSpan) GetBeginOffset() int32 { + if m != nil { + return m.BeginOffset + } + return 0 +} + +// Represents a category returned from the text classifier. +type ClassificationCategory struct { + // The name of the category representing the document. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The classifier's confidence of the category. Number represents how certain + // the classifier is that this category represents the given text. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *ClassificationCategory) Reset() { *m = ClassificationCategory{} } +func (m *ClassificationCategory) String() string { return proto.CompactTextString(m) } +func (*ClassificationCategory) ProtoMessage() {} +func (*ClassificationCategory) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ClassificationCategory) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ClassificationCategory) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// The sentiment analysis request message. +type AnalyzeSentimentRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate sentence offsets for the + // sentence sentiment. 
+ EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta2.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeSentimentRequest) Reset() { *m = AnalyzeSentimentRequest{} } +func (m *AnalyzeSentimentRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSentimentRequest) ProtoMessage() {} +func (*AnalyzeSentimentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *AnalyzeSentimentRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeSentimentRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The sentiment analysis response message. +type AnalyzeSentimentResponse struct { + // The overall sentiment of the input document. + DocumentSentiment *Sentiment `protobuf:"bytes,1,opt,name=document_sentiment,json=documentSentiment" json:"document_sentiment,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. + Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` + // The sentiment for all the sentences in the document. 
+ Sentences []*Sentence `protobuf:"bytes,3,rep,name=sentences" json:"sentences,omitempty"` +} + +func (m *AnalyzeSentimentResponse) Reset() { *m = AnalyzeSentimentResponse{} } +func (m *AnalyzeSentimentResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSentimentResponse) ProtoMessage() {} +func (*AnalyzeSentimentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *AnalyzeSentimentResponse) GetDocumentSentiment() *Sentiment { + if m != nil { + return m.DocumentSentiment + } + return nil +} + +func (m *AnalyzeSentimentResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +func (m *AnalyzeSentimentResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +// The entity-level sentiment analysis request message. +type AnalyzeEntitySentimentRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate offsets. + EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta2.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeEntitySentimentRequest) Reset() { *m = AnalyzeEntitySentimentRequest{} } +func (m *AnalyzeEntitySentimentRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitySentimentRequest) ProtoMessage() {} +func (*AnalyzeEntitySentimentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *AnalyzeEntitySentimentRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeEntitySentimentRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The entity-level sentiment analysis response message. 
+type AnalyzeEntitySentimentResponse struct { + // The recognized entities in the input document with associated sentiments. + Entities []*Entity `protobuf:"bytes,1,rep,name=entities" json:"entities,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. + Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` +} + +func (m *AnalyzeEntitySentimentResponse) Reset() { *m = AnalyzeEntitySentimentResponse{} } +func (m *AnalyzeEntitySentimentResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitySentimentResponse) ProtoMessage() {} +func (*AnalyzeEntitySentimentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *AnalyzeEntitySentimentResponse) GetEntities() []*Entity { + if m != nil { + return m.Entities + } + return nil +} + +func (m *AnalyzeEntitySentimentResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// The entity analysis request message. +type AnalyzeEntitiesRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate offsets. 
+ EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta2.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeEntitiesRequest) Reset() { *m = AnalyzeEntitiesRequest{} } +func (m *AnalyzeEntitiesRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitiesRequest) ProtoMessage() {} +func (*AnalyzeEntitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *AnalyzeEntitiesRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeEntitiesRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The entity analysis response message. +type AnalyzeEntitiesResponse struct { + // The recognized entities in the input document. + Entities []*Entity `protobuf:"bytes,1,rep,name=entities" json:"entities,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. + Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` +} + +func (m *AnalyzeEntitiesResponse) Reset() { *m = AnalyzeEntitiesResponse{} } +func (m *AnalyzeEntitiesResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeEntitiesResponse) ProtoMessage() {} +func (*AnalyzeEntitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *AnalyzeEntitiesResponse) GetEntities() []*Entity { + if m != nil { + return m.Entities + } + return nil +} + +func (m *AnalyzeEntitiesResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// The syntax analysis request message. +type AnalyzeSyntaxRequest struct { + // Input document. 
+ Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The encoding type used by the API to calculate offsets. + EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta2.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnalyzeSyntaxRequest) Reset() { *m = AnalyzeSyntaxRequest{} } +func (m *AnalyzeSyntaxRequest) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSyntaxRequest) ProtoMessage() {} +func (*AnalyzeSyntaxRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *AnalyzeSyntaxRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnalyzeSyntaxRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// The syntax analysis response message. +type AnalyzeSyntaxResponse struct { + // Sentences in the input document. + Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences" json:"sentences,omitempty"` + // Tokens, along with their syntactic information, in the input document. + Tokens []*Token `protobuf:"bytes,2,rep,name=tokens" json:"tokens,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. 
+ Language string `protobuf:"bytes,3,opt,name=language" json:"language,omitempty"` +} + +func (m *AnalyzeSyntaxResponse) Reset() { *m = AnalyzeSyntaxResponse{} } +func (m *AnalyzeSyntaxResponse) String() string { return proto.CompactTextString(m) } +func (*AnalyzeSyntaxResponse) ProtoMessage() {} +func (*AnalyzeSyntaxResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *AnalyzeSyntaxResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +func (m *AnalyzeSyntaxResponse) GetTokens() []*Token { + if m != nil { + return m.Tokens + } + return nil +} + +func (m *AnalyzeSyntaxResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +// The document classification request message. +type ClassifyTextRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` +} + +func (m *ClassifyTextRequest) Reset() { *m = ClassifyTextRequest{} } +func (m *ClassifyTextRequest) String() string { return proto.CompactTextString(m) } +func (*ClassifyTextRequest) ProtoMessage() {} +func (*ClassifyTextRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *ClassifyTextRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +// The document classification response message. +type ClassifyTextResponse struct { + // Categories representing the input document. 
+ Categories []*ClassificationCategory `protobuf:"bytes,1,rep,name=categories" json:"categories,omitempty"` +} + +func (m *ClassifyTextResponse) Reset() { *m = ClassifyTextResponse{} } +func (m *ClassifyTextResponse) String() string { return proto.CompactTextString(m) } +func (*ClassifyTextResponse) ProtoMessage() {} +func (*ClassifyTextResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *ClassifyTextResponse) GetCategories() []*ClassificationCategory { + if m != nil { + return m.Categories + } + return nil +} + +// The request message for the text annotation API, which can perform multiple +// analysis types (sentiment, entities, and syntax) in one call. +type AnnotateTextRequest struct { + // Input document. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The enabled features. + Features *AnnotateTextRequest_Features `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // The encoding type used by the API to calculate offsets. + EncodingType EncodingType `protobuf:"varint,3,opt,name=encoding_type,json=encodingType,enum=google.cloud.language.v1beta2.EncodingType" json:"encoding_type,omitempty"` +} + +func (m *AnnotateTextRequest) Reset() { *m = AnnotateTextRequest{} } +func (m *AnnotateTextRequest) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextRequest) ProtoMessage() {} +func (*AnnotateTextRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *AnnotateTextRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *AnnotateTextRequest) GetFeatures() *AnnotateTextRequest_Features { + if m != nil { + return m.Features + } + return nil +} + +func (m *AnnotateTextRequest) GetEncodingType() EncodingType { + if m != nil { + return m.EncodingType + } + return EncodingType_NONE +} + +// All available features for sentiment, syntax, and semantic analysis. 
+// Setting each one to true will enable that specific analysis for the input. +type AnnotateTextRequest_Features struct { + // Extract syntax information. + ExtractSyntax bool `protobuf:"varint,1,opt,name=extract_syntax,json=extractSyntax" json:"extract_syntax,omitempty"` + // Extract entities. + ExtractEntities bool `protobuf:"varint,2,opt,name=extract_entities,json=extractEntities" json:"extract_entities,omitempty"` + // Extract document-level sentiment. + ExtractDocumentSentiment bool `protobuf:"varint,3,opt,name=extract_document_sentiment,json=extractDocumentSentiment" json:"extract_document_sentiment,omitempty"` + // Extract entities and their associated sentiment. + ExtractEntitySentiment bool `protobuf:"varint,4,opt,name=extract_entity_sentiment,json=extractEntitySentiment" json:"extract_entity_sentiment,omitempty"` + // Classify the full document into categories. + ClassifyText bool `protobuf:"varint,6,opt,name=classify_text,json=classifyText" json:"classify_text,omitempty"` +} + +func (m *AnnotateTextRequest_Features) Reset() { *m = AnnotateTextRequest_Features{} } +func (m *AnnotateTextRequest_Features) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextRequest_Features) ProtoMessage() {} +func (*AnnotateTextRequest_Features) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{20, 0} +} + +func (m *AnnotateTextRequest_Features) GetExtractSyntax() bool { + if m != nil { + return m.ExtractSyntax + } + return false +} + +func (m *AnnotateTextRequest_Features) GetExtractEntities() bool { + if m != nil { + return m.ExtractEntities + } + return false +} + +func (m *AnnotateTextRequest_Features) GetExtractDocumentSentiment() bool { + if m != nil { + return m.ExtractDocumentSentiment + } + return false +} + +func (m *AnnotateTextRequest_Features) GetExtractEntitySentiment() bool { + if m != nil { + return m.ExtractEntitySentiment + } + return false +} + +func (m *AnnotateTextRequest_Features) GetClassifyText() bool { + if m != 
nil { + return m.ClassifyText + } + return false +} + +// The text annotations response message. +type AnnotateTextResponse struct { + // Sentences in the input document. Populated if the user enables + // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax]. + Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences" json:"sentences,omitempty"` + // Tokens, along with their syntactic information, in the input document. + // Populated if the user enables + // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax]. + Tokens []*Token `protobuf:"bytes,2,rep,name=tokens" json:"tokens,omitempty"` + // Entities, along with their semantic information, in the input document. + // Populated if the user enables + // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entities]. + Entities []*Entity `protobuf:"bytes,3,rep,name=entities" json:"entities,omitempty"` + // The overall sentiment for the document. Populated if the user enables + // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment]. + DocumentSentiment *Sentiment `protobuf:"bytes,4,opt,name=document_sentiment,json=documentSentiment" json:"document_sentiment,omitempty"` + // The language of the text, which will be the same as the language specified + // in the request or, if not specified, the automatically-detected language. + // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details. + Language string `protobuf:"bytes,5,opt,name=language" json:"language,omitempty"` + // Categories identified in the input document. 
+ Categories []*ClassificationCategory `protobuf:"bytes,6,rep,name=categories" json:"categories,omitempty"` +} + +func (m *AnnotateTextResponse) Reset() { *m = AnnotateTextResponse{} } +func (m *AnnotateTextResponse) String() string { return proto.CompactTextString(m) } +func (*AnnotateTextResponse) ProtoMessage() {} +func (*AnnotateTextResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *AnnotateTextResponse) GetSentences() []*Sentence { + if m != nil { + return m.Sentences + } + return nil +} + +func (m *AnnotateTextResponse) GetTokens() []*Token { + if m != nil { + return m.Tokens + } + return nil +} + +func (m *AnnotateTextResponse) GetEntities() []*Entity { + if m != nil { + return m.Entities + } + return nil +} + +func (m *AnnotateTextResponse) GetDocumentSentiment() *Sentiment { + if m != nil { + return m.DocumentSentiment + } + return nil +} + +func (m *AnnotateTextResponse) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + +func (m *AnnotateTextResponse) GetCategories() []*ClassificationCategory { + if m != nil { + return m.Categories + } + return nil +} + +func init() { + proto.RegisterType((*Document)(nil), "google.cloud.language.v1beta2.Document") + proto.RegisterType((*Sentence)(nil), "google.cloud.language.v1beta2.Sentence") + proto.RegisterType((*Entity)(nil), "google.cloud.language.v1beta2.Entity") + proto.RegisterType((*Token)(nil), "google.cloud.language.v1beta2.Token") + proto.RegisterType((*Sentiment)(nil), "google.cloud.language.v1beta2.Sentiment") + proto.RegisterType((*PartOfSpeech)(nil), "google.cloud.language.v1beta2.PartOfSpeech") + proto.RegisterType((*DependencyEdge)(nil), "google.cloud.language.v1beta2.DependencyEdge") + proto.RegisterType((*EntityMention)(nil), "google.cloud.language.v1beta2.EntityMention") + proto.RegisterType((*TextSpan)(nil), "google.cloud.language.v1beta2.TextSpan") + proto.RegisterType((*ClassificationCategory)(nil), 
"google.cloud.language.v1beta2.ClassificationCategory") + proto.RegisterType((*AnalyzeSentimentRequest)(nil), "google.cloud.language.v1beta2.AnalyzeSentimentRequest") + proto.RegisterType((*AnalyzeSentimentResponse)(nil), "google.cloud.language.v1beta2.AnalyzeSentimentResponse") + proto.RegisterType((*AnalyzeEntitySentimentRequest)(nil), "google.cloud.language.v1beta2.AnalyzeEntitySentimentRequest") + proto.RegisterType((*AnalyzeEntitySentimentResponse)(nil), "google.cloud.language.v1beta2.AnalyzeEntitySentimentResponse") + proto.RegisterType((*AnalyzeEntitiesRequest)(nil), "google.cloud.language.v1beta2.AnalyzeEntitiesRequest") + proto.RegisterType((*AnalyzeEntitiesResponse)(nil), "google.cloud.language.v1beta2.AnalyzeEntitiesResponse") + proto.RegisterType((*AnalyzeSyntaxRequest)(nil), "google.cloud.language.v1beta2.AnalyzeSyntaxRequest") + proto.RegisterType((*AnalyzeSyntaxResponse)(nil), "google.cloud.language.v1beta2.AnalyzeSyntaxResponse") + proto.RegisterType((*ClassifyTextRequest)(nil), "google.cloud.language.v1beta2.ClassifyTextRequest") + proto.RegisterType((*ClassifyTextResponse)(nil), "google.cloud.language.v1beta2.ClassifyTextResponse") + proto.RegisterType((*AnnotateTextRequest)(nil), "google.cloud.language.v1beta2.AnnotateTextRequest") + proto.RegisterType((*AnnotateTextRequest_Features)(nil), "google.cloud.language.v1beta2.AnnotateTextRequest.Features") + proto.RegisterType((*AnnotateTextResponse)(nil), "google.cloud.language.v1beta2.AnnotateTextResponse") + proto.RegisterEnum("google.cloud.language.v1beta2.EncodingType", EncodingType_name, EncodingType_value) + proto.RegisterEnum("google.cloud.language.v1beta2.Document_Type", Document_Type_name, Document_Type_value) + proto.RegisterEnum("google.cloud.language.v1beta2.Entity_Type", Entity_Type_name, Entity_Type_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Tag", PartOfSpeech_Tag_name, PartOfSpeech_Tag_value) + 
proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Aspect", PartOfSpeech_Aspect_name, PartOfSpeech_Aspect_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Case", PartOfSpeech_Case_name, PartOfSpeech_Case_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Form", PartOfSpeech_Form_name, PartOfSpeech_Form_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Gender", PartOfSpeech_Gender_name, PartOfSpeech_Gender_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Mood", PartOfSpeech_Mood_name, PartOfSpeech_Mood_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Number", PartOfSpeech_Number_name, PartOfSpeech_Number_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Person", PartOfSpeech_Person_name, PartOfSpeech_Person_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Proper", PartOfSpeech_Proper_name, PartOfSpeech_Proper_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Reciprocity", PartOfSpeech_Reciprocity_name, PartOfSpeech_Reciprocity_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Tense", PartOfSpeech_Tense_name, PartOfSpeech_Tense_value) + proto.RegisterEnum("google.cloud.language.v1beta2.PartOfSpeech_Voice", PartOfSpeech_Voice_name, PartOfSpeech_Voice_value) + proto.RegisterEnum("google.cloud.language.v1beta2.DependencyEdge_Label", DependencyEdge_Label_name, DependencyEdge_Label_value) + proto.RegisterEnum("google.cloud.language.v1beta2.EntityMention_Type", EntityMention_Type_name, EntityMention_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for LanguageService service + +type LanguageServiceClient interface { + // Analyzes the sentiment of the provided text. + AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error) + // Finds named entities (currently proper names and common nouns) in the text + // along with entity types, salience, mentions for each entity, and + // other properties. + AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error) + // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes + // sentiment associated with each entity and its mentions. + AnalyzeEntitySentiment(ctx context.Context, in *AnalyzeEntitySentimentRequest, opts ...grpc.CallOption) (*AnalyzeEntitySentimentResponse, error) + // Analyzes the syntax of the text and provides sentence boundaries and + // tokenization along with part of speech tags, dependency trees, and other + // properties. + AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error) + // Classifies a document into categories. + ClassifyText(ctx context.Context, in *ClassifyTextRequest, opts ...grpc.CallOption) (*ClassifyTextResponse, error) + // A convenience method that provides all syntax, sentiment, entity, and + // classification features in one call. 
+ AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error) +} + +type languageServiceClient struct { + cc *grpc.ClientConn +} + +func NewLanguageServiceClient(cc *grpc.ClientConn) LanguageServiceClient { + return &languageServiceClient{cc} +} + +func (c *languageServiceClient) AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error) { + out := new(AnalyzeSentimentResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error) { + out := new(AnalyzeEntitiesResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnalyzeEntitySentiment(ctx context.Context, in *AnalyzeEntitySentimentRequest, opts ...grpc.CallOption) (*AnalyzeEntitySentimentResponse, error) { + out := new(AnalyzeEntitySentimentResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error) { + out := new(AnalyzeSyntaxResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) ClassifyText(ctx context.Context, in *ClassifyTextRequest, opts ...grpc.CallOption) (*ClassifyTextResponse, error) { + out := new(ClassifyTextResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta2.LanguageService/ClassifyText", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *languageServiceClient) AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error) { + out := new(AnnotateTextResponse) + err := grpc.Invoke(ctx, "/google.cloud.language.v1beta2.LanguageService/AnnotateText", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for LanguageService service + +type LanguageServiceServer interface { + // Analyzes the sentiment of the provided text. + AnalyzeSentiment(context.Context, *AnalyzeSentimentRequest) (*AnalyzeSentimentResponse, error) + // Finds named entities (currently proper names and common nouns) in the text + // along with entity types, salience, mentions for each entity, and + // other properties. + AnalyzeEntities(context.Context, *AnalyzeEntitiesRequest) (*AnalyzeEntitiesResponse, error) + // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes + // sentiment associated with each entity and its mentions. + AnalyzeEntitySentiment(context.Context, *AnalyzeEntitySentimentRequest) (*AnalyzeEntitySentimentResponse, error) + // Analyzes the syntax of the text and provides sentence boundaries and + // tokenization along with part of speech tags, dependency trees, and other + // properties. + AnalyzeSyntax(context.Context, *AnalyzeSyntaxRequest) (*AnalyzeSyntaxResponse, error) + // Classifies a document into categories. 
+ ClassifyText(context.Context, *ClassifyTextRequest) (*ClassifyTextResponse, error) + // A convenience method that provides all syntax, sentiment, entity, and + // classification features in one call. + AnnotateText(context.Context, *AnnotateTextRequest) (*AnnotateTextResponse, error) +} + +func RegisterLanguageServiceServer(s *grpc.Server, srv LanguageServiceServer) { + s.RegisterService(&_LanguageService_serviceDesc, srv) +} + +func _LanguageService_AnalyzeSentiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeSentimentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeSentiment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeSentiment(ctx, req.(*AnalyzeSentimentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnalyzeEntities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeEntitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeEntities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeEntities(ctx, req.(*AnalyzeEntitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnalyzeEntitySentiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeEntitySentimentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeEntitySentiment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeEntitySentiment(ctx, req.(*AnalyzeEntitySentimentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnalyzeSyntax_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnalyzeSyntaxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnalyzeSyntax(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnalyzeSyntax(ctx, req.(*AnalyzeSyntaxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LanguageService_ClassifyText_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClassifyTextRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).ClassifyText(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta2.LanguageService/ClassifyText", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).ClassifyText(ctx, req.(*ClassifyTextRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _LanguageService_AnnotateText_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnnotateTextRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LanguageServiceServer).AnnotateText(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.language.v1beta2.LanguageService/AnnotateText", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LanguageServiceServer).AnnotateText(ctx, req.(*AnnotateTextRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LanguageService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.language.v1beta2.LanguageService", + HandlerType: (*LanguageServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AnalyzeSentiment", + Handler: _LanguageService_AnalyzeSentiment_Handler, + }, + { + MethodName: "AnalyzeEntities", + Handler: _LanguageService_AnalyzeEntities_Handler, + }, + { + MethodName: "AnalyzeEntitySentiment", + Handler: _LanguageService_AnalyzeEntitySentiment_Handler, + }, + { + MethodName: "AnalyzeSyntax", + Handler: _LanguageService_AnalyzeSyntax_Handler, + }, + { + MethodName: "ClassifyText", + Handler: _LanguageService_ClassifyText_Handler, + }, + { + MethodName: "AnnotateText", + Handler: _LanguageService_AnnotateText_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/language/v1beta2/language_service.proto", +} + +func init() { + proto.RegisterFile("google/cloud/language/v1beta2/language_service.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 3019 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3a, 0x4b, 0x73, 0xdb, 0xc6, + 0xfd, 0x06, 0x5f, 0xa2, 0x96, 0x92, 0xbc, 0x86, 0x1d, 0x9b, 0x7f, 0xfd, 0xf3, 0x70, 0xe0, 
0xb8, + 0x56, 0xec, 0x44, 0x8a, 0x25, 0xc7, 0x71, 0x6d, 0xe7, 0x01, 0x01, 0x4b, 0x0a, 0x32, 0x09, 0xc0, + 0x0b, 0x80, 0x92, 0x7d, 0xe1, 0xc0, 0x24, 0xc4, 0x70, 0x22, 0x02, 0x2c, 0x01, 0x79, 0xac, 0x5e, + 0x32, 0xcd, 0x4c, 0x8f, 0x99, 0x1e, 0xf2, 0x11, 0x7a, 0xe8, 0xb4, 0x33, 0x9d, 0xb4, 0xd3, 0x99, + 0x4e, 0x7b, 0xe8, 0x27, 0xe8, 0xb1, 0x33, 0xfd, 0x04, 0xfd, 0x00, 0x3d, 0xb6, 0xb7, 0xce, 0x6f, + 0x77, 0x41, 0x82, 0xb2, 0x62, 0x89, 0x8e, 0xa7, 0x93, 0xde, 0x76, 0x7f, 0xf8, 0xbd, 0x9f, 0xbb, + 0x4b, 0xa2, 0x5b, 0xbd, 0x28, 0xea, 0xed, 0x07, 0x6b, 0x9d, 0xfd, 0xe8, 0xa0, 0xbb, 0xb6, 0xef, + 0x87, 0xbd, 0x03, 0xbf, 0x17, 0xac, 0x3d, 0xbd, 0xf9, 0x24, 0x48, 0xfc, 0xf5, 0x31, 0xa0, 0x1d, + 0x07, 0xa3, 0xa7, 0xfd, 0x4e, 0xb0, 0x3a, 0x1c, 0x45, 0x49, 0x24, 0xbf, 0xc1, 0xa9, 0x56, 0x19, + 0xd5, 0x6a, 0x8a, 0xb4, 0x2a, 0xa8, 0x96, 0x5f, 0x17, 0x4c, 0xfd, 0x61, 0x7f, 0xcd, 0x0f, 0xc3, + 0x28, 0xf1, 0x93, 0x7e, 0x14, 0xc6, 0x9c, 0x78, 0xf9, 0x8a, 0xf8, 0xba, 0x1f, 0x85, 0xbd, 0xd1, + 0x41, 0x18, 0xf6, 0xc3, 0xde, 0x5a, 0x34, 0x0c, 0x46, 0x53, 0x48, 0x6f, 0x09, 0x24, 0xb6, 0x7b, + 0x72, 0xb0, 0xb7, 0x96, 0xf4, 0x07, 0x41, 0x9c, 0xf8, 0x83, 0xa1, 0x40, 0xb8, 0x24, 0x10, 0x46, + 0xc3, 0xce, 0x5a, 0x9c, 0xf8, 0xc9, 0x81, 0xa0, 0x54, 0xfe, 0x29, 0xa1, 0xb2, 0x1e, 0x75, 0x0e, + 0x06, 0x41, 0x98, 0xc8, 0x9f, 0xa1, 0x42, 0x72, 0x38, 0x0c, 0xaa, 0xd2, 0x65, 0x69, 0x65, 0x69, + 0xfd, 0xbd, 0xd5, 0x17, 0xea, 0xbd, 0x9a, 0x92, 0xad, 0xba, 0x87, 0xc3, 0x80, 0x32, 0x4a, 0x79, + 0x19, 0xcd, 0x75, 0xa2, 0x30, 0x09, 0xc2, 0xa4, 0x9a, 0xbb, 0x2c, 0xad, 0xcc, 0x6f, 0x9d, 0xa1, + 0x29, 0x40, 0x5e, 0x41, 0x67, 0x7b, 0x9d, 0xb8, 0x2d, 0xb6, 0xed, 0x83, 0x51, 0xbf, 0x9a, 0x17, + 0x38, 0x8b, 0xbd, 0x4e, 0xac, 0x71, 0xb8, 0x37, 0xea, 0xcb, 0xcb, 0xa8, 0x9c, 0x4a, 0xab, 0x16, + 0x00, 0x85, 0x8e, 0xf7, 0xca, 0x6d, 0x54, 0x00, 0x79, 0xf2, 0x05, 0x84, 0xdd, 0x47, 0x36, 0x69, + 0x7b, 0xa6, 0x63, 0x13, 0xcd, 0xa8, 0x19, 0x44, 0xc7, 0x67, 0xe4, 0x25, 0x84, 0xec, 0x86, 0x6a, + 0x98, 0x6d, 0x97, 0xec, 0xba, 
0x58, 0x92, 0xcb, 0xa8, 0xb0, 0xe5, 0x36, 0x1b, 0x38, 0xb7, 0x59, + 0x46, 0xa5, 0x38, 0x3a, 0x18, 0x75, 0x02, 0xe5, 0x17, 0x12, 0x2a, 0x3b, 0x01, 0x08, 0xeb, 0x04, + 0xf2, 0x3d, 0x54, 0x48, 0x82, 0x67, 0x09, 0x33, 0xb9, 0xb2, 0x7e, 0xed, 0x04, 0x93, 0xdd, 0xe0, + 0x59, 0xe2, 0x0c, 0xfd, 0x90, 0x32, 0x22, 0xb9, 0x86, 0xe6, 0xe3, 0x20, 0x04, 0x5f, 0x0b, 0x7b, + 0x2b, 0xeb, 0x2b, 0x27, 0x70, 0x70, 0x52, 0x7c, 0x3a, 0x21, 0x55, 0xbe, 0x29, 0xa0, 0x12, 0x09, + 0x93, 0x7e, 0x72, 0x28, 0xcb, 0xa8, 0x10, 0xfa, 0x03, 0x1e, 0x82, 0x79, 0xca, 0xd6, 0xf2, 0x27, + 0x22, 0x2c, 0x39, 0x16, 0x96, 0xeb, 0x27, 0x48, 0xe0, 0x8c, 0xb2, 0x41, 0xb1, 0x50, 0x79, 0x10, + 0x24, 0x7e, 0xd7, 0x4f, 0xfc, 0x6a, 0xfe, 0x72, 0x7e, 0xa5, 0xb2, 0xbe, 0x71, 0x3a, 0x1e, 0x4d, + 0x41, 0x45, 0xc2, 0x64, 0x74, 0x48, 0xc7, 0x4c, 0x20, 0x3e, 0xb1, 0xbf, 0xdf, 0x07, 0x07, 0xb2, + 0xf8, 0xe4, 0xe8, 0x78, 0x2f, 0x6f, 0x81, 0xb0, 0x90, 0x25, 0x67, 0xb5, 0xc8, 0x84, 0xbd, 0x77, + 0x2a, 0x61, 0x4d, 0x4e, 0x44, 0xc7, 0xd4, 0xd3, 0xde, 0x2d, 0xbd, 0xb4, 0x77, 0x97, 0xef, 0xa1, + 0xc5, 0x29, 0x43, 0x64, 0x8c, 0xf2, 0x5f, 0x04, 0x87, 0xc2, 0xc5, 0xb0, 0x94, 0x2f, 0xa0, 0xe2, + 0x53, 0x7f, 0xff, 0x80, 0xbb, 0x78, 0x9e, 0xf2, 0xcd, 0xdd, 0xdc, 0x1d, 0x49, 0x39, 0x14, 0xe9, + 0x56, 0x41, 0x73, 0x9e, 0xf9, 0xc0, 0xb4, 0x76, 0x4c, 0x7c, 0x46, 0x46, 0xa8, 0x64, 0x13, 0xea, + 0x58, 0x26, 0x96, 0xe4, 0x05, 0x54, 0x6e, 0x58, 0x9a, 0xea, 0x1a, 0x96, 0x89, 0x73, 0x32, 0x46, + 0x0b, 0x16, 0xad, 0xab, 0xa6, 0xf1, 0x98, 0x43, 0xf2, 0xf2, 0x3c, 0x2a, 0x92, 0x16, 0x31, 0x5d, + 0x5c, 0x90, 0xcf, 0xa2, 0xca, 0x8e, 0x45, 0x1f, 0xb4, 0xad, 0x5a, 0x5b, 0xa5, 0x2e, 0x2e, 0xca, + 0xe7, 0xd0, 0xa2, 0x66, 0x99, 0x8e, 0xd7, 0x24, 0xb4, 0x5d, 0xb7, 0x2c, 0x1d, 0x97, 0x00, 0xdd, + 0x72, 0xb7, 0x08, 0xc5, 0x73, 0xca, 0xcf, 0x73, 0xa8, 0xe8, 0x46, 0x5f, 0x04, 0xe1, 0xf7, 0x4b, + 0xd2, 0x87, 0x68, 0x69, 0xe8, 0x8f, 0x92, 0x76, 0xb4, 0xd7, 0x8e, 0x87, 0x41, 0xd0, 0xf9, 0x5c, + 0x64, 0xea, 0x8d, 0x13, 0xd8, 0xd8, 0xfe, 0x28, 0xb1, 0xf6, 0x1c, 
0x46, 0x42, 0x17, 0x86, 0x99, + 0x9d, 0xdc, 0x42, 0x67, 0xbb, 0xc1, 0x30, 0x08, 0xbb, 0x41, 0xd8, 0x39, 0x6c, 0x07, 0xdd, 0x5e, + 0xc0, 0x2a, 0xb9, 0xb2, 0xfe, 0xfe, 0x49, 0x2d, 0x63, 0x4c, 0x45, 0xba, 0xbd, 0x80, 0x2e, 0x75, + 0xa7, 0xf6, 0x10, 0x86, 0xfd, 0x60, 0x30, 0xf0, 0x45, 0xd1, 0xf3, 0x8d, 0xf2, 0x29, 0x9a, 0x1f, + 0xc7, 0x55, 0x7e, 0x1d, 0xcd, 0x0f, 0xfc, 0x5e, 0xd8, 0x4f, 0x0e, 0xba, 0x3c, 0x5a, 0x39, 0x3a, + 0x01, 0x00, 0x83, 0xb8, 0x13, 0x8d, 0xb8, 0x3a, 0x39, 0xca, 0x37, 0xca, 0x9f, 0xcf, 0xa1, 0x85, + 0xac, 0x35, 0xb2, 0x8a, 0xf2, 0x89, 0xdf, 0x13, 0x6d, 0x6e, 0x6d, 0x06, 0x3f, 0xac, 0xba, 0x7e, + 0x8f, 0x02, 0xad, 0xbc, 0x8d, 0x4a, 0x7e, 0x3c, 0x0c, 0x3a, 0x89, 0xa8, 0xca, 0xf5, 0x59, 0xb8, + 0xa8, 0x8c, 0x92, 0x0a, 0x0e, 0xb2, 0x8e, 0x0a, 0x1d, 0x3f, 0xe6, 0x4a, 0x2f, 0xad, 0x7f, 0x30, + 0x0b, 0x27, 0xcd, 0x8f, 0x03, 0xca, 0xa8, 0x81, 0xcb, 0x5e, 0x34, 0x1a, 0x30, 0xdf, 0xcd, 0xc8, + 0xa5, 0x16, 0x8d, 0x06, 0x94, 0x51, 0x83, 0x5d, 0x3d, 0x08, 0xc9, 0xa8, 0x5a, 0x9c, 0xdd, 0xae, + 0x3a, 0xa3, 0xa4, 0x82, 0x03, 0x68, 0x34, 0x88, 0xa2, 0x2e, 0xab, 0xdd, 0x19, 0x35, 0x6a, 0x46, + 0x51, 0x97, 0x32, 0x6a, 0xd0, 0x28, 0x3c, 0x18, 0x3c, 0x09, 0x46, 0xd5, 0xb9, 0xd9, 0x35, 0x32, + 0x19, 0x25, 0x15, 0x1c, 0x80, 0xd7, 0x30, 0x18, 0xc5, 0x51, 0x58, 0x2d, 0xcf, 0xce, 0xcb, 0x66, + 0x94, 0x54, 0x70, 0x60, 0xbc, 0x46, 0x30, 0x89, 0xab, 0xf3, 0x2f, 0xc1, 0x8b, 0x51, 0x52, 0xc1, + 0x41, 0x7e, 0x84, 0x2a, 0xa3, 0xa0, 0xd3, 0x1f, 0x8e, 0xa2, 0x4e, 0x3f, 0x39, 0xac, 0x22, 0xc6, + 0xf0, 0xa3, 0x59, 0x18, 0xd2, 0x09, 0x39, 0xcd, 0xf2, 0x92, 0xeb, 0xa8, 0x98, 0x04, 0x61, 0x1c, + 0x54, 0x2b, 0x8c, 0xe9, 0xcd, 0x99, 0xb2, 0x1d, 0x08, 0x29, 0xa7, 0x07, 0x46, 0x4f, 0xa3, 0x7e, + 0x27, 0xa8, 0x2e, 0xcc, 0xce, 0xa8, 0x05, 0x84, 0x94, 0xd3, 0x2b, 0x5f, 0x4b, 0x28, 0xef, 0xfa, + 0xbd, 0xe9, 0x96, 0x3a, 0x87, 0xf2, 0xaa, 0xbe, 0x8d, 0x25, 0xbe, 0xb0, 0x71, 0x8e, 0x2f, 0x5a, + 0x38, 0x0f, 0x33, 0x5c, 0xb3, 0xcc, 0x6d, 0x5c, 0x00, 0x90, 0x4e, 0xa0, 0x71, 0x96, 0x51, 0xc1, + 0xb4, 
0x3c, 0x13, 0x97, 0x00, 0x64, 0x7a, 0x4d, 0x3c, 0x07, 0x20, 0x9b, 0x5a, 0x26, 0x2e, 0x03, + 0xc8, 0xa6, 0x2e, 0x9e, 0x87, 0x5e, 0x6a, 0x7b, 0xa6, 0xe6, 0x62, 0x04, 0x5f, 0x5b, 0x84, 0x6e, + 0xe2, 0x8a, 0x5c, 0x44, 0xd2, 0x2e, 0x5e, 0x80, 0x6f, 0x6a, 0xad, 0x66, 0xec, 0xe2, 0x45, 0xc5, + 0x42, 0x25, 0x5e, 0x90, 0xb2, 0x8c, 0x96, 0x54, 0x38, 0x4d, 0xb8, 0xed, 0x89, 0x62, 0x70, 0xa2, + 0x20, 0xb4, 0x46, 0x34, 0xd7, 0x68, 0x11, 0x2c, 0x41, 0x87, 0x37, 0x9a, 0x19, 0x48, 0x0e, 0xda, + 0xba, 0x4d, 0xad, 0x3a, 0x25, 0x8e, 0x03, 0x80, 0xbc, 0xf2, 0x2f, 0x09, 0x15, 0xa0, 0x30, 0x01, + 0x57, 0x53, 0x1d, 0x32, 0xcd, 0x4d, 0xd5, 0x34, 0xcf, 0x51, 0x05, 0xb7, 0x45, 0x34, 0xaf, 0xea, + 0xa0, 0x99, 0xa1, 0x36, 0x70, 0x8e, 0x0f, 0x84, 0xa6, 0xdd, 0x20, 0x4d, 0x62, 0x32, 0x8c, 0x3c, + 0xcc, 0x1a, 0x9d, 0x63, 0x17, 0x60, 0xd6, 0xd4, 0x89, 0x69, 0xb0, 0x5d, 0x91, 0x69, 0x62, 0x3a, + 0x2e, 0xf5, 0x00, 0x59, 0x6d, 0xe0, 0xd2, 0x64, 0x16, 0xb5, 0x08, 0x9e, 0x03, 0x59, 0xa6, 0xd5, + 0x34, 0x4c, 0xbe, 0x2f, 0x83, 0xbf, 0xad, 0xcd, 0x86, 0xf1, 0xd0, 0x23, 0x78, 0x1e, 0x04, 0xdb, + 0x2a, 0x75, 0x39, 0x2f, 0x04, 0x82, 0x6d, 0x4a, 0x6c, 0xcb, 0x31, 0x60, 0x6c, 0xa9, 0x0d, 0x5c, + 0x01, 0x67, 0x50, 0x52, 0x6b, 0x90, 0x5d, 0xa3, 0x45, 0xda, 0x60, 0x06, 0x5e, 0x00, 0x34, 0x4a, + 0x1a, 0x8c, 0x21, 0x07, 0x2d, 0x82, 0xcc, 0x56, 0x2a, 0x73, 0x49, 0xf9, 0x56, 0x42, 0x05, 0xe8, + 0x26, 0xa0, 0x5c, 0xcd, 0xa2, 0xcd, 0x8c, 0xe9, 0x0b, 0xa8, 0xac, 0xea, 0xa0, 0x90, 0xda, 0x10, + 0x86, 0x7b, 0xbb, 0x46, 0xc3, 0x50, 0xe9, 0x23, 0x9c, 0x03, 0x61, 0x19, 0xc3, 0x1f, 0x13, 0x8a, + 0xf3, 0x8c, 0x85, 0x61, 0xaa, 0x8d, 0x36, 0x31, 0x75, 0xc3, 0xac, 0xe3, 0x02, 0xf8, 0xa2, 0x4e, + 0xa8, 0x67, 0xea, 0xb8, 0x08, 0x6b, 0x4a, 0xd4, 0x86, 0xe1, 0x70, 0xbb, 0x0d, 0x2a, 0x76, 0x73, + 0x10, 0x5a, 0x67, 0xcb, 0xa2, 0x2e, 0x2e, 0x43, 0xd8, 0x1b, 0x96, 0x59, 0xe7, 0xb9, 0x60, 0x51, + 0x9d, 0x50, 0x8c, 0x00, 0x5b, 0x1c, 0x19, 0x35, 0x5c, 0x51, 0x08, 0x2a, 0xf1, 0xb6, 0x05, 0x3a, + 0xd4, 0x89, 0xa9, 0x13, 0x3a, 0xad, 0x74, 
0x8d, 0x34, 0x0d, 0xd3, 0x30, 0x45, 0xb4, 0x9a, 0xaa, + 0xa3, 0x79, 0x0d, 0xd8, 0xe6, 0x40, 0x05, 0x93, 0x78, 0x2e, 0x28, 0xab, 0x7c, 0x89, 0x0a, 0xd0, + 0xb3, 0x40, 0xe9, 0xa6, 0x65, 0xe9, 0x19, 0x16, 0x17, 0x10, 0xd6, 0x2c, 0x53, 0x17, 0x8e, 0x6d, + 0xc3, 0x57, 0x2c, 0x41, 0x70, 0x58, 0x1a, 0xa9, 0x22, 0x89, 0x60, 0x6f, 0xea, 0x86, 0x70, 0x64, + 0x1e, 0x3c, 0x6d, 0x98, 0x2e, 0xa1, 0xd4, 0xaa, 0xa7, 0xd1, 0xaf, 0xa0, 0xb9, 0x6d, 0x8f, 0xe7, + 0x58, 0x11, 0x92, 0xce, 0xf1, 0x36, 0xb7, 0x21, 0xbd, 0x01, 0x50, 0x52, 0x3e, 0x43, 0x25, 0xde, + 0xec, 0xc0, 0x0e, 0xd3, 0x6b, 0x6e, 0x1e, 0xb5, 0xc3, 0x31, 0xcc, 0xba, 0xd7, 0x50, 0x29, 0x96, + 0xd8, 0xf9, 0xa5, 0xe1, 0x51, 0x96, 0x72, 0x65, 0x54, 0xd0, 0x3d, 0xb5, 0x81, 0xf3, 0x8a, 0x8b, + 0x4a, 0xbc, 0xc5, 0x01, 0x07, 0x7e, 0xbe, 0xc9, 0x70, 0x98, 0x47, 0xc5, 0x9a, 0x41, 0x1d, 0x97, + 0x93, 0x3b, 0x04, 0x6c, 0xc2, 0x39, 0x00, 0xbb, 0x5b, 0x06, 0xd5, 0x71, 0x1e, 0x0c, 0x9d, 0x24, + 0x8c, 0x38, 0x1f, 0x15, 0x94, 0x3b, 0xa8, 0xc4, 0x9b, 0x1d, 0xe3, 0x4a, 0x2d, 0x7b, 0x4a, 0x2f, + 0xd0, 0x84, 0xc1, 0xb8, 0x4b, 0x4c, 0xcb, 0x6d, 0x8b, 0x7d, 0x4e, 0xd9, 0x46, 0x95, 0x4c, 0x57, + 0x93, 0x2f, 0xa1, 0xf3, 0x94, 0x68, 0x86, 0x4d, 0x2d, 0xcd, 0x70, 0x1f, 0x4d, 0xd7, 0x54, 0xfa, + 0x81, 0xa5, 0x16, 0xd8, 0x6f, 0x99, 0xed, 0x0c, 0x2c, 0xa7, 0xc4, 0xa8, 0xc8, 0x9a, 0x19, 0xf8, + 0xd5, 0x25, 0xe6, 0x54, 0x4d, 0xbe, 0x86, 0xce, 0x65, 0x03, 0xc4, 0x3e, 0x73, 0x2b, 0x6b, 0x9e, + 0xeb, 0x51, 0xc2, 0x9d, 0x64, 0xab, 0x8e, 0x8b, 0xf3, 0x10, 0x04, 0x9b, 0x12, 0x87, 0x1f, 0xe8, + 0x16, 0xd1, 0xfc, 0xb8, 0x17, 0xe0, 0x22, 0xbf, 0x7c, 0x78, 0xe9, 0xbe, 0xa4, 0x6c, 0xa2, 0x22, + 0x6b, 0x7c, 0x20, 0xb4, 0x65, 0x19, 0x1a, 0x99, 0x36, 0x5c, 0xd5, 0x26, 0x4d, 0x40, 0x53, 0xd3, + 0x9e, 0x90, 0x63, 0x22, 0xd4, 0xb4, 0x97, 0xfc, 0xbe, 0x8c, 0x96, 0xa6, 0x4f, 0x4d, 0xf2, 0x0a, + 0xc2, 0x9f, 0x07, 0x7e, 0xb7, 0x9d, 0xc0, 0xd9, 0xb0, 0xdd, 0x0f, 0xbb, 0xc1, 0x33, 0x76, 0x94, + 0x29, 0xd2, 0x25, 0x80, 0xb3, 0x23, 0xa3, 0x01, 0x50, 0xd9, 0x40, 0xc5, 0x7d, 
0xff, 0x49, 0xb0, + 0x2f, 0xce, 0x28, 0x1b, 0x33, 0x9d, 0xce, 0x56, 0x1b, 0x40, 0x4a, 0x39, 0x07, 0xe5, 0xd7, 0x73, + 0xa8, 0xc8, 0x00, 0xcf, 0x9d, 0x84, 0xd5, 0xcd, 0x4d, 0x4a, 0x5a, 0x58, 0x62, 0x2d, 0x15, 0x8a, + 0x98, 0x67, 0x85, 0xaa, 0xb7, 0xb4, 0x06, 0xef, 0x5f, 0xaa, 0xde, 0x6a, 0x5a, 0x3a, 0x2e, 0x80, + 0x1b, 0x55, 0x58, 0x15, 0x19, 0x82, 0x6d, 0x5b, 0x50, 0xbc, 0x00, 0x74, 0x5d, 0x8a, 0xe7, 0x58, + 0xc7, 0xf7, 0x76, 0x79, 0xa7, 0x52, 0xbd, 0x5d, 0x70, 0x02, 0x9e, 0x97, 0x4b, 0x28, 0xa7, 0x69, + 0x18, 0x01, 0x89, 0xc6, 0xd8, 0x57, 0xc6, 0x13, 0x81, 0xb5, 0x71, 0x0d, 0xea, 0x00, 0x2f, 0x32, + 0x2f, 0xc2, 0x92, 0x91, 0x2d, 0xf1, 0x59, 0x61, 0xe3, 0xb3, 0xe9, 0xd0, 0xc0, 0x80, 0xa0, 0x1b, + 0x8e, 0x66, 0x79, 0xd4, 0x21, 0xf8, 0x1c, 0x4b, 0x7c, 0x6b, 0x73, 0x1b, 0xcb, 0xb0, 0x22, 0xbb, + 0x76, 0x03, 0x9f, 0x67, 0x0d, 0xd6, 0x22, 0xce, 0x8e, 0xe1, 0x6e, 0xe1, 0x0b, 0x00, 0x37, 0x00, + 0xe3, 0x35, 0x58, 0x35, 0x55, 0xfa, 0x00, 0x5f, 0x04, 0x6e, 0xcd, 0x1d, 0x82, 0x2f, 0xf1, 0x45, + 0x0b, 0x57, 0xd9, 0x04, 0x22, 0x75, 0xfc, 0x7f, 0xa0, 0xa8, 0x69, 0xe2, 0x65, 0x60, 0x62, 0xda, + 0xc2, 0xe6, 0xff, 0x07, 0x0d, 0x4d, 0xa6, 0xe1, 0xeb, 0xa0, 0x80, 0x39, 0xd6, 0xf0, 0x8d, 0x74, + 0x74, 0xbd, 0xc9, 0xfa, 0x08, 0x2b, 0x58, 0xfc, 0x16, 0x8c, 0x27, 0x1b, 0x5f, 0x16, 0xed, 0x59, + 0x75, 0xd5, 0x5d, 0xc3, 0xc1, 0x6f, 0xf3, 0x94, 0xa0, 0x2e, 0x70, 0x54, 0xd8, 0x58, 0x63, 0x8e, + 0xb8, 0xc2, 0xf2, 0x12, 0x34, 0x7c, 0x87, 0xaf, 0x1c, 0x07, 0x5f, 0x65, 0xb8, 0x96, 0xe3, 0x82, + 0x4e, 0x3f, 0x12, 0xe9, 0xca, 0xb0, 0xaf, 0x8d, 0x37, 0xe6, 0x36, 0x5e, 0xe1, 0x95, 0x47, 0xc0, + 0x33, 0xef, 0xf2, 0xd9, 0x49, 0x6a, 0xf8, 0xba, 0x58, 0xd9, 0xf8, 0x06, 0x93, 0x42, 0x2d, 0xb3, + 0x81, 0xdf, 0x4b, 0x07, 0xea, 0xfb, 0x60, 0xa1, 0xed, 0xe0, 0x55, 0xb0, 0xf0, 0xa1, 0xa7, 0x9a, + 0x4c, 0x9f, 0x35, 0xc0, 0xa4, 0x1a, 0x2c, 0x3f, 0x80, 0x0f, 0x6c, 0x49, 0x49, 0x03, 0xdf, 0x64, + 0x1f, 0x74, 0x6a, 0xd9, 0x78, 0x1d, 0x58, 0x80, 0x80, 0x0d, 0xd0, 0x81, 0x92, 0xa6, 0xa9, 0x9a, + 0x2e, 0xbe, 0xc5, 
0x2b, 0x17, 0xec, 0x34, 0x75, 0xaf, 0x89, 0x3f, 0x04, 0xe9, 0xd4, 0xb2, 0x5c, + 0x7c, 0x1b, 0x56, 0x0e, 0x38, 0xe7, 0x23, 0xb6, 0xf2, 0x6a, 0x35, 0x7c, 0x07, 0x56, 0x4c, 0xe2, + 0x8f, 0x59, 0xd3, 0xb1, 0x6c, 0x43, 0xc3, 0x77, 0xd9, 0x60, 0x07, 0xe0, 0xbd, 0xa9, 0x41, 0x74, + 0x1f, 0x50, 0x76, 0x99, 0xd9, 0x1f, 0xb3, 0x76, 0xe5, 0xb1, 0x59, 0xff, 0x09, 0xa3, 0x34, 0xdc, + 0x06, 0xc1, 0x9f, 0xf2, 0x79, 0xd4, 0xb2, 0xb7, 0x80, 0xfa, 0x33, 0x91, 0x72, 0x50, 0x86, 0x58, + 0x65, 0xd9, 0xe9, 0xed, 0xb6, 0x5a, 0x78, 0x13, 0x96, 0x3a, 0x93, 0xaa, 0x01, 0x4a, 0xcd, 0xa2, + 0xc4, 0xa8, 0x9b, 0x58, 0x07, 0x57, 0x3c, 0xd8, 0xc1, 0x84, 0x4d, 0x18, 0xc3, 0x71, 0x71, 0x8d, + 0x9f, 0x49, 0x9a, 0x1a, 0xae, 0xb3, 0x04, 0xb0, 0x9a, 0x3c, 0x2f, 0xb7, 0x60, 0x22, 0xa4, 0x3b, + 0x16, 0x78, 0x83, 0x61, 0x7a, 0x4d, 0x0d, 0x6f, 0x83, 0x5b, 0x34, 0xcb, 0xc6, 0x0f, 0xc0, 0x13, + 0xba, 0xe1, 0xb0, 0xe1, 0x4d, 0x74, 0xdc, 0x60, 0xa5, 0xe0, 0xd8, 0xb8, 0x09, 0xb8, 0x75, 0x10, + 0x6f, 0xb2, 0x15, 0xc4, 0xda, 0x02, 0x83, 0x0c, 0xb3, 0x06, 0x50, 0x9b, 0xa5, 0x21, 0x71, 0xf0, + 0x43, 0x96, 0x67, 0xcc, 0x60, 0xaa, 0x7c, 0x9d, 0x43, 0x8b, 0x53, 0x97, 0xea, 0xef, 0x77, 0x81, + 0x24, 0x53, 0xcf, 0x0f, 0x37, 0x67, 0xb9, 0xcd, 0x67, 0x5f, 0x21, 0xa6, 0xae, 0xf3, 0xf9, 0x97, + 0x7f, 0x2c, 0xf9, 0x40, 0xdc, 0xc8, 0x31, 0x5a, 0x10, 0x0f, 0x40, 0xc7, 0x0d, 0x13, 0x84, 0x4a, + 0x9a, 0xd5, 0x6c, 0xc2, 0xa5, 0x5c, 0xa9, 0xa3, 0x72, 0x6a, 0x92, 0x5c, 0x9d, 0x3c, 0x50, 0xf1, + 0xfb, 0xff, 0xf8, 0x79, 0xea, 0x6d, 0xb4, 0xf0, 0x24, 0xe8, 0xf5, 0xc3, 0x76, 0xb4, 0xb7, 0x17, + 0x07, 0xfc, 0x5e, 0x57, 0xa4, 0x15, 0x06, 0xb3, 0x18, 0x48, 0x69, 0xa0, 0x8b, 0xda, 0xbe, 0x1f, + 0xc7, 0xfd, 0xbd, 0x7e, 0x87, 0xbd, 0xbf, 0x69, 0x7e, 0x12, 0xf4, 0xa2, 0xd1, 0xf1, 0xcf, 0x36, + 0x6f, 0x22, 0xd4, 0x89, 0xc2, 0xbd, 0x7e, 0x97, 0xbd, 0x93, 0xf0, 0xbb, 0x6a, 0x06, 0xa2, 0xfc, + 0x4e, 0x42, 0x97, 0xd4, 0xd0, 0xdf, 0x3f, 0xfc, 0x69, 0x30, 0x31, 0x34, 0xf8, 0xc9, 0x41, 0x10, + 0x27, 0xb2, 0x86, 0xca, 0x5d, 0xf1, 0xbc, 0x76, 0xca, 
0xa0, 0xa5, 0xaf, 0x71, 0x74, 0x4c, 0x28, + 0xdb, 0x68, 0x31, 0x08, 0x3b, 0x51, 0xb7, 0x1f, 0xf6, 0xda, 0x99, 0x08, 0xde, 0x38, 0x31, 0x82, + 0x9c, 0x86, 0xc5, 0x6e, 0x21, 0xc8, 0xec, 0x94, 0xbf, 0x4b, 0xa8, 0xfa, 0xbc, 0xca, 0xf1, 0x30, + 0x82, 0xd1, 0xba, 0x83, 0xe4, 0x54, 0x74, 0x7b, 0x12, 0x69, 0x69, 0xc6, 0x48, 0x9f, 0x4b, 0x79, + 0x4c, 0xee, 0xfc, 0xd9, 0xe7, 0xc0, 0xdc, 0xf4, 0x73, 0xa0, 0x4c, 0x78, 0x56, 0x81, 0x43, 0x63, + 0xf1, 0xb8, 0x75, 0xed, 0x14, 0xb2, 0x00, 0x9f, 0x4e, 0x28, 0x95, 0x3f, 0x4a, 0xe8, 0x0d, 0x61, + 0x18, 0x4f, 0xe0, 0xff, 0x95, 0x88, 0x7c, 0x89, 0xde, 0xfc, 0x2e, 0xbd, 0x45, 0x58, 0x54, 0x54, + 0x06, 0x58, 0xd2, 0x0f, 0xe2, 0xaa, 0xc4, 0x1c, 0x74, 0xf5, 0x54, 0x25, 0x4c, 0xc7, 0x64, 0x2f, + 0x0a, 0x00, 0x9c, 0xf8, 0x2f, 0x66, 0x35, 0xe8, 0x07, 0xf1, 0x0f, 0xdc, 0x65, 0xcf, 0xc6, 0x65, + 0x37, 0x51, 0xf8, 0xbf, 0xe3, 0xab, 0xdf, 0x4a, 0xe8, 0x42, 0x5a, 0x3e, 0x87, 0x61, 0xe2, 0x3f, + 0xfb, 0x81, 0x7b, 0xea, 0x4f, 0x12, 0x7a, 0xed, 0x88, 0xbe, 0xc2, 0x51, 0x53, 0x65, 0x27, 0xbd, + 0x6c, 0xd9, 0xc9, 0xf7, 0x51, 0x89, 0x9d, 0x62, 0xe3, 0x6a, 0x8e, 0xf1, 0x78, 0xe7, 0xa4, 0xc9, + 0x04, 0xc8, 0x54, 0xd0, 0x4c, 0xb9, 0x3a, 0x7f, 0xc4, 0xd5, 0x8f, 0xd1, 0x79, 0xd1, 0xaa, 0x0f, + 0xa1, 0xf7, 0xbf, 0x4a, 0x47, 0x2b, 0x03, 0x74, 0x61, 0x9a, 0xb7, 0x70, 0x8a, 0x87, 0x50, 0x87, + 0x0f, 0x84, 0x49, 0xfe, 0x7c, 0x78, 0x02, 0xfb, 0xe3, 0xe7, 0x09, 0xcd, 0x30, 0x52, 0x7e, 0x56, + 0x40, 0xe7, 0x55, 0xfe, 0xbb, 0x50, 0xf0, 0xaa, 0x6d, 0x91, 0x77, 0x50, 0x79, 0x2f, 0xf0, 0x93, + 0x83, 0x51, 0x10, 0x8b, 0x77, 0xe1, 0x7b, 0x27, 0x30, 0x39, 0x46, 0x95, 0xd5, 0x9a, 0x60, 0x41, + 0xc7, 0xcc, 0x9e, 0xcf, 0xc6, 0xfc, 0xf7, 0xcc, 0xc6, 0xe5, 0x7f, 0x4b, 0xa8, 0x9c, 0x0a, 0x92, + 0xaf, 0xa2, 0xa5, 0xe0, 0x59, 0x32, 0xf2, 0x3b, 0x49, 0x3b, 0x66, 0xa9, 0xc9, 0x5c, 0x50, 0xa6, + 0x8b, 0x02, 0xca, 0xf3, 0x55, 0x7e, 0x17, 0xe1, 0x14, 0x6d, 0x5c, 0xd8, 0x39, 0x86, 0x78, 0x56, + 0xc0, 0xd3, 0x1e, 0x20, 0xdf, 0x47, 0xcb, 0x29, 0xea, 0x31, 0x63, 0x2c, 0xcf, 0x88, 0xaa, 
0x02, + 0x43, 0x7f, 0x6e, 0x46, 0xdd, 0x41, 0xd5, 0x29, 0x41, 0x87, 0x19, 0xda, 0x02, 0xa3, 0xbd, 0x98, + 0x15, 0x38, 0xe9, 0xd3, 0xf2, 0x15, 0xb4, 0xd8, 0x11, 0xd9, 0xd4, 0x66, 0x87, 0xb4, 0x12, 0x43, + 0x5f, 0xe8, 0x64, 0x52, 0x4c, 0xf9, 0x4d, 0x1e, 0x3a, 0x47, 0xd6, 0xf1, 0x3f, 0xa4, 0x42, 0xcc, + 0xb6, 0xcd, 0xfc, 0xcb, 0xb5, 0xcd, 0xe3, 0x0f, 0x0f, 0x85, 0x57, 0x7b, 0x78, 0x28, 0x1e, 0x39, + 0x3c, 0x4c, 0x17, 0x6c, 0xe9, 0x15, 0x15, 0xec, 0xf5, 0x3b, 0x68, 0x21, 0x9b, 0xc6, 0xfc, 0x66, + 0x60, 0x12, 0x7c, 0x06, 0x56, 0x9e, 0x5b, 0xbb, 0xc3, 0x2f, 0xcb, 0x9e, 0x5b, 0xbb, 0x79, 0x9b, + 0x5f, 0x96, 0x3d, 0xb7, 0xb6, 0xb1, 0x8e, 0xf3, 0xeb, 0x7f, 0x29, 0xa3, 0xb3, 0x0d, 0x21, 0xd1, + 0xe1, 0xbf, 0x21, 0xcb, 0x7f, 0x90, 0x10, 0x3e, 0x7a, 0xe6, 0x92, 0x6f, 0x9f, 0x58, 0xa4, 0xc7, + 0x9e, 0x2b, 0x97, 0x3f, 0x9a, 0x99, 0x8e, 0xe7, 0x99, 0xb2, 0xfa, 0xd5, 0xdf, 0xfe, 0xf1, 0x4d, + 0x6e, 0x45, 0xb9, 0x32, 0xfe, 0xb1, 0x3b, 0x75, 0x75, 0x7c, 0xd7, 0x3f, 0x42, 0x74, 0x57, 0xba, + 0x2e, 0x7f, 0x2b, 0xa1, 0xb3, 0x47, 0xa6, 0xac, 0xfc, 0xe1, 0xe9, 0x84, 0x1f, 0x39, 0x46, 0x2c, + 0xdf, 0x9e, 0x95, 0x4c, 0xa8, 0xfc, 0x3e, 0x53, 0xf9, 0x9a, 0xa2, 0x7c, 0xb7, 0xca, 0x29, 0x0d, + 0x68, 0xfc, 0xd7, 0x23, 0x07, 0x99, 0x4c, 0x89, 0xde, 0x9f, 0x41, 0x83, 0xe7, 0x4e, 0x8e, 0xcb, + 0x1f, 0xbf, 0x24, 0xb5, 0x30, 0xe3, 0x16, 0x33, 0x63, 0x55, 0x79, 0xf7, 0x04, 0x33, 0x0e, 0xa7, + 0xfc, 0xff, 0x2b, 0x09, 0x2d, 0x4e, 0x8d, 0x6e, 0x79, 0xe3, 0x94, 0xa1, 0xcf, 0x1e, 0x4c, 0x96, + 0x6f, 0xcd, 0x46, 0x24, 0x54, 0xbe, 0xc1, 0x54, 0xbe, 0xaa, 0x5c, 0x7e, 0x41, 0xb2, 0x30, 0x0a, + 0xd0, 0xf4, 0x97, 0x12, 0x5a, 0xc8, 0x8e, 0x53, 0x79, 0xfd, 0x74, 0x15, 0x98, 0x9d, 0xeb, 0xcb, + 0x1b, 0x33, 0xd1, 0x08, 0x35, 0xaf, 0x33, 0x35, 0xdf, 0x51, 0xde, 0x3a, 0x46, 0xcd, 0x6c, 0xf7, + 0x4d, 0xb5, 0xcc, 0x36, 0xe0, 0x13, 0xb5, 0x3c, 0x66, 0x4c, 0x2e, 0x6f, 0xcc, 0x44, 0x73, 0x0a, + 0x2d, 0xfd, 0x0c, 0xc1, 0x5d, 0xe9, 0xfa, 0xe6, 0x57, 0x12, 0x7a, 0xbb, 0x13, 0x0d, 0x5e, 0x2c, + 0x66, 0xf3, 0xc2, 0x91, 0x16, 
0x63, 0x8f, 0xa2, 0x24, 0xb2, 0xa5, 0xc7, 0x44, 0x90, 0xf5, 0x22, + 0x20, 0x59, 0x8d, 0x46, 0xbd, 0xb5, 0x5e, 0x10, 0xb2, 0xff, 0x89, 0xac, 0xf1, 0x4f, 0xfe, 0xb0, + 0x1f, 0x7f, 0xc7, 0x9f, 0x5f, 0xee, 0xa5, 0x80, 0x27, 0x25, 0x46, 0xb1, 0xf1, 0x9f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xcc, 0x93, 0x36, 0x44, 0x2d, 0x23, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/job_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/job_service.pb.go new file mode 100644 index 000000000..b99add0ec --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/job_service.pb.go @@ -0,0 +1,1822 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1/job_service.proto + +/* +Package ml is a generated protocol buffer package. + +It is generated from these files: + google/cloud/ml/v1/job_service.proto + google/cloud/ml/v1/model_service.proto + google/cloud/ml/v1/operation_metadata.proto + google/cloud/ml/v1/prediction_service.proto + google/cloud/ml/v1/project_service.proto + +It has these top-level messages: + TrainingInput + HyperparameterSpec + ParameterSpec + HyperparameterOutput + TrainingOutput + PredictionInput + PredictionOutput + Job + CreateJobRequest + ListJobsRequest + ListJobsResponse + GetJobRequest + CancelJobRequest + Model + Version + ManualScaling + CreateModelRequest + ListModelsRequest + ListModelsResponse + GetModelRequest + DeleteModelRequest + CreateVersionRequest + ListVersionsRequest + ListVersionsResponse + GetVersionRequest + DeleteVersionRequest + SetDefaultVersionRequest + OperationMetadata + PredictRequest + GetConfigRequest + GetConfigResponse +*/ +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import 
google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A scale tier is an abstract representation of the resources Cloud ML +// will allocate to a training job. When selecting a scale tier for your +// training job, you should consider the size of your training dataset and +// the complexity of your model. As the tiers increase, virtual machines are +// added to handle your job, and the individual machines in the cluster +// generally have more memory and greater processing power than they do at +// lower tiers. The number of training units charged per hour of processing +// increases as tiers get more advanced. Refer to the +// [pricing guide](/ml/pricing) for more details. Note that in addition to +// incurring costs, your use of training resources is constrained by the +// [quota policy](/ml/quota). +type TrainingInput_ScaleTier int32 + +const ( + // A single worker instance. This tier is suitable for learning how to use + // Cloud ML, and for experimenting with new models using small datasets. + TrainingInput_BASIC TrainingInput_ScaleTier = 0 + // Many workers and a few parameter servers. + TrainingInput_STANDARD_1 TrainingInput_ScaleTier = 1 + // A large number of workers with many parameter servers. + TrainingInput_PREMIUM_1 TrainingInput_ScaleTier = 3 + // A single worker instance [with a GPU](ml/docs/how-tos/using-gpus). 
+ TrainingInput_BASIC_GPU TrainingInput_ScaleTier = 6 + // The CUSTOM tier is not a set tier, but rather enables you to use your + // own cluster specification. When you use this tier, set values to + // configure your processing cluster according to these guidelines: + // + // * You _must_ set `TrainingInput.masterType` to specify the type + // of machine to use for your master node. This is the only required + // setting. + // + // * You _may_ set `TrainingInput.workerCount` to specify the number of + // workers to use. If you specify one or more workers, you _must_ also + // set `TrainingInput.workerType` to specify the type of machine to use + // for your worker nodes. + // + // * You _may_ set `TrainingInput.parameterServerCount` to specify the + // number of parameter servers to use. If you specify one or more + // parameter servers, you _must_ also set + // `TrainingInput.parameterServerType` to specify the type of machine to + // use for your parameter servers. + // + // Note that all of your workers must use the same machine type, which can + // be different from your parameter server type and master type. Your + // parameter servers must likewise use the same machine type, which can be + // different from your worker type and master type. + TrainingInput_CUSTOM TrainingInput_ScaleTier = 5 +) + +var TrainingInput_ScaleTier_name = map[int32]string{ + 0: "BASIC", + 1: "STANDARD_1", + 3: "PREMIUM_1", + 6: "BASIC_GPU", + 5: "CUSTOM", +} +var TrainingInput_ScaleTier_value = map[string]int32{ + "BASIC": 0, + "STANDARD_1": 1, + "PREMIUM_1": 3, + "BASIC_GPU": 6, + "CUSTOM": 5, +} + +func (x TrainingInput_ScaleTier) String() string { + return proto.EnumName(TrainingInput_ScaleTier_name, int32(x)) +} +func (TrainingInput_ScaleTier) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// The available types of optimization goals. +type HyperparameterSpec_GoalType int32 + +const ( + // Goal Type will default to maximize. 
+ HyperparameterSpec_GOAL_TYPE_UNSPECIFIED HyperparameterSpec_GoalType = 0 + // Maximize the goal metric. + HyperparameterSpec_MAXIMIZE HyperparameterSpec_GoalType = 1 + // Minimize the goal metric. + HyperparameterSpec_MINIMIZE HyperparameterSpec_GoalType = 2 +) + +var HyperparameterSpec_GoalType_name = map[int32]string{ + 0: "GOAL_TYPE_UNSPECIFIED", + 1: "MAXIMIZE", + 2: "MINIMIZE", +} +var HyperparameterSpec_GoalType_value = map[string]int32{ + "GOAL_TYPE_UNSPECIFIED": 0, + "MAXIMIZE": 1, + "MINIMIZE": 2, +} + +func (x HyperparameterSpec_GoalType) String() string { + return proto.EnumName(HyperparameterSpec_GoalType_name, int32(x)) +} +func (HyperparameterSpec_GoalType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +// The type of the parameter. +type ParameterSpec_ParameterType int32 + +const ( + // You must specify a valid type. Using this unspecified type will result in + // an error. + ParameterSpec_PARAMETER_TYPE_UNSPECIFIED ParameterSpec_ParameterType = 0 + // Type for real-valued parameters. + ParameterSpec_DOUBLE ParameterSpec_ParameterType = 1 + // Type for integral parameters. + ParameterSpec_INTEGER ParameterSpec_ParameterType = 2 + // The parameter is categorical, with a value chosen from the categories + // field. + ParameterSpec_CATEGORICAL ParameterSpec_ParameterType = 3 + // The parameter is real valued, with a fixed set of feasible points. If + // `type==DISCRETE`, feasible_points must be provided, and + // {`min_value`, `max_value`} will be ignored. 
+ ParameterSpec_DISCRETE ParameterSpec_ParameterType = 4 +) + +var ParameterSpec_ParameterType_name = map[int32]string{ + 0: "PARAMETER_TYPE_UNSPECIFIED", + 1: "DOUBLE", + 2: "INTEGER", + 3: "CATEGORICAL", + 4: "DISCRETE", +} +var ParameterSpec_ParameterType_value = map[string]int32{ + "PARAMETER_TYPE_UNSPECIFIED": 0, + "DOUBLE": 1, + "INTEGER": 2, + "CATEGORICAL": 3, + "DISCRETE": 4, +} + +func (x ParameterSpec_ParameterType) String() string { + return proto.EnumName(ParameterSpec_ParameterType_name, int32(x)) +} +func (ParameterSpec_ParameterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +// The type of scaling that should be applied to this parameter. +type ParameterSpec_ScaleType int32 + +const ( + // By default, no scaling is applied. + ParameterSpec_NONE ParameterSpec_ScaleType = 0 + // Scales the feasible space to (0, 1) linearly. + ParameterSpec_UNIT_LINEAR_SCALE ParameterSpec_ScaleType = 1 + // Scales the feasible space logarithmically to (0, 1). The entire feasible + // space must be strictly positive. + ParameterSpec_UNIT_LOG_SCALE ParameterSpec_ScaleType = 2 + // Scales the feasible space "reverse" logarithmically to (0, 1). The result + // is that values close to the top of the feasible space are spread out more + // than points near the bottom. The entire feasible space must be strictly + // positive. 
+ ParameterSpec_UNIT_REVERSE_LOG_SCALE ParameterSpec_ScaleType = 3 +) + +var ParameterSpec_ScaleType_name = map[int32]string{ + 0: "NONE", + 1: "UNIT_LINEAR_SCALE", + 2: "UNIT_LOG_SCALE", + 3: "UNIT_REVERSE_LOG_SCALE", +} +var ParameterSpec_ScaleType_value = map[string]int32{ + "NONE": 0, + "UNIT_LINEAR_SCALE": 1, + "UNIT_LOG_SCALE": 2, + "UNIT_REVERSE_LOG_SCALE": 3, +} + +func (x ParameterSpec_ScaleType) String() string { + return proto.EnumName(ParameterSpec_ScaleType_name, int32(x)) +} +func (ParameterSpec_ScaleType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +// The format used to separate data instances in the source files. +type PredictionInput_DataFormat int32 + +const ( + // Unspecified format. + PredictionInput_DATA_FORMAT_UNSPECIFIED PredictionInput_DataFormat = 0 + // The source file is a text file with instances separated by the + // new-line character. + PredictionInput_TEXT PredictionInput_DataFormat = 1 + // The source file is a TFRecord file. + PredictionInput_TF_RECORD PredictionInput_DataFormat = 2 + // The source file is a GZIP-compressed TFRecord file. + PredictionInput_TF_RECORD_GZIP PredictionInput_DataFormat = 3 +) + +var PredictionInput_DataFormat_name = map[int32]string{ + 0: "DATA_FORMAT_UNSPECIFIED", + 1: "TEXT", + 2: "TF_RECORD", + 3: "TF_RECORD_GZIP", +} +var PredictionInput_DataFormat_value = map[string]int32{ + "DATA_FORMAT_UNSPECIFIED": 0, + "TEXT": 1, + "TF_RECORD": 2, + "TF_RECORD_GZIP": 3, +} + +func (x PredictionInput_DataFormat) String() string { + return proto.EnumName(PredictionInput_DataFormat_name, int32(x)) +} +func (PredictionInput_DataFormat) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 0} +} + +// Describes the job state. +type Job_State int32 + +const ( + // The job state is unspecified. + Job_STATE_UNSPECIFIED Job_State = 0 + // The job has been just created and processing has not yet begun. + Job_QUEUED Job_State = 1 + // The service is preparing to run the job. 
+ Job_PREPARING Job_State = 2 + // The job is in progress. + Job_RUNNING Job_State = 3 + // The job completed successfully. + Job_SUCCEEDED Job_State = 4 + // The job failed. + // `error_message` should contain the details of the failure. + Job_FAILED Job_State = 5 + // The job is being cancelled. + // `error_message` should describe the reason for the cancellation. + Job_CANCELLING Job_State = 6 + // The job has been cancelled. + // `error_message` should describe the reason for the cancellation. + Job_CANCELLED Job_State = 7 +) + +var Job_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "QUEUED", + 2: "PREPARING", + 3: "RUNNING", + 4: "SUCCEEDED", + 5: "FAILED", + 6: "CANCELLING", + 7: "CANCELLED", +} +var Job_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "QUEUED": 1, + "PREPARING": 2, + "RUNNING": 3, + "SUCCEEDED": 4, + "FAILED": 5, + "CANCELLING": 6, + "CANCELLED": 7, +} + +func (x Job_State) String() string { + return proto.EnumName(Job_State_name, int32(x)) +} +func (Job_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// Represents input parameters for a training job. +type TrainingInput struct { + // Required. Specifies the machine types, the number of replicas for workers + // and parameter servers. + ScaleTier TrainingInput_ScaleTier `protobuf:"varint,1,opt,name=scale_tier,json=scaleTier,enum=google.cloud.ml.v1.TrainingInput_ScaleTier" json:"scale_tier,omitempty"` + // Optional. Specifies the type of virtual machine to use for your training + // job's master worker. + // + // The following types are supported: + // + //
+ //
standard
+ //
+ // A basic machine configuration suitable for training simple models with + // small to moderate datasets. + //
+ //
large_model
+ //
+ // A machine with a lot of memory, specially suited for parameter servers + // when your model is large (having many hidden layers or layers with very + // large numbers of nodes). + //
+ //
complex_model_s
+ //
+ // A machine suitable for the master and workers of the cluster when your + // model requires more computation than the standard machine can handle + // satisfactorily. + //
+ //
complex_model_m
+ //
+ // A machine with roughly twice the number of cores and roughly double the + // memory of complex_model_s. + //
+ //
complex_model_l
+ //
+ // A machine with roughly twice the number of cores and roughly double the + // memory of complex_model_m. + //
+ //
standard_gpu
+ //
+ // A machine equivalent to standard that + // also includes a + // + // GPU that you can use in your trainer. + //
+ //
complex_model_m_gpu
+ //
+ // A machine equivalent to + // coplex_model_m that also includes + // four GPUs. + //
+ //
+ // + // You must set this value when `scaleTier` is set to `CUSTOM`. + MasterType string `protobuf:"bytes,2,opt,name=master_type,json=masterType" json:"master_type,omitempty"` + // Optional. Specifies the type of virtual machine to use for your training + // job's worker nodes. + // + // The supported values are the same as those described in the entry for + // `masterType`. + // + // This value must be present when `scaleTier` is set to `CUSTOM` and + // `workerCount` is greater than zero. + WorkerType string `protobuf:"bytes,3,opt,name=worker_type,json=workerType" json:"worker_type,omitempty"` + // Optional. Specifies the type of virtual machine to use for your training + // job's parameter server. + // + // The supported values are the same as those described in the entry for + // `master_type`. + // + // This value must be present when `scaleTier` is set to `CUSTOM` and + // `parameter_server_count` is greater than zero. + ParameterServerType string `protobuf:"bytes,4,opt,name=parameter_server_type,json=parameterServerType" json:"parameter_server_type,omitempty"` + // Optional. The number of worker replicas to use for the training job. Each + // replica in the cluster will be of the type specified in `worker_type`. + // + // This value can only be used when `scale_tier` is set to `CUSTOM`. If you + // set this value, you must also set `worker_type`. + WorkerCount int64 `protobuf:"varint,5,opt,name=worker_count,json=workerCount" json:"worker_count,omitempty"` + // Optional. The number of parameter server replicas to use for the training + // job. Each replica in the cluster will be of the type specified in + // `parameter_server_type`. + // + // This value can only be used when `scale_tier` is set to `CUSTOM`.If you + // set this value, you must also set `parameter_server_type`. + ParameterServerCount int64 `protobuf:"varint,6,opt,name=parameter_server_count,json=parameterServerCount" json:"parameter_server_count,omitempty"` + // Required. 
The Google Cloud Storage location of the packages with + // the training program and any additional dependencies. + PackageUris []string `protobuf:"bytes,7,rep,name=package_uris,json=packageUris" json:"package_uris,omitempty"` + // Required. The Python module name to run after installing the packages. + PythonModule string `protobuf:"bytes,8,opt,name=python_module,json=pythonModule" json:"python_module,omitempty"` + // Optional. Command line arguments to pass to the program. + Args []string `protobuf:"bytes,10,rep,name=args" json:"args,omitempty"` + // Optional. The set of Hyperparameters to tune. + Hyperparameters *HyperparameterSpec `protobuf:"bytes,12,opt,name=hyperparameters" json:"hyperparameters,omitempty"` + // Required. The Google Compute Engine region to run the training job in. + Region string `protobuf:"bytes,14,opt,name=region" json:"region,omitempty"` + // Optional. A Google Cloud Storage path in which to store training outputs + // and other data needed for training. This path is passed to your TensorFlow + // program as the 'job_dir' command-line argument. The benefit of specifying + // this field is that Cloud ML validates the path for use in training. + JobDir string `protobuf:"bytes,16,opt,name=job_dir,json=jobDir" json:"job_dir,omitempty"` + // Optional. The Google Cloud ML runtime version to use for training. If not + // set, Google Cloud ML will choose the latest stable version. 
+ RuntimeVersion string `protobuf:"bytes,15,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` +} + +func (m *TrainingInput) Reset() { *m = TrainingInput{} } +func (m *TrainingInput) String() string { return proto.CompactTextString(m) } +func (*TrainingInput) ProtoMessage() {} +func (*TrainingInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *TrainingInput) GetScaleTier() TrainingInput_ScaleTier { + if m != nil { + return m.ScaleTier + } + return TrainingInput_BASIC +} + +func (m *TrainingInput) GetMasterType() string { + if m != nil { + return m.MasterType + } + return "" +} + +func (m *TrainingInput) GetWorkerType() string { + if m != nil { + return m.WorkerType + } + return "" +} + +func (m *TrainingInput) GetParameterServerType() string { + if m != nil { + return m.ParameterServerType + } + return "" +} + +func (m *TrainingInput) GetWorkerCount() int64 { + if m != nil { + return m.WorkerCount + } + return 0 +} + +func (m *TrainingInput) GetParameterServerCount() int64 { + if m != nil { + return m.ParameterServerCount + } + return 0 +} + +func (m *TrainingInput) GetPackageUris() []string { + if m != nil { + return m.PackageUris + } + return nil +} + +func (m *TrainingInput) GetPythonModule() string { + if m != nil { + return m.PythonModule + } + return "" +} + +func (m *TrainingInput) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *TrainingInput) GetHyperparameters() *HyperparameterSpec { + if m != nil { + return m.Hyperparameters + } + return nil +} + +func (m *TrainingInput) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *TrainingInput) GetJobDir() string { + if m != nil { + return m.JobDir + } + return "" +} + +func (m *TrainingInput) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +// Represents a set of hyperparameters to optimize. 
+type HyperparameterSpec struct { + // Required. The type of goal to use for tuning. Available types are + // `MAXIMIZE` and `MINIMIZE`. + // + // Defaults to `MAXIMIZE`. + Goal HyperparameterSpec_GoalType `protobuf:"varint,1,opt,name=goal,enum=google.cloud.ml.v1.HyperparameterSpec_GoalType" json:"goal,omitempty"` + // Required. The set of parameters to tune. + Params []*ParameterSpec `protobuf:"bytes,2,rep,name=params" json:"params,omitempty"` + // Optional. How many training trials should be attempted to optimize + // the specified hyperparameters. + // + // Defaults to one. + MaxTrials int32 `protobuf:"varint,3,opt,name=max_trials,json=maxTrials" json:"max_trials,omitempty"` + // Optional. The number of training trials to run concurrently. + // You can reduce the time it takes to perform hyperparameter tuning by adding + // trials in parallel. However, each trail only benefits from the information + // gained in completed trials. That means that a trial does not get access to + // the results of trials running at the same time, which could reduce the + // quality of the overall optimization. + // + // Each trial will use the same scale tier and machine types. + // + // Defaults to one. + MaxParallelTrials int32 `protobuf:"varint,4,opt,name=max_parallel_trials,json=maxParallelTrials" json:"max_parallel_trials,omitempty"` + // Optional. The Tensorflow summary tag name to use for optimizing trials. For + // current versions of Tensorflow, this tag name should exactly match what is + // shown in Tensorboard, including all scopes. For versions of Tensorflow + // prior to 0.12, this should be only the tag passed to tf.Summary. + // By default, "training/hptuning/metric" will be used. 
+ HyperparameterMetricTag string `protobuf:"bytes,5,opt,name=hyperparameter_metric_tag,json=hyperparameterMetricTag" json:"hyperparameter_metric_tag,omitempty"` +} + +func (m *HyperparameterSpec) Reset() { *m = HyperparameterSpec{} } +func (m *HyperparameterSpec) String() string { return proto.CompactTextString(m) } +func (*HyperparameterSpec) ProtoMessage() {} +func (*HyperparameterSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *HyperparameterSpec) GetGoal() HyperparameterSpec_GoalType { + if m != nil { + return m.Goal + } + return HyperparameterSpec_GOAL_TYPE_UNSPECIFIED +} + +func (m *HyperparameterSpec) GetParams() []*ParameterSpec { + if m != nil { + return m.Params + } + return nil +} + +func (m *HyperparameterSpec) GetMaxTrials() int32 { + if m != nil { + return m.MaxTrials + } + return 0 +} + +func (m *HyperparameterSpec) GetMaxParallelTrials() int32 { + if m != nil { + return m.MaxParallelTrials + } + return 0 +} + +func (m *HyperparameterSpec) GetHyperparameterMetricTag() string { + if m != nil { + return m.HyperparameterMetricTag + } + return "" +} + +// Represents a single hyperparameter to optimize. +type ParameterSpec struct { + // Required. The parameter name must be unique amongst all ParameterConfigs in + // a HyperparameterSpec message. E.g., "learning_rate". + ParameterName string `protobuf:"bytes,1,opt,name=parameter_name,json=parameterName" json:"parameter_name,omitempty"` + // Required. The type of the parameter. + Type ParameterSpec_ParameterType `protobuf:"varint,4,opt,name=type,enum=google.cloud.ml.v1.ParameterSpec_ParameterType" json:"type,omitempty"` + // Required if type is `DOUBLE` or `INTEGER`. This field + // should be unset if type is `CATEGORICAL`. This value should be integers if + // type is INTEGER. + MinValue float64 `protobuf:"fixed64,2,opt,name=min_value,json=minValue" json:"min_value,omitempty"` + // Required if typeis `DOUBLE` or `INTEGER`. 
This field + // should be unset if type is `CATEGORICAL`. This value should be integers if + // type is `INTEGER`. + MaxValue float64 `protobuf:"fixed64,3,opt,name=max_value,json=maxValue" json:"max_value,omitempty"` + // Required if type is `CATEGORICAL`. The list of possible categories. + CategoricalValues []string `protobuf:"bytes,5,rep,name=categorical_values,json=categoricalValues" json:"categorical_values,omitempty"` + // Required if type is `DISCRETE`. + // A list of feasible points. + // The list should be in strictly increasing order. For instance, this + // parameter might have possible settings of 1.5, 2.5, and 4.0. This list + // should not contain more than 1,000 values. + DiscreteValues []float64 `protobuf:"fixed64,6,rep,packed,name=discrete_values,json=discreteValues" json:"discrete_values,omitempty"` + // Optional. How the parameter should be scaled to the hypercube. + // Leave unset for categorical parameters. + // Some kind of scaling is strongly recommended for real or integral + // parameters (e.g., `UNIT_LINEAR_SCALE`). 
+ ScaleType ParameterSpec_ScaleType `protobuf:"varint,7,opt,name=scale_type,json=scaleType,enum=google.cloud.ml.v1.ParameterSpec_ScaleType" json:"scale_type,omitempty"` +} + +func (m *ParameterSpec) Reset() { *m = ParameterSpec{} } +func (m *ParameterSpec) String() string { return proto.CompactTextString(m) } +func (*ParameterSpec) ProtoMessage() {} +func (*ParameterSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ParameterSpec) GetParameterName() string { + if m != nil { + return m.ParameterName + } + return "" +} + +func (m *ParameterSpec) GetType() ParameterSpec_ParameterType { + if m != nil { + return m.Type + } + return ParameterSpec_PARAMETER_TYPE_UNSPECIFIED +} + +func (m *ParameterSpec) GetMinValue() float64 { + if m != nil { + return m.MinValue + } + return 0 +} + +func (m *ParameterSpec) GetMaxValue() float64 { + if m != nil { + return m.MaxValue + } + return 0 +} + +func (m *ParameterSpec) GetCategoricalValues() []string { + if m != nil { + return m.CategoricalValues + } + return nil +} + +func (m *ParameterSpec) GetDiscreteValues() []float64 { + if m != nil { + return m.DiscreteValues + } + return nil +} + +func (m *ParameterSpec) GetScaleType() ParameterSpec_ScaleType { + if m != nil { + return m.ScaleType + } + return ParameterSpec_NONE +} + +// Represents the result of a single hyperparameter tuning trial from a +// training job. The TrainingOutput object that is returned on successful +// completion of a training job with hyperparameter tuning includes a list +// of HyperparameterOutput objects, one for each successful trial. +type HyperparameterOutput struct { + // The trial id for these results. + TrialId string `protobuf:"bytes,1,opt,name=trial_id,json=trialId" json:"trial_id,omitempty"` + // The hyperparameters given to this trial. 
+ Hyperparameters map[string]string `protobuf:"bytes,2,rep,name=hyperparameters" json:"hyperparameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The final objective metric seen for this trial. + FinalMetric *HyperparameterOutput_HyperparameterMetric `protobuf:"bytes,3,opt,name=final_metric,json=finalMetric" json:"final_metric,omitempty"` + // All recorded object metrics for this trial. + AllMetrics []*HyperparameterOutput_HyperparameterMetric `protobuf:"bytes,4,rep,name=all_metrics,json=allMetrics" json:"all_metrics,omitempty"` +} + +func (m *HyperparameterOutput) Reset() { *m = HyperparameterOutput{} } +func (m *HyperparameterOutput) String() string { return proto.CompactTextString(m) } +func (*HyperparameterOutput) ProtoMessage() {} +func (*HyperparameterOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *HyperparameterOutput) GetTrialId() string { + if m != nil { + return m.TrialId + } + return "" +} + +func (m *HyperparameterOutput) GetHyperparameters() map[string]string { + if m != nil { + return m.Hyperparameters + } + return nil +} + +func (m *HyperparameterOutput) GetFinalMetric() *HyperparameterOutput_HyperparameterMetric { + if m != nil { + return m.FinalMetric + } + return nil +} + +func (m *HyperparameterOutput) GetAllMetrics() []*HyperparameterOutput_HyperparameterMetric { + if m != nil { + return m.AllMetrics + } + return nil +} + +// An observed value of a metric. +type HyperparameterOutput_HyperparameterMetric struct { + // The global training step for this metric. + TrainingStep int64 `protobuf:"varint,1,opt,name=training_step,json=trainingStep" json:"training_step,omitempty"` + // The objective value at this training step. 
+ ObjectiveValue float64 `protobuf:"fixed64,2,opt,name=objective_value,json=objectiveValue" json:"objective_value,omitempty"` +} + +func (m *HyperparameterOutput_HyperparameterMetric) Reset() { + *m = HyperparameterOutput_HyperparameterMetric{} +} +func (m *HyperparameterOutput_HyperparameterMetric) String() string { return proto.CompactTextString(m) } +func (*HyperparameterOutput_HyperparameterMetric) ProtoMessage() {} +func (*HyperparameterOutput_HyperparameterMetric) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 0} +} + +func (m *HyperparameterOutput_HyperparameterMetric) GetTrainingStep() int64 { + if m != nil { + return m.TrainingStep + } + return 0 +} + +func (m *HyperparameterOutput_HyperparameterMetric) GetObjectiveValue() float64 { + if m != nil { + return m.ObjectiveValue + } + return 0 +} + +// Represents results of a training job. Output only. +type TrainingOutput struct { + // The number of hyperparameter tuning trials that completed successfully. + // Only set for hyperparameter tuning jobs. + CompletedTrialCount int64 `protobuf:"varint,1,opt,name=completed_trial_count,json=completedTrialCount" json:"completed_trial_count,omitempty"` + // Results for individual Hyperparameter trials. + // Only set for hyperparameter tuning jobs. + Trials []*HyperparameterOutput `protobuf:"bytes,2,rep,name=trials" json:"trials,omitempty"` + // The amount of ML units consumed by the job. + ConsumedMlUnits float64 `protobuf:"fixed64,3,opt,name=consumed_ml_units,json=consumedMlUnits" json:"consumed_ml_units,omitempty"` + // Whether this job is a hyperparameter tuning job. 
+ IsHyperparameterTuningJob bool `protobuf:"varint,4,opt,name=is_hyperparameter_tuning_job,json=isHyperparameterTuningJob" json:"is_hyperparameter_tuning_job,omitempty"` +} + +func (m *TrainingOutput) Reset() { *m = TrainingOutput{} } +func (m *TrainingOutput) String() string { return proto.CompactTextString(m) } +func (*TrainingOutput) ProtoMessage() {} +func (*TrainingOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *TrainingOutput) GetCompletedTrialCount() int64 { + if m != nil { + return m.CompletedTrialCount + } + return 0 +} + +func (m *TrainingOutput) GetTrials() []*HyperparameterOutput { + if m != nil { + return m.Trials + } + return nil +} + +func (m *TrainingOutput) GetConsumedMlUnits() float64 { + if m != nil { + return m.ConsumedMlUnits + } + return 0 +} + +func (m *TrainingOutput) GetIsHyperparameterTuningJob() bool { + if m != nil { + return m.IsHyperparameterTuningJob + } + return false +} + +// Represents input parameters for a prediction job. +type PredictionInput struct { + // Required. The model or the version to use for prediction. + // + // Types that are valid to be assigned to ModelVersion: + // *PredictionInput_ModelName + // *PredictionInput_VersionName + // *PredictionInput_Uri + ModelVersion isPredictionInput_ModelVersion `protobuf_oneof:"model_version"` + // Required. The format of the input data files. + DataFormat PredictionInput_DataFormat `protobuf:"varint,3,opt,name=data_format,json=dataFormat,enum=google.cloud.ml.v1.PredictionInput_DataFormat" json:"data_format,omitempty"` + // Required. The Google Cloud Storage location of the input data files. + // May contain wildcards. + InputPaths []string `protobuf:"bytes,4,rep,name=input_paths,json=inputPaths" json:"input_paths,omitempty"` + // Required. The output Google Cloud Storage location. + OutputPath string `protobuf:"bytes,5,opt,name=output_path,json=outputPath" json:"output_path,omitempty"` + // Optional. 
The maximum number of workers to be used for parallel processing. + // Defaults to 10 if not specified. + MaxWorkerCount int64 `protobuf:"varint,6,opt,name=max_worker_count,json=maxWorkerCount" json:"max_worker_count,omitempty"` + // Required. The Google Compute Engine region to run the prediction job in. + Region string `protobuf:"bytes,7,opt,name=region" json:"region,omitempty"` + // Optional. The Google Cloud ML runtime version to use for this batch + // prediction. If not set, Google Cloud ML will pick the runtime version used + // during the CreateVersion request for this model version, or choose the + // latest stable version when model version information is not available + // such as when the model is specified by uri. + RuntimeVersion string `protobuf:"bytes,8,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` +} + +func (m *PredictionInput) Reset() { *m = PredictionInput{} } +func (m *PredictionInput) String() string { return proto.CompactTextString(m) } +func (*PredictionInput) ProtoMessage() {} +func (*PredictionInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type isPredictionInput_ModelVersion interface { + isPredictionInput_ModelVersion() +} + +type PredictionInput_ModelName struct { + ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,oneof"` +} +type PredictionInput_VersionName struct { + VersionName string `protobuf:"bytes,2,opt,name=version_name,json=versionName,oneof"` +} +type PredictionInput_Uri struct { + Uri string `protobuf:"bytes,9,opt,name=uri,oneof"` +} + +func (*PredictionInput_ModelName) isPredictionInput_ModelVersion() {} +func (*PredictionInput_VersionName) isPredictionInput_ModelVersion() {} +func (*PredictionInput_Uri) isPredictionInput_ModelVersion() {} + +func (m *PredictionInput) GetModelVersion() isPredictionInput_ModelVersion { + if m != nil { + return m.ModelVersion + } + return nil +} + +func (m *PredictionInput) GetModelName() string { + if x, ok := 
m.GetModelVersion().(*PredictionInput_ModelName); ok { + return x.ModelName + } + return "" +} + +func (m *PredictionInput) GetVersionName() string { + if x, ok := m.GetModelVersion().(*PredictionInput_VersionName); ok { + return x.VersionName + } + return "" +} + +func (m *PredictionInput) GetUri() string { + if x, ok := m.GetModelVersion().(*PredictionInput_Uri); ok { + return x.Uri + } + return "" +} + +func (m *PredictionInput) GetDataFormat() PredictionInput_DataFormat { + if m != nil { + return m.DataFormat + } + return PredictionInput_DATA_FORMAT_UNSPECIFIED +} + +func (m *PredictionInput) GetInputPaths() []string { + if m != nil { + return m.InputPaths + } + return nil +} + +func (m *PredictionInput) GetOutputPath() string { + if m != nil { + return m.OutputPath + } + return "" +} + +func (m *PredictionInput) GetMaxWorkerCount() int64 { + if m != nil { + return m.MaxWorkerCount + } + return 0 +} + +func (m *PredictionInput) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *PredictionInput) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PredictionInput) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PredictionInput_OneofMarshaler, _PredictionInput_OneofUnmarshaler, _PredictionInput_OneofSizer, []interface{}{ + (*PredictionInput_ModelName)(nil), + (*PredictionInput_VersionName)(nil), + (*PredictionInput_Uri)(nil), + } +} + +func _PredictionInput_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PredictionInput) + // model_version + switch x := m.ModelVersion.(type) { + case *PredictionInput_ModelName: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ModelName) + case *PredictionInput_VersionName: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.VersionName) + case *PredictionInput_Uri: + b.EncodeVarint(9<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Uri) + case nil: + default: + return fmt.Errorf("PredictionInput.ModelVersion has unexpected type %T", x) + } + return nil +} + +func _PredictionInput_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PredictionInput) + switch tag { + case 1: // model_version.model_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ModelVersion = &PredictionInput_ModelName{x} + return true, err + case 2: // model_version.version_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ModelVersion = &PredictionInput_VersionName{x} + return true, err + case 9: // model_version.uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ModelVersion = &PredictionInput_Uri{x} + return true, err + default: + return false, nil + } +} + +func _PredictionInput_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PredictionInput) 
+ // model_version + switch x := m.ModelVersion.(type) { + case *PredictionInput_ModelName: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ModelName))) + n += len(x.ModelName) + case *PredictionInput_VersionName: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.VersionName))) + n += len(x.VersionName) + case *PredictionInput_Uri: + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Uri))) + n += len(x.Uri) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents results of a prediction job. +type PredictionOutput struct { + // The output Google Cloud Storage location provided at the job creation time. + OutputPath string `protobuf:"bytes,1,opt,name=output_path,json=outputPath" json:"output_path,omitempty"` + // The number of generated predictions. + PredictionCount int64 `protobuf:"varint,2,opt,name=prediction_count,json=predictionCount" json:"prediction_count,omitempty"` + // The number of data instances which resulted in errors. + ErrorCount int64 `protobuf:"varint,3,opt,name=error_count,json=errorCount" json:"error_count,omitempty"` + // Node hours used by the batch prediction job. 
+ NodeHours float64 `protobuf:"fixed64,4,opt,name=node_hours,json=nodeHours" json:"node_hours,omitempty"` +} + +func (m *PredictionOutput) Reset() { *m = PredictionOutput{} } +func (m *PredictionOutput) String() string { return proto.CompactTextString(m) } +func (*PredictionOutput) ProtoMessage() {} +func (*PredictionOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PredictionOutput) GetOutputPath() string { + if m != nil { + return m.OutputPath + } + return "" +} + +func (m *PredictionOutput) GetPredictionCount() int64 { + if m != nil { + return m.PredictionCount + } + return 0 +} + +func (m *PredictionOutput) GetErrorCount() int64 { + if m != nil { + return m.ErrorCount + } + return 0 +} + +func (m *PredictionOutput) GetNodeHours() float64 { + if m != nil { + return m.NodeHours + } + return 0 +} + +// Represents a training or prediction job. +type Job struct { + // Required. The user-specified id of the job. + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId" json:"job_id,omitempty"` + // Required. Parameters to create a job. + // + // Types that are valid to be assigned to Input: + // *Job_TrainingInput + // *Job_PredictionInput + Input isJob_Input `protobuf_oneof:"input"` + // Output only. When the job was created. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Output only. When the job processing was started. + StartTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Output only. When the job processing was completed. + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Output only. The detailed state of a job. + State Job_State `protobuf:"varint,7,opt,name=state,enum=google.cloud.ml.v1.Job_State" json:"state,omitempty"` + // Output only. The details of a failure or a cancellation. 
+ ErrorMessage string `protobuf:"bytes,8,opt,name=error_message,json=errorMessage" json:"error_message,omitempty"` + // Output only. The current result of the job. + // + // Types that are valid to be assigned to Output: + // *Job_TrainingOutput + // *Job_PredictionOutput + Output isJob_Output `protobuf_oneof:"output"` +} + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isJob_Input interface { + isJob_Input() +} +type isJob_Output interface { + isJob_Output() +} + +type Job_TrainingInput struct { + TrainingInput *TrainingInput `protobuf:"bytes,2,opt,name=training_input,json=trainingInput,oneof"` +} +type Job_PredictionInput struct { + PredictionInput *PredictionInput `protobuf:"bytes,3,opt,name=prediction_input,json=predictionInput,oneof"` +} +type Job_TrainingOutput struct { + TrainingOutput *TrainingOutput `protobuf:"bytes,9,opt,name=training_output,json=trainingOutput,oneof"` +} +type Job_PredictionOutput struct { + PredictionOutput *PredictionOutput `protobuf:"bytes,10,opt,name=prediction_output,json=predictionOutput,oneof"` +} + +func (*Job_TrainingInput) isJob_Input() {} +func (*Job_PredictionInput) isJob_Input() {} +func (*Job_TrainingOutput) isJob_Output() {} +func (*Job_PredictionOutput) isJob_Output() {} + +func (m *Job) GetInput() isJob_Input { + if m != nil { + return m.Input + } + return nil +} +func (m *Job) GetOutput() isJob_Output { + if m != nil { + return m.Output + } + return nil +} + +func (m *Job) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func (m *Job) GetTrainingInput() *TrainingInput { + if x, ok := m.GetInput().(*Job_TrainingInput); ok { + return x.TrainingInput + } + return nil +} + +func (m *Job) GetPredictionInput() *PredictionInput { + if x, ok := m.GetInput().(*Job_PredictionInput); ok { + return x.PredictionInput + } + return 
nil +} + +func (m *Job) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Job) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Job) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Job) GetState() Job_State { + if m != nil { + return m.State + } + return Job_STATE_UNSPECIFIED +} + +func (m *Job) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func (m *Job) GetTrainingOutput() *TrainingOutput { + if x, ok := m.GetOutput().(*Job_TrainingOutput); ok { + return x.TrainingOutput + } + return nil +} + +func (m *Job) GetPredictionOutput() *PredictionOutput { + if x, ok := m.GetOutput().(*Job_PredictionOutput); ok { + return x.PredictionOutput + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Job_OneofMarshaler, _Job_OneofUnmarshaler, _Job_OneofSizer, []interface{}{ + (*Job_TrainingInput)(nil), + (*Job_PredictionInput)(nil), + (*Job_TrainingOutput)(nil), + (*Job_PredictionOutput)(nil), + } +} + +func _Job_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Job) + // input + switch x := m.Input.(type) { + case *Job_TrainingInput: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TrainingInput); err != nil { + return err + } + case *Job_PredictionInput: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PredictionInput); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Job.Input has unexpected type %T", x) + } + // output + switch x := m.Output.(type) { + case *Job_TrainingOutput: + b.EncodeVarint(9<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.TrainingOutput); err != nil { + return err + } + case *Job_PredictionOutput: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PredictionOutput); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Job.Output has unexpected type %T", x) + } + return nil +} + +func _Job_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Job) + switch tag { + case 2: // input.training_input + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TrainingInput) + err := b.DecodeMessage(msg) + m.Input = &Job_TrainingInput{msg} + return true, err + case 3: // input.prediction_input + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PredictionInput) + err := b.DecodeMessage(msg) + m.Input = &Job_PredictionInput{msg} + return true, err + case 9: // output.training_output + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TrainingOutput) + err := b.DecodeMessage(msg) + m.Output = &Job_TrainingOutput{msg} + return true, err + case 10: // output.prediction_output + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PredictionOutput) + err := b.DecodeMessage(msg) + m.Output = &Job_PredictionOutput{msg} + return true, err + default: + return false, nil + } +} + +func _Job_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Job) + // input + switch x := m.Input.(type) { + case *Job_TrainingInput: + s := proto.Size(x.TrainingInput) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_PredictionInput: + s := proto.Size(x.PredictionInput) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // output + switch x := m.Output.(type) 
{ + case *Job_TrainingOutput: + s := proto.Size(x.TrainingOutput) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_PredictionOutput: + s := proto.Size(x.PredictionOutput) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message for the CreateJob method. +type CreateJobRequest struct { + // Required. The project name. + // + // Authorization: requires `Editor` role on the specified project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The job to create. + Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` +} + +func (m *CreateJobRequest) Reset() { *m = CreateJobRequest{} } +func (m *CreateJobRequest) String() string { return proto.CompactTextString(m) } +func (*CreateJobRequest) ProtoMessage() {} +func (*CreateJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CreateJobRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateJobRequest) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +// Request message for the ListJobs method. +type ListJobsRequest struct { + // Required. The name of the project for which to list jobs. + // + // Authorization: requires `Viewer` role on the specified project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. Specifies the subset of jobs to retrieve. + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // Optional. A page token to request the next page of results. + // + // You get the token from the `next_page_token` field of the response from + // the previous call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. 
The number of jobs to retrieve per "page" of results. If there + // are more remaining results than this number, the response message will + // contain a valid value in the `next_page_token` field. + // + // The default value is 20, and the maximum page size is 100. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListJobsRequest) Reset() { *m = ListJobsRequest{} } +func (m *ListJobsRequest) String() string { return proto.CompactTextString(m) } +func (*ListJobsRequest) ProtoMessage() {} +func (*ListJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ListJobsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListJobsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListJobsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListJobsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for the ListJobs method. +type ListJobsResponse struct { + // The list of jobs. + Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"` + // Optional. Pass this token as the `page_token` field of the request for a + // subsequent call. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListJobsResponse) Reset() { *m = ListJobsResponse{} } +func (m *ListJobsResponse) String() string { return proto.CompactTextString(m) } +func (*ListJobsResponse) ProtoMessage() {} +func (*ListJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ListJobsResponse) GetJobs() []*Job { + if m != nil { + return m.Jobs + } + return nil +} + +func (m *ListJobsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for the GetJob method. +type GetJobRequest struct { + // Required. The name of the job to get the description of. + // + // Authorization: requires `Viewer` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetJobRequest) Reset() { *m = GetJobRequest{} } +func (m *GetJobRequest) String() string { return proto.CompactTextString(m) } +func (*GetJobRequest) ProtoMessage() {} +func (*GetJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *GetJobRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for the CancelJob method. +type CancelJobRequest struct { + // Required. The name of the job to cancel. + // + // Authorization: requires `Editor` role on the parent project. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *CancelJobRequest) Reset() { *m = CancelJobRequest{} } +func (m *CancelJobRequest) String() string { return proto.CompactTextString(m) } +func (*CancelJobRequest) ProtoMessage() {} +func (*CancelJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *CancelJobRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*TrainingInput)(nil), "google.cloud.ml.v1.TrainingInput") + proto.RegisterType((*HyperparameterSpec)(nil), "google.cloud.ml.v1.HyperparameterSpec") + proto.RegisterType((*ParameterSpec)(nil), "google.cloud.ml.v1.ParameterSpec") + proto.RegisterType((*HyperparameterOutput)(nil), "google.cloud.ml.v1.HyperparameterOutput") + proto.RegisterType((*HyperparameterOutput_HyperparameterMetric)(nil), "google.cloud.ml.v1.HyperparameterOutput.HyperparameterMetric") + proto.RegisterType((*TrainingOutput)(nil), "google.cloud.ml.v1.TrainingOutput") + proto.RegisterType((*PredictionInput)(nil), "google.cloud.ml.v1.PredictionInput") + proto.RegisterType((*PredictionOutput)(nil), "google.cloud.ml.v1.PredictionOutput") + proto.RegisterType((*Job)(nil), "google.cloud.ml.v1.Job") + proto.RegisterType((*CreateJobRequest)(nil), "google.cloud.ml.v1.CreateJobRequest") + proto.RegisterType((*ListJobsRequest)(nil), "google.cloud.ml.v1.ListJobsRequest") + proto.RegisterType((*ListJobsResponse)(nil), "google.cloud.ml.v1.ListJobsResponse") + proto.RegisterType((*GetJobRequest)(nil), "google.cloud.ml.v1.GetJobRequest") + proto.RegisterType((*CancelJobRequest)(nil), "google.cloud.ml.v1.CancelJobRequest") + proto.RegisterEnum("google.cloud.ml.v1.TrainingInput_ScaleTier", TrainingInput_ScaleTier_name, TrainingInput_ScaleTier_value) + proto.RegisterEnum("google.cloud.ml.v1.HyperparameterSpec_GoalType", HyperparameterSpec_GoalType_name, HyperparameterSpec_GoalType_value) + 
proto.RegisterEnum("google.cloud.ml.v1.ParameterSpec_ParameterType", ParameterSpec_ParameterType_name, ParameterSpec_ParameterType_value) + proto.RegisterEnum("google.cloud.ml.v1.ParameterSpec_ScaleType", ParameterSpec_ScaleType_name, ParameterSpec_ScaleType_value) + proto.RegisterEnum("google.cloud.ml.v1.PredictionInput_DataFormat", PredictionInput_DataFormat_name, PredictionInput_DataFormat_value) + proto.RegisterEnum("google.cloud.ml.v1.Job_State", Job_State_name, Job_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for JobService service + +type JobServiceClient interface { + // Creates a training or a batch prediction job. + CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*Job, error) + // Lists the jobs in the project. + ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) + // Describes a job. + GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) + // Cancels a running job. + CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) +} + +type jobServiceClient struct { + cc *grpc.ClientConn +} + +func NewJobServiceClient(cc *grpc.ClientConn) JobServiceClient { + return &jobServiceClient{cc} +} + +func (c *jobServiceClient) CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.JobService/CreateJob", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobServiceClient) ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) { + out := new(ListJobsResponse) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.JobService/ListJobs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobServiceClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.JobService/GetJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobServiceClient) CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.JobService/CancelJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for JobService service + +type JobServiceServer interface { + // Creates a training or a batch prediction job. + CreateJob(context.Context, *CreateJobRequest) (*Job, error) + // Lists the jobs in the project. + ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) + // Describes a job. + GetJob(context.Context, *GetJobRequest) (*Job, error) + // Cancels a running job. 
+ CancelJob(context.Context, *CancelJobRequest) (*google_protobuf1.Empty, error) +} + +func RegisterJobServiceServer(s *grpc.Server, srv JobServiceServer) { + s.RegisterService(&_JobService_serviceDesc, srv) +} + +func _JobService_CreateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobServiceServer).CreateJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.JobService/CreateJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobServiceServer).CreateJob(ctx, req.(*CreateJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobService_ListJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobServiceServer).ListJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.JobService/ListJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobServiceServer).ListJobs(ctx, req.(*ListJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobService_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobServiceServer).GetJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.JobService/GetJob", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(JobServiceServer).GetJob(ctx, req.(*GetJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobService_CancelJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobServiceServer).CancelJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.JobService/CancelJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobServiceServer).CancelJob(ctx, req.(*CancelJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _JobService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.ml.v1.JobService", + HandlerType: (*JobServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateJob", + Handler: _JobService_CreateJob_Handler, + }, + { + MethodName: "ListJobs", + Handler: _JobService_ListJobs_Handler, + }, + { + MethodName: "GetJob", + Handler: _JobService_GetJob_Handler, + }, + { + MethodName: "CancelJob", + Handler: _JobService_CancelJob_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/ml/v1/job_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/ml/v1/job_service.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2070 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xdb, 0x6e, 0x1b, 0xc9, + 0x11, 0x15, 0xaf, 0x22, 0x8b, 0x12, 0x39, 0x6e, 0x5b, 0x36, 0x4d, 0xdb, 0x6b, 0x79, 0xe4, 0x38, + 0xb2, 0x17, 0x21, 0x21, 0xed, 0x06, 0xc8, 0x7a, 0xb1, 0x48, 0x28, 0x72, 0x2c, 0x51, 0x10, 0x29, + 0xa6, 0x39, 0x74, 0x36, 0x46, 0x90, 0x49, 0x93, 0x6c, 0xd3, 0x23, 0xcf, 0x2d, 0x33, 0x4d, 0x45, + 0xda, 0x85, 0x81, 0x20, 0x08, 0xf2, 0x03, 0x79, 0x0f, 0xf2, 0x4d, 0xc9, 
0x1f, 0x04, 0x01, 0xf2, + 0x01, 0x79, 0x0e, 0x10, 0xf4, 0x85, 0xc3, 0x8b, 0x28, 0xd9, 0x48, 0xde, 0xd8, 0xa7, 0x4e, 0x55, + 0x75, 0x57, 0x55, 0x57, 0xd7, 0x10, 0x9e, 0x8e, 0x7d, 0x7f, 0xec, 0xd0, 0xda, 0xd0, 0xf1, 0x27, + 0xa3, 0x9a, 0xeb, 0xd4, 0xce, 0xf7, 0x6a, 0x67, 0xfe, 0xc0, 0x8a, 0x68, 0x78, 0x6e, 0x0f, 0x69, + 0x35, 0x08, 0x7d, 0xe6, 0x23, 0x24, 0x59, 0x55, 0xc1, 0xaa, 0xba, 0x4e, 0xf5, 0x7c, 0xaf, 0xf2, + 0x50, 0x69, 0x92, 0xc0, 0xae, 0x11, 0xcf, 0xf3, 0x19, 0x61, 0xb6, 0xef, 0x45, 0x52, 0xa3, 0xb2, + 0x35, 0x2f, 0x9d, 0xb0, 0x77, 0x0a, 0x7e, 0xa0, 0x60, 0xb1, 0x1a, 0x4c, 0xde, 0xd6, 0xa8, 0x1b, + 0xb0, 0x4b, 0x25, 0x7c, 0xbc, 0x2c, 0x64, 0xb6, 0x4b, 0x23, 0x46, 0xdc, 0x40, 0x12, 0xf4, 0x3f, + 0x66, 0x60, 0xd3, 0x0c, 0x89, 0xed, 0xd9, 0xde, 0xb8, 0xe5, 0x05, 0x13, 0x86, 0x8e, 0x01, 0xa2, + 0x21, 0x71, 0xa8, 0xc5, 0x6c, 0x1a, 0x96, 0x13, 0xdb, 0x89, 0xdd, 0xe2, 0xfe, 0xe7, 0xd5, 0xab, + 0xbb, 0xad, 0x2e, 0xa8, 0x55, 0x7b, 0x5c, 0xc7, 0xb4, 0x69, 0x88, 0xf3, 0xd1, 0xf4, 0x27, 0x7a, + 0x0c, 0x05, 0x97, 0x44, 0x8c, 0x86, 0x16, 0xbb, 0x0c, 0x68, 0x39, 0xb9, 0x9d, 0xd8, 0xcd, 0x63, + 0x90, 0x90, 0x79, 0x19, 0x50, 0x4e, 0xf8, 0x9d, 0x1f, 0xbe, 0x9f, 0x12, 0x52, 0x92, 0x20, 0x21, + 0x41, 0xd8, 0x87, 0xad, 0x80, 0x84, 0xc4, 0xa5, 0xdc, 0x08, 0x8f, 0xe0, 0x94, 0x9a, 0x16, 0xd4, + 0xdb, 0xb1, 0xb0, 0x27, 0x64, 0x42, 0xe7, 0x09, 0x6c, 0x28, 0xa3, 0x43, 0x7f, 0xe2, 0xb1, 0x72, + 0x66, 0x3b, 0xb1, 0x9b, 0xc2, 0xca, 0x51, 0x83, 0x43, 0xe8, 0x4b, 0xb8, 0x7b, 0xc5, 0xac, 0x24, + 0x67, 0x05, 0xf9, 0xce, 0x92, 0x5d, 0xa9, 0xf5, 0x04, 0x36, 0x02, 0x32, 0x7c, 0x4f, 0xc6, 0xd4, + 0x9a, 0x84, 0x76, 0x54, 0x5e, 0xdf, 0x4e, 0xed, 0xe6, 0x71, 0x41, 0x61, 0xfd, 0xd0, 0x8e, 0xd0, + 0x0e, 0x6c, 0x06, 0x97, 0xec, 0x9d, 0xef, 0x59, 0xae, 0x3f, 0x9a, 0x38, 0xb4, 0x9c, 0x13, 0xfb, + 0xdc, 0x90, 0x60, 0x5b, 0x60, 0x08, 0x41, 0x9a, 0x84, 0xe3, 0xa8, 0x0c, 0x42, 0x5f, 0xfc, 0x46, + 0x5d, 0x28, 0xbd, 0xbb, 0x0c, 0x68, 0x18, 0x3b, 0x8e, 0xca, 0x1b, 0xdb, 0x89, 0xdd, 0xc2, 0xfe, + 0xb3, 0x55, 
0xb1, 0x3f, 0x5a, 0xa0, 0xf6, 0x02, 0x3a, 0xc4, 0xcb, 0xea, 0xe8, 0x2e, 0x64, 0x43, + 0x3a, 0xb6, 0x7d, 0xaf, 0x5c, 0x14, 0x7b, 0x50, 0x2b, 0x74, 0x0f, 0xd6, 0x79, 0x39, 0x8e, 0xec, + 0xb0, 0xac, 0x49, 0xc1, 0x99, 0x3f, 0x68, 0xda, 0x21, 0xfa, 0x21, 0x94, 0xc2, 0x89, 0xc7, 0x2b, + 0xc4, 0x3a, 0xa7, 0x61, 0xc4, 0x35, 0x4b, 0x82, 0x50, 0x54, 0xf0, 0x6b, 0x89, 0xea, 0x5d, 0xc8, + 0xc7, 0xe9, 0x46, 0x79, 0xc8, 0x1c, 0xd4, 0x7b, 0xad, 0x86, 0xb6, 0x86, 0x8a, 0x00, 0x3d, 0xb3, + 0xde, 0x69, 0xd6, 0x71, 0xd3, 0xda, 0xd3, 0x12, 0x68, 0x13, 0xf2, 0x5d, 0x6c, 0xb4, 0x5b, 0xfd, + 0xb6, 0xb5, 0xa7, 0xa5, 0xf8, 0x52, 0x30, 0xad, 0xc3, 0x6e, 0x5f, 0xcb, 0x22, 0x80, 0x6c, 0xa3, + 0xdf, 0x33, 0x4f, 0xdb, 0x5a, 0x46, 0xff, 0x47, 0x12, 0xd0, 0xd5, 0x33, 0xa1, 0x06, 0xa4, 0xc7, + 0x3e, 0x71, 0x54, 0x15, 0xd6, 0x3e, 0x2d, 0x12, 0xd5, 0x43, 0x9f, 0x38, 0xbc, 0x10, 0xb0, 0x50, + 0x46, 0x5f, 0x41, 0x56, 0xc8, 0xa3, 0x72, 0x72, 0x3b, 0xb5, 0x5b, 0xd8, 0x7f, 0xb2, 0xca, 0x4c, + 0x77, 0x21, 0x96, 0x4a, 0x01, 0x3d, 0x02, 0x70, 0xc9, 0x85, 0xc5, 0x42, 0x9b, 0x38, 0x91, 0xa8, + 0xce, 0x0c, 0xce, 0xbb, 0xe4, 0xc2, 0x14, 0x00, 0xaa, 0xc2, 0x6d, 0x2e, 0xe6, 0x64, 0xc7, 0xa1, + 0xce, 0x94, 0x97, 0x16, 0xbc, 0x5b, 0x2e, 0xb9, 0xe8, 0x2a, 0x89, 0xe2, 0xbf, 0x84, 0xfb, 0x8b, + 0x49, 0xb2, 0x5c, 0xca, 0x42, 0x7b, 0x68, 0x31, 0x32, 0x16, 0x55, 0x9a, 0xc7, 0xf7, 0x16, 0x09, + 0x6d, 0x21, 0x37, 0xc9, 0x58, 0xaf, 0x43, 0x6e, 0x7a, 0x2e, 0x74, 0x1f, 0xb6, 0x0e, 0x4f, 0xeb, + 0x27, 0x96, 0xf9, 0xcb, 0xae, 0x61, 0xf5, 0x3b, 0xbd, 0xae, 0xd1, 0x68, 0xbd, 0x6a, 0x19, 0x4d, + 0x6d, 0x0d, 0x6d, 0x40, 0xae, 0x5d, 0xff, 0xb6, 0xd5, 0x6e, 0xbd, 0x31, 0xb4, 0x84, 0x58, 0xb5, + 0x3a, 0x72, 0x95, 0xd4, 0xff, 0x9a, 0x86, 0xcd, 0x85, 0x73, 0xa2, 0x1f, 0x40, 0x71, 0xb6, 0x17, + 0x8f, 0xb8, 0x54, 0x44, 0x3a, 0x8f, 0x37, 0x63, 0xb4, 0x43, 0x5c, 0xca, 0xd3, 0x10, 0xdf, 0xb9, + 0x6b, 0xd2, 0xb0, 0x60, 0x77, 0xb6, 0x92, 0x69, 0xe0, 0xca, 0xe8, 0x01, 0xe4, 0x5d, 0xdb, 0xb3, + 0xce, 0x89, 0x33, 0x91, 0x9d, 0x20, 0x81, 0x73, 
0xae, 0xed, 0xbd, 0xe6, 0x6b, 0x21, 0x24, 0x17, + 0x4a, 0x98, 0x52, 0x42, 0x72, 0x21, 0x85, 0x3f, 0x02, 0x34, 0x24, 0x8c, 0x8e, 0xfd, 0xd0, 0x1e, + 0x12, 0x47, 0x92, 0xa2, 0x72, 0x46, 0x5c, 0x9e, 0x5b, 0x73, 0x12, 0xc1, 0x8e, 0x78, 0x19, 0x8f, + 0xec, 0x68, 0x18, 0x52, 0x46, 0xa7, 0xdc, 0xec, 0x76, 0x6a, 0x37, 0x81, 0x8b, 0x53, 0x58, 0x11, + 0x67, 0x9d, 0x8e, 0x1f, 0x6e, 0xfd, 0xfa, 0x4e, 0xb7, 0x78, 0x38, 0x59, 0xfa, 0xfc, 0x60, 0xaa, + 0xd3, 0x5d, 0x06, 0x54, 0x1f, 0xcf, 0x85, 0x56, 0xe4, 0xe8, 0x33, 0xa8, 0x74, 0xeb, 0xb8, 0xde, + 0x36, 0x4c, 0x03, 0xaf, 0x4a, 0x14, 0x40, 0xb6, 0x79, 0xda, 0x3f, 0x38, 0xe1, 0x69, 0x2a, 0xc0, + 0x7a, 0xab, 0x63, 0x1a, 0x87, 0x06, 0xd6, 0x92, 0xa8, 0x04, 0x85, 0x46, 0xdd, 0x34, 0x0e, 0x4f, + 0x71, 0xab, 0x51, 0x3f, 0xd1, 0x52, 0x3c, 0x89, 0xcd, 0x56, 0xaf, 0x81, 0x0d, 0xd3, 0xd0, 0xd2, + 0xfa, 0xaf, 0xa6, 0x77, 0x8f, 0x3b, 0xc9, 0x41, 0xba, 0x73, 0xda, 0x31, 0xb4, 0x35, 0xb4, 0x05, + 0xb7, 0xfa, 0x9d, 0x96, 0x69, 0x9d, 0xb4, 0x3a, 0x46, 0x1d, 0x5b, 0xbd, 0x46, 0x5d, 0x58, 0x46, + 0x50, 0x94, 0xf0, 0xe9, 0xa1, 0xc2, 0x92, 0xa8, 0x02, 0x77, 0x05, 0x86, 0x8d, 0xd7, 0x06, 0xee, + 0x19, 0x73, 0xb2, 0x94, 0xfe, 0xa7, 0x34, 0xdc, 0x59, 0xbc, 0x51, 0xa7, 0x13, 0xc6, 0x5f, 0x85, + 0xfb, 0x90, 0x13, 0xd5, 0x6d, 0xd9, 0x23, 0x55, 0x23, 0xeb, 0x62, 0xdd, 0x1a, 0xa1, 0xf1, 0xd5, + 0xce, 0x25, 0x2f, 0xda, 0x37, 0x1f, 0xbf, 0xaf, 0xd2, 0xfa, 0x12, 0x18, 0x19, 0x1e, 0x0b, 0x2f, + 0xaf, 0x36, 0xb4, 0xdf, 0xc0, 0xc6, 0x5b, 0xdb, 0x23, 0x8e, 0xba, 0x35, 0xa2, 0x4e, 0xfe, 0x77, + 0x2f, 0xf2, 0x6a, 0xe1, 0x82, 0x30, 0x29, 0x17, 0xe8, 0xd7, 0x50, 0x20, 0xce, 0xd4, 0x3e, 0xbf, + 0xc8, 0xa9, 0xff, 0xdf, 0x01, 0x10, 0x47, 0x99, 0x8f, 0x2a, 0xa3, 0xe5, 0xe8, 0x2a, 0xbf, 0x3b, + 0xb0, 0xc9, 0xd4, 0x6b, 0x6a, 0x45, 0x8c, 0x06, 0x22, 0xc4, 0x29, 0xbc, 0x31, 0x05, 0x7b, 0x8c, + 0x06, 0xbc, 0xae, 0xfd, 0xc1, 0x19, 0x1d, 0x32, 0xfb, 0x9c, 0x2e, 0x5c, 0xa3, 0x62, 0x0c, 0x8b, + 0xc2, 0xae, 0x1c, 0x2c, 0x7b, 0x91, 0x01, 0x45, 0x1a, 0xa4, 0xde, 0xd3, 0x4b, 0x95, 
0x3e, 0xfe, + 0x13, 0xdd, 0x81, 0xcc, 0xcc, 0x50, 0x1e, 0xcb, 0xc5, 0xcb, 0xe4, 0x4f, 0x12, 0xfa, 0xbf, 0x13, + 0x50, 0x9c, 0x3e, 0xf0, 0xaa, 0x04, 0xf6, 0x61, 0x6b, 0xe8, 0xbb, 0x81, 0x43, 0x19, 0x1d, 0xc9, + 0x56, 0xa7, 0x9e, 0x4c, 0xb9, 0xd9, 0xdb, 0xb1, 0x50, 0x74, 0x3b, 0xf9, 0x62, 0xfe, 0x0c, 0xb2, + 0xaa, 0x29, 0xca, 0x92, 0xd8, 0xfd, 0xd4, 0x58, 0x62, 0xa5, 0x87, 0x5e, 0xc0, 0xad, 0xa1, 0xef, + 0x45, 0x13, 0x97, 0x8e, 0x2c, 0xd7, 0xb1, 0x26, 0x9e, 0xcd, 0x22, 0xd5, 0x21, 0x4a, 0x53, 0x41, + 0xdb, 0xe9, 0x73, 0x18, 0xfd, 0x14, 0x1e, 0xda, 0x91, 0xb5, 0xd4, 0x62, 0xd9, 0x44, 0x84, 0xf5, + 0xcc, 0x1f, 0x88, 0xfe, 0x95, 0xc3, 0xf7, 0xed, 0x68, 0xd1, 0xa3, 0x29, 0x18, 0xc7, 0xfe, 0x40, + 0xff, 0x5b, 0x0a, 0x4a, 0xdd, 0x90, 0x8e, 0xec, 0x21, 0x1f, 0xbc, 0xe4, 0x3c, 0xf4, 0x18, 0xc0, + 0xf5, 0x47, 0xd4, 0x99, 0xeb, 0x8f, 0x47, 0x6b, 0x38, 0x2f, 0x30, 0xd1, 0x1d, 0x77, 0x60, 0x43, + 0x3d, 0x97, 0x92, 0x92, 0x54, 0x94, 0x82, 0x42, 0x05, 0x09, 0x41, 0x6a, 0x12, 0xda, 0xe5, 0xbc, + 0x92, 0xf1, 0x05, 0x3a, 0x85, 0xc2, 0x88, 0x30, 0x62, 0xbd, 0xf5, 0x43, 0x97, 0x30, 0x71, 0xa8, + 0xe2, 0x7e, 0x75, 0x65, 0x03, 0x5a, 0xdc, 0x53, 0xb5, 0x49, 0x18, 0x79, 0x25, 0xb4, 0x30, 0x8c, + 0xe2, 0xdf, 0x7c, 0x9a, 0xb2, 0xb9, 0xdc, 0x0a, 0x08, 0x7b, 0x27, 0xcb, 0x37, 0x8f, 0x41, 0x40, + 0x5d, 0x8e, 0x70, 0x82, 0x2f, 0xc2, 0x2b, 0x18, 0xea, 0xc9, 0x01, 0x09, 0x71, 0x06, 0xda, 0x05, + 0x8d, 0xf7, 0xe1, 0x85, 0xf1, 0x49, 0x4e, 0x44, 0x45, 0x97, 0x5c, 0xfc, 0x62, 0x6e, 0x82, 0x9a, + 0x4d, 0x17, 0xeb, 0x0b, 0xd3, 0xc5, 0x8a, 0x21, 0x22, 0xb7, 0x72, 0x88, 0x78, 0x0d, 0x30, 0x3b, + 0x06, 0x7a, 0x00, 0xf7, 0x9a, 0x75, 0xb3, 0x6e, 0xbd, 0x3a, 0xc5, 0xed, 0xba, 0xb9, 0xd4, 0x2b, + 0x73, 0x90, 0x36, 0x8d, 0x6f, 0x4d, 0x39, 0x51, 0x98, 0xaf, 0x2c, 0x6c, 0x34, 0x4e, 0x71, 0x53, + 0x4b, 0xf2, 0xf6, 0x16, 0x2f, 0xad, 0xc3, 0x37, 0xad, 0xae, 0x96, 0x3a, 0x28, 0xc1, 0xa6, 0xcc, + 0x97, 0x72, 0xaf, 0xff, 0x25, 0x01, 0xda, 0x2c, 0x80, 0xaa, 0x98, 0x97, 0x22, 0x91, 0xb8, 0x12, + 0x89, 0xe7, 0xa0, 0x05, 
0xb1, 0x92, 0x8a, 0x44, 0x52, 0x44, 0xa2, 0x34, 0xc3, 0x65, 0x28, 0x1e, + 0x43, 0x81, 0x86, 0xa1, 0x3f, 0x8d, 0x57, 0x4a, 0xb0, 0x40, 0x40, 0x92, 0xf0, 0x08, 0xc0, 0xf3, + 0x47, 0xd4, 0x7a, 0xe7, 0x4f, 0x42, 0x39, 0x1e, 0x24, 0x70, 0x9e, 0x23, 0x47, 0x1c, 0xd0, 0xff, + 0x93, 0x81, 0xd4, 0xb1, 0x3f, 0x40, 0x5b, 0xc0, 0x27, 0xb1, 0x59, 0x87, 0xcd, 0x9c, 0xf9, 0x83, + 0xd6, 0x08, 0x1d, 0x43, 0x31, 0x6e, 0x0e, 0x22, 0x97, 0x62, 0x1f, 0xd7, 0xcc, 0x31, 0x0b, 0x43, + 0xf9, 0xd1, 0x1a, 0x8e, 0xfb, 0x8a, 0x2c, 0xe6, 0xee, 0xc2, 0xa9, 0xa4, 0x35, 0xd9, 0x46, 0x77, + 0x3e, 0xa1, 0xee, 0x8e, 0xd6, 0xe6, 0x0f, 0x2f, 0x2d, 0x7e, 0x0d, 0x85, 0x61, 0x48, 0x09, 0xe3, + 0xdf, 0x0b, 0xae, 0x1c, 0x11, 0x0a, 0xfb, 0x95, 0xa9, 0xb1, 0xe9, 0x77, 0x47, 0xd5, 0x9c, 0x7e, + 0x77, 0x60, 0x90, 0x74, 0x0e, 0xa0, 0xaf, 0x00, 0x22, 0x46, 0x42, 0x26, 0x75, 0x33, 0x1f, 0xd5, + 0xcd, 0x0b, 0xb6, 0x50, 0xfd, 0x31, 0xe4, 0xa8, 0x37, 0x92, 0x8a, 0xd9, 0x8f, 0x2a, 0xae, 0x53, + 0x6f, 0x24, 0xd4, 0xbe, 0x80, 0x4c, 0xc4, 0x08, 0x9b, 0x3e, 0xf7, 0x8f, 0x56, 0x9d, 0xfa, 0xd8, + 0x1f, 0x54, 0x7b, 0x9c, 0x84, 0x25, 0x97, 0xb7, 0x67, 0x99, 0x60, 0x97, 0x46, 0x11, 0x19, 0xc7, + 0x43, 0xbd, 0x00, 0xdb, 0x12, 0x43, 0x6d, 0x28, 0xc5, 0x69, 0x92, 0x75, 0x24, 0x6e, 0x7b, 0x61, + 0x5f, 0xbf, 0x29, 0x4f, 0xb2, 0x1c, 0x8f, 0x12, 0x38, 0xce, 0xb1, 0x2a, 0xd0, 0x1e, 0xdc, 0x9a, + 0xcb, 0x94, 0x32, 0x08, 0xc2, 0xe0, 0xd3, 0x9b, 0x53, 0x15, 0x9b, 0x9c, 0x4b, 0xb5, 0xc4, 0xf4, + 0xdf, 0x27, 0x20, 0x23, 0x4e, 0xc6, 0xe7, 0x85, 0x9e, 0x59, 0x37, 0x57, 0x4c, 0x25, 0x3f, 0xef, + 0x1b, 0x7d, 0xa3, 0x19, 0x4f, 0xef, 0xdd, 0x3a, 0x6e, 0x75, 0x0e, 0xb5, 0x24, 0x1f, 0x52, 0x70, + 0xbf, 0xd3, 0xe1, 0x0b, 0x31, 0xca, 0xf7, 0xfa, 0x8d, 0x86, 0x61, 0x34, 0x8d, 0xa6, 0x96, 0xe6, + 0x6a, 0xaf, 0xea, 0xad, 0x13, 0xa3, 0xa9, 0x65, 0xf8, 0x47, 0x40, 0xa3, 0xde, 0x69, 0x18, 0x27, + 0x27, 0x9c, 0x9a, 0xe5, 0x54, 0xb5, 0x36, 0x9a, 0xda, 0xfa, 0xc1, 0x3a, 0x64, 0x44, 0xd9, 0x1d, + 0xe4, 0x20, 0x2b, 0x4f, 0xa5, 0xf7, 0x41, 0x6b, 0x88, 0x9a, 
0x38, 0xf6, 0x07, 0x98, 0xfe, 0x76, + 0x42, 0x23, 0xd1, 0x5e, 0x02, 0x12, 0x52, 0xf5, 0xba, 0xe4, 0xb1, 0x5a, 0xa1, 0xe7, 0x90, 0xe2, + 0x9d, 0x5c, 0xde, 0x80, 0x7b, 0xd7, 0x64, 0x0f, 0x73, 0x8e, 0xfe, 0x01, 0x4a, 0x27, 0x76, 0xc4, + 0x8e, 0xfd, 0x41, 0xf4, 0x31, 0xab, 0x77, 0x21, 0xfb, 0xd6, 0x76, 0x18, 0x0d, 0xd5, 0x43, 0xa8, + 0x56, 0xfc, 0xe2, 0x06, 0xfc, 0x6b, 0x8f, 0xf9, 0xef, 0xa9, 0xa7, 0x3e, 0x39, 0xf3, 0x1c, 0x31, + 0x39, 0xc0, 0xa7, 0x56, 0x21, 0x8e, 0xec, 0xef, 0x64, 0xf5, 0x66, 0x70, 0x8e, 0x03, 0x3d, 0xfb, + 0x3b, 0x3e, 0x11, 0x6a, 0x33, 0xf7, 0x51, 0xe0, 0x7b, 0x11, 0x45, 0x9f, 0x43, 0xfa, 0xcc, 0x1f, + 0x44, 0xe5, 0x84, 0x78, 0x0c, 0xaf, 0xdd, 0xbe, 0x20, 0xa1, 0x67, 0x50, 0xf2, 0xe8, 0x05, 0x6f, + 0x50, 0xf1, 0x0e, 0xe4, 0xee, 0x36, 0x39, 0xdc, 0x9d, 0xee, 0x42, 0xdf, 0x81, 0xcd, 0x43, 0xca, + 0xe6, 0x62, 0x87, 0x20, 0x3d, 0x37, 0xcb, 0x8b, 0xdf, 0xfa, 0x33, 0xd0, 0x1a, 0xc4, 0x1b, 0x52, + 0xe7, 0x66, 0xde, 0xfe, 0xbf, 0x52, 0x00, 0xc7, 0xfe, 0xa0, 0x27, 0xff, 0xab, 0x40, 0x13, 0xc8, + 0xc7, 0xa9, 0x41, 0x2b, 0xeb, 0x6e, 0x39, 0x73, 0x95, 0xeb, 0x4e, 0xa5, 0x3f, 0xff, 0xc3, 0xdf, + 0xff, 0xf9, 0xe7, 0xe4, 0x8e, 0xfe, 0xb0, 0x76, 0xbe, 0x57, 0xfb, 0x5e, 0x46, 0xfe, 0x9b, 0x20, + 0xf4, 0xf9, 0xec, 0x12, 0xd5, 0x5e, 0x7c, 0xa8, 0xf1, 0x53, 0xbf, 0xe4, 0xa9, 0x43, 0xdf, 0x43, + 0x6e, 0x1a, 0x3b, 0xb4, 0xb2, 0x31, 0x2d, 0x25, 0xb6, 0xf2, 0xf4, 0x66, 0x92, 0x0c, 0xbf, 0xfe, + 0x54, 0xec, 0xe0, 0x33, 0x74, 0xe3, 0x0e, 0xd0, 0x19, 0x64, 0x65, 0x3c, 0xd1, 0xca, 0x0e, 0xbb, + 0x10, 0xeb, 0xeb, 0x4f, 0xbb, 0xe8, 0x8b, 0xc7, 0x76, 0xce, 0x93, 0x70, 0x54, 0x7b, 0xf1, 0x01, + 0x5d, 0x42, 0x3e, 0x4e, 0xcb, 0x35, 0xf1, 0x5d, 0xca, 0x5a, 0xe5, 0xee, 0x95, 0x36, 0x67, 0xb8, + 0x01, 0xbb, 0xd4, 0xab, 0xc2, 0xe1, 0xae, 0xbe, 0x73, 0x93, 0xc3, 0x97, 0x43, 0x61, 0xee, 0x65, + 0xe2, 0xc5, 0x01, 0x85, 0xca, 0xd0, 0x77, 0xaf, 0xb8, 0x24, 0x81, 0x5d, 0x3d, 0xdf, 0x3b, 0x28, + 0xcd, 0x8a, 0xa0, 0xcb, 0xfd, 0x74, 0x13, 0x6f, 0xbe, 0x54, 0xd4, 0xb1, 0xef, 0x10, 0x6f, 0x5c, + 
0xf5, 0xc3, 0x71, 0x6d, 0x4c, 0x3d, 0xb1, 0x8b, 0x9a, 0x14, 0x91, 0xc0, 0x8e, 0xe6, 0xff, 0xf6, + 0xfa, 0xda, 0x75, 0x06, 0x59, 0x41, 0xf8, 0xe2, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x7b, + 0x72, 0xd5, 0x16, 0x13, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/model_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/model_service.pb.go new file mode 100644 index 000000000..c05f8adf5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/model_service.pb.go @@ -0,0 +1,1049 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1/model_service.proto + +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Represents a machine learning solution. +// +// A model can have multiple versions, each of which is a deployed, trained +// model ready to receive prediction requests. The model itself is just a +// container. +type Model struct { + // Required. The name specified for the model when it was created. + // + // The model name must be unique within the project it is created in. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. The description specified for the model when it was created. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Output only. The default version of the model. 
This version will be used to + // handle prediction requests that do not specify a version. + // + // You can change the default version by calling + // [projects.methods.versions.setDefault](/ml/reference/rest/v1/projects.models.versions/setDefault). + DefaultVersion *Version `protobuf:"bytes,3,opt,name=default_version,json=defaultVersion" json:"default_version,omitempty"` + // Optional. The list of regions where the model is going to be deployed. + // Currently only one region per model is supported. + // Defaults to 'us-central1' if nothing is set. + Regions []string `protobuf:"bytes,4,rep,name=regions" json:"regions,omitempty"` + // Optional. If true, enables StackDriver Logging for online prediction. + // Default is false. + OnlinePredictionLogging bool `protobuf:"varint,5,opt,name=online_prediction_logging,json=onlinePredictionLogging" json:"online_prediction_logging,omitempty"` +} + +func (m *Model) Reset() { *m = Model{} } +func (m *Model) String() string { return proto.CompactTextString(m) } +func (*Model) ProtoMessage() {} +func (*Model) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Model) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Model) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Model) GetDefaultVersion() *Version { + if m != nil { + return m.DefaultVersion + } + return nil +} + +func (m *Model) GetRegions() []string { + if m != nil { + return m.Regions + } + return nil +} + +func (m *Model) GetOnlinePredictionLogging() bool { + if m != nil { + return m.OnlinePredictionLogging + } + return false +} + +// Represents a version of the model. +// +// Each version is a trained model deployed in the cloud, ready to handle +// prediction requests. A model can have multiple versions. 
You can get +// information about all of the versions of a given model by calling +// [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list). +type Version struct { + // Required.The name specified for the version when it was created. + // + // The version name must be unique within the model it is created in. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. The description specified for the version when it was created. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Output only. If true, this version will be used to handle prediction + // requests that do not specify a version. + // + // You can change the default version by calling + // [projects.methods.versions.setDefault](/ml/reference/rest/v1/projects.models.versions/setDefault). + IsDefault bool `protobuf:"varint,3,opt,name=is_default,json=isDefault" json:"is_default,omitempty"` + // Required. The Google Cloud Storage location of the trained model used to + // create the version. See the + // [overview of model deployment](/ml/docs/concepts/deployment-overview) for + // more informaiton. + // + // When passing Version to + // [projects.models.versions.create](/ml/reference/rest/v1/projects.models.versions/create) + // the model service uses the specified location as the source of the model. + // Once deployed, the model version is hosted by the prediction service, so + // this location is useful only as a historical record. + DeploymentUri string `protobuf:"bytes,4,opt,name=deployment_uri,json=deploymentUri" json:"deployment_uri,omitempty"` + // Output only. The time the version was created. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Output only. The time the version was last used for prediction. 
+ LastUseTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=last_use_time,json=lastUseTime" json:"last_use_time,omitempty"` + // Optional. The Google Cloud ML runtime version to use for this deployment. + // If not set, Google Cloud ML will choose a version. + RuntimeVersion string `protobuf:"bytes,8,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` + // Optional. Manually select the number of nodes to use for serving the + // model. If unset (i.e., by default), the number of nodes used to serve + // the model automatically scales with traffic. However, care should be + // taken to ramp up traffic according to the model's ability to scale. If + // your model needs to handle bursts of traffic beyond it's ability to + // scale, it is recommended you set this field appropriately. + ManualScaling *ManualScaling `protobuf:"bytes,9,opt,name=manual_scaling,json=manualScaling" json:"manual_scaling,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Version) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Version) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Version) GetIsDefault() bool { + if m != nil { + return m.IsDefault + } + return false +} + +func (m *Version) GetDeploymentUri() string { + if m != nil { + return m.DeploymentUri + } + return "" +} + +func (m *Version) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Version) GetLastUseTime() *google_protobuf2.Timestamp { + if m != nil { + return m.LastUseTime + } + return nil +} + +func (m *Version) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +func (m *Version) 
GetManualScaling() *ManualScaling { + if m != nil { + return m.ManualScaling + } + return nil +} + +// Options for manually scaling a model. +type ManualScaling struct { + // The number of nodes to allocate for this model. These nodes are always up, + // starting from the time the model is deployed, so the cost of operating + // this model will be proportional to nodes * number of hours since + // deployment. + Nodes int32 `protobuf:"varint,1,opt,name=nodes" json:"nodes,omitempty"` +} + +func (m *ManualScaling) Reset() { *m = ManualScaling{} } +func (m *ManualScaling) String() string { return proto.CompactTextString(m) } +func (*ManualScaling) ProtoMessage() {} +func (*ManualScaling) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ManualScaling) GetNodes() int32 { + if m != nil { + return m.Nodes + } + return 0 +} + +// Request message for the CreateModel method. +type CreateModelRequest struct { + // Required. The project name. + // + // Authorization: requires `Editor` role on the specified project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The model to create. + Model *Model `protobuf:"bytes,2,opt,name=model" json:"model,omitempty"` +} + +func (m *CreateModelRequest) Reset() { *m = CreateModelRequest{} } +func (m *CreateModelRequest) String() string { return proto.CompactTextString(m) } +func (*CreateModelRequest) ProtoMessage() {} +func (*CreateModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *CreateModelRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateModelRequest) GetModel() *Model { + if m != nil { + return m.Model + } + return nil +} + +// Request message for the ListModels method. +type ListModelsRequest struct { + // Required. The name of the project whose models are to be listed. + // + // Authorization: requires `Viewer` role on the specified project. 
+ Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. A page token to request the next page of results. + // + // You get the token from the `next_page_token` field of the response from + // the previous call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. The number of models to retrieve per "page" of results. If there + // are more remaining results than this number, the response message will + // contain a valid value in the `next_page_token` field. + // + // The default value is 20, and the maximum page size is 100. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListModelsRequest) Reset() { *m = ListModelsRequest{} } +func (m *ListModelsRequest) String() string { return proto.CompactTextString(m) } +func (*ListModelsRequest) ProtoMessage() {} +func (*ListModelsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *ListModelsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListModelsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListModelsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for the ListModels method. +type ListModelsResponse struct { + // The list of models. + Models []*Model `protobuf:"bytes,1,rep,name=models" json:"models,omitempty"` + // Optional. Pass this token as the `page_token` field of the request for a + // subsequent call. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListModelsResponse) Reset() { *m = ListModelsResponse{} } +func (m *ListModelsResponse) String() string { return proto.CompactTextString(m) } +func (*ListModelsResponse) ProtoMessage() {} +func (*ListModelsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *ListModelsResponse) GetModels() []*Model { + if m != nil { + return m.Models + } + return nil +} + +func (m *ListModelsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for the GetModel method. +type GetModelRequest struct { + // Required. The name of the model. + // + // Authorization: requires `Viewer` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetModelRequest) Reset() { *m = GetModelRequest{} } +func (m *GetModelRequest) String() string { return proto.CompactTextString(m) } +func (*GetModelRequest) ProtoMessage() {} +func (*GetModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *GetModelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for the DeleteModel method. +type DeleteModelRequest struct { + // Required. The name of the model. + // + // Authorization: requires `Editor` role on the parent project. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteModelRequest) Reset() { *m = DeleteModelRequest{} } +func (m *DeleteModelRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteModelRequest) ProtoMessage() {} +func (*DeleteModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *DeleteModelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Uploads the provided trained model version to Cloud Machine Learning. +type CreateVersionRequest struct { + // Required. The name of the model. + // + // Authorization: requires `Editor` role on the parent project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The version details. + Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *CreateVersionRequest) Reset() { *m = CreateVersionRequest{} } +func (m *CreateVersionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVersionRequest) ProtoMessage() {} +func (*CreateVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *CreateVersionRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateVersionRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +// Request message for the ListVersions method. +type ListVersionsRequest struct { + // Required. The name of the model for which to list the version. + // + // Authorization: requires `Viewer` role on the parent project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. A page token to request the next page of results. + // + // You get the token from the `next_page_token` field of the response from + // the previous call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. The number of versions to retrieve per "page" of results. If + // there are more remaining results than this number, the response message + // will contain a valid value in the `next_page_token` field. + // + // The default value is 20, and the maximum page size is 100. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListVersionsRequest) Reset() { *m = ListVersionsRequest{} } +func (m *ListVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListVersionsRequest) ProtoMessage() {} +func (*ListVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *ListVersionsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListVersionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListVersionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for the ListVersions method. +type ListVersionsResponse struct { + // The list of versions. + Versions []*Version `protobuf:"bytes,1,rep,name=versions" json:"versions,omitempty"` + // Optional. Pass this token as the `page_token` field of the request for a + // subsequent call. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListVersionsResponse) Reset() { *m = ListVersionsResponse{} } +func (m *ListVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListVersionsResponse) ProtoMessage() {} +func (*ListVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *ListVersionsResponse) GetVersions() []*Version { + if m != nil { + return m.Versions + } + return nil +} + +func (m *ListVersionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for the GetVersion method. +type GetVersionRequest struct { + // Required. The name of the version. + // + // Authorization: requires `Viewer` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } +func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionRequest) ProtoMessage() {} +func (*GetVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *GetVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for the DeleteVerionRequest method. +type DeleteVersionRequest struct { + // Required. The name of the version. You can get the names of all the + // versions of a model by calling + // [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list). + // + // Authorization: requires `Editor` role on the parent project. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteVersionRequest) Reset() { *m = DeleteVersionRequest{} } +func (m *DeleteVersionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVersionRequest) ProtoMessage() {} +func (*DeleteVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *DeleteVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for the SetDefaultVersion request. +type SetDefaultVersionRequest struct { + // Required. The name of the version to make the default for the model. You + // can get the names of all the versions of a model by calling + // [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list). + // + // Authorization: requires `Editor` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *SetDefaultVersionRequest) Reset() { *m = SetDefaultVersionRequest{} } +func (m *SetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*SetDefaultVersionRequest) ProtoMessage() {} +func (*SetDefaultVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *SetDefaultVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*Model)(nil), "google.cloud.ml.v1.Model") + proto.RegisterType((*Version)(nil), "google.cloud.ml.v1.Version") + proto.RegisterType((*ManualScaling)(nil), "google.cloud.ml.v1.ManualScaling") + proto.RegisterType((*CreateModelRequest)(nil), "google.cloud.ml.v1.CreateModelRequest") + proto.RegisterType((*ListModelsRequest)(nil), "google.cloud.ml.v1.ListModelsRequest") + proto.RegisterType((*ListModelsResponse)(nil), "google.cloud.ml.v1.ListModelsResponse") + proto.RegisterType((*GetModelRequest)(nil), "google.cloud.ml.v1.GetModelRequest") + 
proto.RegisterType((*DeleteModelRequest)(nil), "google.cloud.ml.v1.DeleteModelRequest") + proto.RegisterType((*CreateVersionRequest)(nil), "google.cloud.ml.v1.CreateVersionRequest") + proto.RegisterType((*ListVersionsRequest)(nil), "google.cloud.ml.v1.ListVersionsRequest") + proto.RegisterType((*ListVersionsResponse)(nil), "google.cloud.ml.v1.ListVersionsResponse") + proto.RegisterType((*GetVersionRequest)(nil), "google.cloud.ml.v1.GetVersionRequest") + proto.RegisterType((*DeleteVersionRequest)(nil), "google.cloud.ml.v1.DeleteVersionRequest") + proto.RegisterType((*SetDefaultVersionRequest)(nil), "google.cloud.ml.v1.SetDefaultVersionRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ModelService service + +type ModelServiceClient interface { + // Creates a model which will later contain one or more versions. + // + // You must add at least one version before you can request predictions from + // the model. Add versions by calling + // [projects.models.versions.create](/ml/reference/rest/v1/projects.models.versions/create). + CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*Model, error) + // Lists the models in a project. + // + // Each project can contain multiple models, and each model can have multiple + // versions. + ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error) + // Gets information about a model, including its name, the description (if + // set), and the default version (if at least one version of the model has + // been deployed). + GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error) + // Deletes a model. 
+ // + // You can only delete a model if there are no versions in it. You can delete + // versions by calling + // [projects.models.versions.delete](/ml/reference/rest/v1/projects.models.versions/delete). + DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Creates a new version of a model from a trained TensorFlow model. + // + // If the version created in the cloud by this call is the first deployed + // version of the specified model, it will be made the default version of the + // model. When you add a version to a model that already has one or more + // versions, the default version does not automatically change. If you want a + // new version to be the default, you must call + // [projects.models.versions.setDefault](/ml/reference/rest/v1/projects.models.versions/setDefault). + CreateVersion(ctx context.Context, in *CreateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets basic information about all the versions of a model. + // + // If you expect that a model has a lot of versions, or if you need to handle + // only a limited number of results at a time, you can request that the list + // be retrieved in batches (called pages): + ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) + // Gets information about a model version. + // + // Models can have multiple versions. You can call + // [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list) + // to get the same information that this method returns for all of the + // versions of a model. + GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*Version, error) + // Deletes a model version. + // + // Each model can have multiple versions deployed and in use at any given + // time. Use this method to remove a single version. 
+ // + // Note: You cannot delete the version that is set as the default version + // of the model unless it is the only remaining version. + DeleteVersion(ctx context.Context, in *DeleteVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Designates a version to be the default for the model. + // + // The default version is used for prediction requests made against the model + // that don't specify a version. + // + // The first version to be created for a model is automatically set as the + // default. You must make any subsequent changes to the default version + // setting manually using this method. + SetDefaultVersion(ctx context.Context, in *SetDefaultVersionRequest, opts ...grpc.CallOption) (*Version, error) +} + +type modelServiceClient struct { + cc *grpc.ClientConn +} + +func NewModelServiceClient(cc *grpc.ClientConn) ModelServiceClient { + return &modelServiceClient{cc} +} + +func (c *modelServiceClient) CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*Model, error) { + out := new(Model) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/CreateModel", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error) { + out := new(ListModelsResponse) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/ListModels", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error) { + out := new(Model) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/GetModel", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/DeleteModel", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) CreateVersion(ctx context.Context, in *CreateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/CreateVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) { + out := new(ListVersionsResponse) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/ListVersions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*Version, error) { + out := new(Version) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/GetVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) DeleteVersion(ctx context.Context, in *DeleteVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/DeleteVersion", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) SetDefaultVersion(ctx context.Context, in *SetDefaultVersionRequest, opts ...grpc.CallOption) (*Version, error) { + out := new(Version) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ModelService/SetDefaultVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ModelService service + +type ModelServiceServer interface { + // Creates a model which will later contain one or more versions. + // + // You must add at least one version before you can request predictions from + // the model. Add versions by calling + // [projects.models.versions.create](/ml/reference/rest/v1/projects.models.versions/create). + CreateModel(context.Context, *CreateModelRequest) (*Model, error) + // Lists the models in a project. + // + // Each project can contain multiple models, and each model can have multiple + // versions. + ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error) + // Gets information about a model, including its name, the description (if + // set), and the default version (if at least one version of the model has + // been deployed). + GetModel(context.Context, *GetModelRequest) (*Model, error) + // Deletes a model. + // + // You can only delete a model if there are no versions in it. You can delete + // versions by calling + // [projects.models.versions.delete](/ml/reference/rest/v1/projects.models.versions/delete). + DeleteModel(context.Context, *DeleteModelRequest) (*google_longrunning.Operation, error) + // Creates a new version of a model from a trained TensorFlow model. + // + // If the version created in the cloud by this call is the first deployed + // version of the specified model, it will be made the default version of the + // model. When you add a version to a model that already has one or more + // versions, the default version does not automatically change. 
If you want a + // new version to be the default, you must call + // [projects.models.versions.setDefault](/ml/reference/rest/v1/projects.models.versions/setDefault). + CreateVersion(context.Context, *CreateVersionRequest) (*google_longrunning.Operation, error) + // Gets basic information about all the versions of a model. + // + // If you expect that a model has a lot of versions, or if you need to handle + // only a limited number of results at a time, you can request that the list + // be retrieved in batches (called pages): + ListVersions(context.Context, *ListVersionsRequest) (*ListVersionsResponse, error) + // Gets information about a model version. + // + // Models can have multiple versions. You can call + // [projects.models.versions.list](/ml/reference/rest/v1/projects.models.versions/list) + // to get the same information that this method returns for all of the + // versions of a model. + GetVersion(context.Context, *GetVersionRequest) (*Version, error) + // Deletes a model version. + // + // Each model can have multiple versions deployed and in use at any given + // time. Use this method to remove a single version. + // + // Note: You cannot delete the version that is set as the default version + // of the model unless it is the only remaining version. + DeleteVersion(context.Context, *DeleteVersionRequest) (*google_longrunning.Operation, error) + // Designates a version to be the default for the model. + // + // The default version is used for prediction requests made against the model + // that don't specify a version. + // + // The first version to be created for a model is automatically set as the + // default. You must make any subsequent changes to the default version + // setting manually using this method. 
+ SetDefaultVersion(context.Context, *SetDefaultVersionRequest) (*Version, error) +} + +func RegisterModelServiceServer(s *grpc.Server, srv ModelServiceServer) { + s.RegisterService(&_ModelService_serviceDesc, srv) +} + +func _ModelService_CreateModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateModelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).CreateModel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ModelService/CreateModel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).CreateModel(ctx, req.(*CreateModelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_ListModels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListModelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).ListModels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ModelService/ListModels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).ListModels(ctx, req.(*ListModelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_GetModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetModelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).GetModel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ModelService/GetModel", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).GetModel(ctx, req.(*GetModelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_DeleteModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteModelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).DeleteModel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ModelService/DeleteModel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).DeleteModel(ctx, req.(*DeleteModelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_CreateVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).CreateVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ModelService/CreateVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).CreateVersion(ctx, req.(*CreateVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_ListVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).ListVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.cloud.ml.v1.ModelService/ListVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).ListVersions(ctx, req.(*ListVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).GetVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ModelService/GetVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).GetVersion(ctx, req.(*GetVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_DeleteVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).DeleteVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ModelService/DeleteVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).DeleteVersion(ctx, req.(*DeleteVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_SetDefaultVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetDefaultVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).SetDefaultVersion(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ModelService/SetDefaultVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).SetDefaultVersion(ctx, req.(*SetDefaultVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ModelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.ml.v1.ModelService", + HandlerType: (*ModelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateModel", + Handler: _ModelService_CreateModel_Handler, + }, + { + MethodName: "ListModels", + Handler: _ModelService_ListModels_Handler, + }, + { + MethodName: "GetModel", + Handler: _ModelService_GetModel_Handler, + }, + { + MethodName: "DeleteModel", + Handler: _ModelService_DeleteModel_Handler, + }, + { + MethodName: "CreateVersion", + Handler: _ModelService_CreateVersion_Handler, + }, + { + MethodName: "ListVersions", + Handler: _ModelService_ListVersions_Handler, + }, + { + MethodName: "GetVersion", + Handler: _ModelService_GetVersion_Handler, + }, + { + MethodName: "DeleteVersion", + Handler: _ModelService_DeleteVersion_Handler, + }, + { + MethodName: "SetDefaultVersion", + Handler: _ModelService_SetDefaultVersion_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/ml/v1/model_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/ml/v1/model_service.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 996 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x96, 0xdb, 0xa6, 0x4d, 0x5e, 0x36, 0xad, 0x3a, 0x14, 0xc8, 0x66, 0x29, 0x04, 0xaf, 0xda, + 0x86, 0x00, 0xb6, 0x52, 0x8a, 0x10, 0x59, 0x01, 0xd2, 0x52, 0x69, 0x39, 0xec, 0x8a, 0xca, 0xdd, + 0xe5, 0x80, 0x84, 0x2c, 0x6f, 0x32, 0x6b, 0x06, 0xec, 0x19, 0xe3, 0x19, 0x07, 0x58, 0x58, 0x21, + 0xc1, 0x91, 0x23, 0xdc, 0xf9, 0xa3, 0x38, 0x71, 0xe7, 
0xc6, 0x99, 0x3b, 0x9a, 0x1f, 0x4e, 0xed, + 0xc4, 0x89, 0x0b, 0x12, 0x37, 0xcf, 0x9b, 0xef, 0xcd, 0xfb, 0xe6, 0x7d, 0xef, 0xbd, 0x31, 0x1c, + 0x87, 0x8c, 0x85, 0x11, 0x76, 0x27, 0x11, 0xcb, 0xa6, 0x6e, 0x1c, 0xb9, 0xb3, 0x91, 0x1b, 0xb3, + 0x29, 0x8e, 0x7c, 0x8e, 0xd3, 0x19, 0x99, 0x60, 0x27, 0x49, 0x99, 0x60, 0x08, 0x69, 0x9c, 0xa3, + 0x70, 0x4e, 0x1c, 0x39, 0xb3, 0x51, 0xef, 0x25, 0xe3, 0x1b, 0x24, 0xc4, 0x0d, 0x28, 0x65, 0x22, + 0x10, 0x84, 0x51, 0xae, 0x3d, 0x7a, 0xcf, 0x17, 0x77, 0x33, 0xf1, 0xb9, 0x31, 0xdf, 0x36, 0xe6, + 0x88, 0xd1, 0x30, 0xcd, 0x28, 0x25, 0x34, 0x74, 0x59, 0x82, 0xd3, 0x92, 0xef, 0x2b, 0x06, 0xa4, + 0x56, 0x8f, 0xb3, 0x27, 0xae, 0x20, 0x31, 0xe6, 0x22, 0x88, 0x13, 0x0d, 0xb0, 0xff, 0xb0, 0xa0, + 0xf1, 0x40, 0xd2, 0x44, 0x08, 0xb6, 0x68, 0x10, 0xe3, 0xae, 0xd5, 0xb7, 0x06, 0x2d, 0x4f, 0x7d, + 0xa3, 0x3e, 0xb4, 0xa7, 0x98, 0x4f, 0x52, 0x92, 0xc8, 0x43, 0xbb, 0x1b, 0x6a, 0xab, 0x68, 0x42, + 0xe7, 0xb0, 0x37, 0xc5, 0x4f, 0x82, 0x2c, 0x12, 0xfe, 0x0c, 0xa7, 0x5c, 0xa2, 0x36, 0xfb, 0xd6, + 0xa0, 0x7d, 0x7a, 0xcb, 0x59, 0xbe, 0xa8, 0xf3, 0x89, 0x86, 0x78, 0xbb, 0xc6, 0xc7, 0xac, 0x51, + 0x17, 0x76, 0x52, 0x1c, 0x4a, 0xde, 0xdd, 0xad, 0xfe, 0xe6, 0xa0, 0xe5, 0xe5, 0x4b, 0x34, 0x86, + 0x9b, 0x8c, 0x46, 0x84, 0x62, 0x3f, 0x49, 0xf1, 0x94, 0x4c, 0x64, 0x50, 0x3f, 0x62, 0x61, 0x48, + 0x68, 0xd8, 0x6d, 0xf4, 0xad, 0x41, 0xd3, 0x7b, 0x51, 0x03, 0x2e, 0xe6, 0xfb, 0xf7, 0xf5, 0xb6, + 0xfd, 0xf7, 0x06, 0xec, 0xe4, 0x11, 0xfe, 0xdb, 0xed, 0x0e, 0x01, 0x08, 0xf7, 0x0d, 0x59, 0x75, + 0xb1, 0xa6, 0xd7, 0x22, 0xfc, 0x5c, 0x1b, 0xd0, 0x11, 0xec, 0x4e, 0x71, 0x12, 0xb1, 0x6f, 0x63, + 0x4c, 0x85, 0x9f, 0xa5, 0xa4, 0xbb, 0xa5, 0xce, 0xe8, 0x5c, 0x59, 0x1f, 0xa5, 0x04, 0xdd, 0x81, + 0xf6, 0x24, 0xc5, 0x81, 0xc0, 0xbe, 0xcc, 0xbe, 0x62, 0xdd, 0x3e, 0xed, 0xe5, 0xf9, 0xc9, 0xa5, + 0x71, 0x1e, 0xe6, 0xd2, 0x78, 0xa0, 0xe1, 0xd2, 0x80, 0xde, 0x87, 0x4e, 0x14, 0x70, 0xe1, 0x67, + 0xdc, 0xb8, 0x6f, 0xd7, 0xba, 0xb7, 0xa5, 0xc3, 0x23, 0xae, 0xfd, 0x4f, 0x60, 0x2f, 0xcd, 
0xa8, + 0xf4, 0x9c, 0x0b, 0xd4, 0x54, 0x24, 0x77, 0x8d, 0x39, 0xcf, 0xd0, 0x47, 0xb0, 0x1b, 0x07, 0x34, + 0x0b, 0x22, 0x9f, 0x4f, 0x82, 0x48, 0xa6, 0xb7, 0xa5, 0x22, 0xbd, 0x5a, 0x25, 0xe4, 0x03, 0x85, + 0xbc, 0xd4, 0x40, 0xaf, 0x13, 0x17, 0x97, 0xf6, 0x11, 0x74, 0x4a, 0xfb, 0xe8, 0x00, 0x1a, 0x94, + 0x4d, 0x31, 0x57, 0xd9, 0x6f, 0x78, 0x7a, 0x61, 0x7f, 0x06, 0xe8, 0x43, 0x75, 0x4f, 0x55, 0x7f, + 0x1e, 0xfe, 0x2a, 0xc3, 0x5c, 0xa0, 0x17, 0x60, 0x3b, 0x09, 0x52, 0x4c, 0x85, 0x91, 0xca, 0xac, + 0x90, 0x0b, 0x0d, 0xd5, 0x4e, 0x4a, 0xa6, 0xf6, 0xe9, 0xcd, 0x4a, 0x56, 0xea, 0x20, 0x8d, 0xb3, + 0x43, 0xd8, 0xbf, 0x4f, 0xb8, 0x50, 0x36, 0x5e, 0x77, 0xfa, 0x21, 0x40, 0x12, 0x84, 0xd8, 0x17, + 0xec, 0x4b, 0x4c, 0x8d, 0x8a, 0x2d, 0x69, 0x79, 0x28, 0x0d, 0xe8, 0x16, 0xa8, 0x85, 0xcf, 0xc9, + 0x53, 0xad, 0x5f, 0xc3, 0x6b, 0x4a, 0xc3, 0x25, 0x79, 0x8a, 0x6d, 0x06, 0xa8, 0x18, 0x88, 0x27, + 0x8c, 0x72, 0x8c, 0x46, 0xb0, 0xad, 0x78, 0xc8, 0x4b, 0x6f, 0xae, 0x27, 0x6c, 0x80, 0xe8, 0x18, + 0xf6, 0x28, 0xfe, 0x46, 0xf8, 0x05, 0x26, 0xba, 0x26, 0x3b, 0xd2, 0x7c, 0x91, 0xb3, 0xb1, 0x8f, + 0x60, 0xef, 0x1e, 0x16, 0xa5, 0xac, 0x55, 0x94, 0xb7, 0x3d, 0x00, 0x74, 0x8e, 0x23, 0xbc, 0x90, + 0xdf, 0x2a, 0x24, 0x86, 0x03, 0xad, 0x44, 0xde, 0x9f, 0x35, 0xd9, 0x7a, 0x1b, 0x76, 0xf2, 0x5a, + 0xda, 0xa8, 0x6f, 0xf6, 0x1c, 0x6b, 0x13, 0x78, 0x4e, 0x26, 0xca, 0xd8, 0xff, 0x57, 0x4d, 0xbe, + 0x86, 0x83, 0x72, 0x28, 0xa3, 0xca, 0x3b, 0xd0, 0x34, 0x6c, 0x72, 0x5d, 0xd6, 0x52, 0x9f, 0x83, + 0xaf, 0xad, 0xcd, 0x09, 0xec, 0xdf, 0xc3, 0x62, 0x21, 0x8f, 0x55, 0x39, 0x1f, 0xc2, 0x81, 0x56, + 0xe7, 0x1a, 0x58, 0x07, 0xba, 0x97, 0x58, 0x9c, 0x97, 0x66, 0xe6, 0x1a, 0xfc, 0xe9, 0x5f, 0x2d, + 0xb8, 0xa1, 0x44, 0xbf, 0xd4, 0x4f, 0x0f, 0xfa, 0x01, 0xda, 0x85, 0x56, 0x43, 0xc7, 0x55, 0x77, + 0x5e, 0xee, 0xc5, 0xde, 0xea, 0x9a, 0xb5, 0xdf, 0xfc, 0xf1, 0xf7, 0x3f, 0x7f, 0xd9, 0x38, 0xb1, + 0x5f, 0x96, 0xef, 0xdc, 0x77, 0x5a, 0xb1, 0xf7, 0x92, 0x94, 0x7d, 0x81, 0x27, 0x82, 0xbb, 0xc3, + 0x67, 0xfa, 0xed, 0xe3, 0x63, 
0xdd, 0x8c, 0xe8, 0x27, 0x0b, 0xe0, 0xaa, 0x49, 0xd0, 0x51, 0xd5, + 0xc1, 0x4b, 0xdd, 0xda, 0x3b, 0xae, 0x83, 0x69, 0x55, 0xed, 0x63, 0x45, 0xa6, 0x8f, 0x6a, 0xc8, + 0xa0, 0x14, 0x9a, 0x79, 0xe3, 0xa0, 0xdb, 0x55, 0x67, 0x2f, 0xb4, 0xd5, 0xba, 0x04, 0x94, 0x63, + 0xca, 0xb4, 0x17, 0x22, 0x9a, 0x80, 0xee, 0xf0, 0x19, 0xfa, 0x1e, 0xda, 0x85, 0x2e, 0xac, 0x4e, + 0xfd, 0x72, 0x9b, 0xf6, 0x0e, 0x73, 0x5c, 0xe1, 0x79, 0x77, 0x3e, 0xce, 0x9f, 0xf7, 0x3c, 0xfa, + 0xb0, 0x2e, 0xfa, 0xaf, 0x16, 0x74, 0x4a, 0xad, 0x8d, 0x06, 0xab, 0xb5, 0x2f, 0x57, 0x56, 0x1d, + 0x85, 0xb1, 0xa2, 0x70, 0x66, 0xbf, 0x56, 0x9d, 0xf4, 0x2b, 0x12, 0x6e, 0xde, 0x44, 0xe3, 0x7c, + 0x12, 0x48, 0x5a, 0x37, 0x8a, 0xfd, 0x89, 0x4e, 0x56, 0x29, 0xbd, 0x30, 0x2c, 0x7a, 0x83, 0x7a, + 0xa0, 0x29, 0x8a, 0x91, 0xe2, 0xf7, 0x3a, 0xba, 0x3e, 0x3f, 0x55, 0xa5, 0x57, 0xdd, 0x5b, 0x5d, + 0xa5, 0x4b, 0xdd, 0xdd, 0x5b, 0x37, 0x41, 0x16, 0x58, 0xac, 0x12, 0x6a, 0x4e, 0x41, 0x6a, 0xf6, + 0xb3, 0x05, 0x9d, 0xd2, 0x68, 0xa8, 0xd6, 0xac, 0x6a, 0x7a, 0xd4, 0x69, 0x66, 0xd8, 0x0c, 0xff, + 0x05, 0x9b, 0xdf, 0x2c, 0xd8, 0x5f, 0x1a, 0x3e, 0xe8, 0x8d, 0x2a, 0x46, 0xab, 0x66, 0xd4, 0xfa, + 0x0c, 0x7d, 0xa0, 0x38, 0xbd, 0x6b, 0x9f, 0x5d, 0x9b, 0xd3, 0x98, 0xcf, 0x03, 0x8d, 0xad, 0xe1, + 0xdd, 0x10, 0x7a, 0x13, 0x16, 0x2f, 0x85, 0x08, 0x12, 0xe2, 0xcc, 0x46, 0x77, 0xf7, 0x8b, 0x83, + 0xf0, 0x42, 0xfe, 0x2c, 0x5d, 0x58, 0x9f, 0x9e, 0x19, 0x70, 0xc8, 0xa2, 0x80, 0x86, 0x0e, 0x4b, + 0x43, 0x37, 0xc4, 0x54, 0xfd, 0x4a, 0xb9, 0x7a, 0x2b, 0x48, 0x08, 0x2f, 0xfe, 0xcb, 0xdf, 0x89, + 0xa3, 0xc7, 0xdb, 0x0a, 0xf0, 0xd6, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x07, 0x42, 0xf7, + 0xeb, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/operation_metadata.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/operation_metadata.pb.go new file mode 100644 index 000000000..a41117d50 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/operation_metadata.pb.go @@ -0,0 
+1,161 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1/operation_metadata.proto + +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The operation type. +type OperationMetadata_OperationType int32 + +const ( + // Unspecified operation type. + OperationMetadata_OPERATION_TYPE_UNSPECIFIED OperationMetadata_OperationType = 0 + // An operation to create a new version. + OperationMetadata_CREATE_VERSION OperationMetadata_OperationType = 1 + // An operation to delete an existing version. + OperationMetadata_DELETE_VERSION OperationMetadata_OperationType = 2 + // An operation to delete an existing model. + OperationMetadata_DELETE_MODEL OperationMetadata_OperationType = 3 +) + +var OperationMetadata_OperationType_name = map[int32]string{ + 0: "OPERATION_TYPE_UNSPECIFIED", + 1: "CREATE_VERSION", + 2: "DELETE_VERSION", + 3: "DELETE_MODEL", +} +var OperationMetadata_OperationType_value = map[string]int32{ + "OPERATION_TYPE_UNSPECIFIED": 0, + "CREATE_VERSION": 1, + "DELETE_VERSION": 2, + "DELETE_MODEL": 3, +} + +func (x OperationMetadata_OperationType) String() string { + return proto.EnumName(OperationMetadata_OperationType_name, int32(x)) +} +func (OperationMetadata_OperationType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 0} +} + +// Represents the metadata of the long-running operation. +type OperationMetadata struct { + // The time the operation was submitted. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The time operation processing started. 
+ StartTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time operation processing completed. + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Indicates whether a request to cancel this operation has been made. + IsCancellationRequested bool `protobuf:"varint,4,opt,name=is_cancellation_requested,json=isCancellationRequested" json:"is_cancellation_requested,omitempty"` + // The operation type. + OperationType OperationMetadata_OperationType `protobuf:"varint,5,opt,name=operation_type,json=operationType,enum=google.cloud.ml.v1.OperationMetadata_OperationType" json:"operation_type,omitempty"` + // Contains the name of the model associated with the operation. + ModelName string `protobuf:"bytes,6,opt,name=model_name,json=modelName" json:"model_name,omitempty"` + // Contains the version associated with the operation. + Version *Version `protobuf:"bytes,7,opt,name=version" json:"version,omitempty"` +} + +func (m *OperationMetadata) Reset() { *m = OperationMetadata{} } +func (m *OperationMetadata) String() string { return proto.CompactTextString(m) } +func (*OperationMetadata) ProtoMessage() {} +func (*OperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *OperationMetadata) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *OperationMetadata) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *OperationMetadata) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *OperationMetadata) GetIsCancellationRequested() bool { + if m != nil { + return m.IsCancellationRequested + } + return false +} + +func (m *OperationMetadata) GetOperationType() OperationMetadata_OperationType { + if m != nil { + return 
m.OperationType + } + return OperationMetadata_OPERATION_TYPE_UNSPECIFIED +} + +func (m *OperationMetadata) GetModelName() string { + if m != nil { + return m.ModelName + } + return "" +} + +func (m *OperationMetadata) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func init() { + proto.RegisterType((*OperationMetadata)(nil), "google.cloud.ml.v1.OperationMetadata") + proto.RegisterEnum("google.cloud.ml.v1.OperationMetadata_OperationType", OperationMetadata_OperationType_name, OperationMetadata_OperationType_value) +} + +func init() { proto.RegisterFile("google/cloud/ml/v1/operation_metadata.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 454 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x5f, 0x6b, 0xdb, 0x30, + 0x14, 0xc5, 0xe7, 0xb6, 0x6b, 0x1a, 0x75, 0x0d, 0x99, 0x1e, 0xb6, 0xcc, 0xfb, 0x17, 0xfa, 0x30, + 0x02, 0x03, 0x99, 0xb4, 0xdb, 0xc3, 0xd6, 0xa7, 0x36, 0xd1, 0x20, 0xd0, 0xc6, 0xc6, 0xf5, 0x0a, + 0xeb, 0x8b, 0x51, 0xed, 0x3b, 0x23, 0x90, 0x25, 0xcf, 0x52, 0x0c, 0xfd, 0x2c, 0xfb, 0xb2, 0x23, + 0x92, 0x4d, 0x33, 0x52, 0xe8, 0xa3, 0xce, 0xfd, 0x9d, 0xab, 0xab, 0x7b, 0x84, 0x3e, 0x17, 0x4a, + 0x15, 0x02, 0x82, 0x4c, 0xa8, 0x55, 0x1e, 0x94, 0x22, 0x68, 0xa6, 0x81, 0xaa, 0xa0, 0x66, 0x86, + 0x2b, 0x99, 0x96, 0x60, 0x58, 0xce, 0x0c, 0x23, 0x55, 0xad, 0x8c, 0xc2, 0xd8, 0xc1, 0xc4, 0xc2, + 0xa4, 0x14, 0xa4, 0x99, 0xfa, 0xef, 0xda, 0x06, 0xac, 0xe2, 0x01, 0x93, 0x52, 0x19, 0xeb, 0xd4, + 0xce, 0xe1, 0x7f, 0x7a, 0xa4, 0x7d, 0xa9, 0x72, 0x10, 0xa9, 0x86, 0xba, 0xe1, 0x19, 0xb4, 0xdc, + 0xc7, 0x96, 0xb3, 0xa7, 0xbb, 0xd5, 0xef, 0xc0, 0xf0, 0x12, 0xb4, 0x61, 0x65, 0xe5, 0x80, 0xe3, + 0xbf, 0x7b, 0xe8, 0x65, 0xd8, 0xcd, 0x75, 0xd5, 0x8e, 0x85, 0xcf, 0xd0, 0x61, 0x56, 0x03, 0x33, + 0x90, 0xae, 0xf9, 0x91, 0x37, 0xf6, 0x26, 0x87, 0x27, 0x3e, 0x69, 0xc7, 0xec, 0x9a, 0x91, 0xa4, + 0x6b, 0x16, 0x23, 0x87, 0xaf, 0x05, 0xfc, 0x0d, 0x21, 0x6d, 0x58, 0x6d, 0x9c, 
0x77, 0xe7, 0x49, + 0x6f, 0xdf, 0xd2, 0xd6, 0xfa, 0x15, 0x1d, 0x80, 0xcc, 0x9d, 0x71, 0xf7, 0x49, 0x63, 0x0f, 0x64, + 0x6e, 0x6d, 0xdf, 0xd1, 0x1b, 0xae, 0xd3, 0x8c, 0xc9, 0x0c, 0x84, 0x70, 0x1b, 0xae, 0xe1, 0xcf, + 0x0a, 0xb4, 0x81, 0x7c, 0xb4, 0x37, 0xf6, 0x26, 0x07, 0xf1, 0x6b, 0xae, 0x67, 0x1b, 0xf5, 0xb8, + 0x2b, 0xe3, 0x5b, 0x34, 0x78, 0xc8, 0xc5, 0xdc, 0x57, 0x30, 0x7a, 0x3e, 0xf6, 0x26, 0x83, 0x93, + 0x53, 0xb2, 0x1d, 0x0a, 0xd9, 0xda, 0xd4, 0x83, 0x92, 0xdc, 0x57, 0x10, 0x1f, 0xa9, 0xcd, 0x23, + 0x7e, 0x8f, 0x90, 0x0b, 0x45, 0xb2, 0x12, 0x46, 0xfb, 0x63, 0x6f, 0xd2, 0x8f, 0xfb, 0x56, 0x59, + 0x32, 0xfb, 0xda, 0x5e, 0x03, 0xb5, 0xe6, 0x4a, 0x8e, 0x7a, 0xf6, 0xb1, 0x6f, 0x1f, 0xbb, 0xf3, + 0xc6, 0x21, 0x71, 0xc7, 0x1e, 0x73, 0x74, 0xf4, 0xdf, 0xad, 0xf8, 0x03, 0xf2, 0xc3, 0x88, 0xc6, + 0xe7, 0xc9, 0x22, 0x5c, 0xa6, 0xc9, 0xaf, 0x88, 0xa6, 0x3f, 0x97, 0xd7, 0x11, 0x9d, 0x2d, 0x7e, + 0x2c, 0xe8, 0x7c, 0xf8, 0x0c, 0x63, 0x34, 0x98, 0xc5, 0xf4, 0x3c, 0xa1, 0xe9, 0x0d, 0x8d, 0xaf, + 0x17, 0xe1, 0x72, 0xe8, 0xad, 0xb5, 0x39, 0xbd, 0xa4, 0x1b, 0xda, 0x0e, 0x1e, 0xa2, 0x17, 0xad, + 0x76, 0x15, 0xce, 0xe9, 0xe5, 0x70, 0xf7, 0x42, 0x20, 0x3f, 0x53, 0xe5, 0xd6, 0x54, 0xac, 0xe2, + 0xa4, 0x99, 0x5e, 0xbc, 0xda, 0x5a, 0x47, 0xb4, 0x0e, 0x29, 0xf2, 0x6e, 0xbf, 0xb4, 0x8e, 0x42, + 0x09, 0x26, 0x0b, 0xa2, 0xea, 0x22, 0x28, 0x40, 0xda, 0x08, 0x03, 0x57, 0x62, 0x15, 0xd7, 0x9b, + 0xbf, 0xf7, 0xac, 0x14, 0x77, 0xfb, 0x16, 0x38, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x03, 0xf9, + 0xcc, 0xf1, 0x3c, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/prediction_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/prediction_service.pb.go new file mode 100644 index 000000000..9d27c9c40 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/prediction_service.pb.go @@ -0,0 +1,343 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/cloud/ml/v1/prediction_service.proto + +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api3 "google.golang.org/genproto/googleapis/api/httpbody" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request for predictions to be issued against a trained model. +// +// The body of the request is a single JSON object with a single top-level +// field: +// +//
+//
instances
+//
A JSON array containing values representing the instances to use for +// prediction.
+//
+// +// The structure of each element of the instances list is determined by your +// model's input definition. Instances can include named inputs or can contain +// only unlabeled values. +// +// Not all data includes named inputs. Some instances will be simple +// JSON values (boolean, number, or string). However, instances are often lists +// of simple values, or complex nested lists. Here are some examples of request +// bodies: +// +// CSV data with each row encoded as a string value: +//
+// {"instances": ["1.0,true,\\"x\\"", "-2.0,false,\\"y\\""]}
+// 
+// Plain text: +//
+// {"instances": ["the quick brown fox", "la bruja le dio"]}
+// 
+// Sentences encoded as lists of words (vectors of strings): +//
+// {
+//   "instances": [
+//     ["the","quick","brown"],
+//     ["la","bruja","le"],
+//     ...
+//   ]
+// }
+// 
+// Floating point scalar values: +//
+// {"instances": [0.0, 1.1, 2.2]}
+// 
+// Vectors of integers: +//
+// {
+//   "instances": [
+//     [0, 1, 2],
+//     [3, 4, 5],
+//     ...
+//   ]
+// }
+// 
+// Tensors (in this case, two-dimensional tensors): +//
+// {
+//   "instances": [
+//     [
+//       [0, 1, 2],
+//       [3, 4, 5]
+//     ],
+//     ...
+//   ]
+// }
+// 
+// Images can be represented different ways. In this encoding scheme the first +// two dimensions represent the rows and columns of the image, and the third +// contains lists (vectors) of the R, G, and B values for each pixel. +//
+// {
+//   "instances": [
+//     [
+//       [
+//         [138, 30, 66],
+//         [130, 20, 56],
+//         ...
+//       ],
+//       [
+//         [126, 38, 61],
+//         [122, 24, 57],
+//         ...
+//       ],
+//       ...
+//     ],
+//     ...
+//   ]
+// }
+// 
+// JSON strings must be encoded as UTF-8. To send binary data, you must +// base64-encode the data and mark it as binary. To mark a JSON string +// as binary, replace it with a JSON object with a single attribute named `b64`: +//
{"b64": "..."} 
+// For example: +// +// Two Serialized tf.Examples (fake data, for illustrative purposes only): +//
+// {"instances": [{"b64": "X5ad6u"}, {"b64": "IA9j4nx"}]}
+// 
+// Two JPEG image byte strings (fake data, for illustrative purposes only): +//
+// {"instances": [{"b64": "ASa8asdf"}, {"b64": "JLK7ljk3"}]}
+// 
+// If your data includes named references, format each instance as a JSON object +// with the named references as the keys: +// +// JSON input data to be preprocessed: +//
+// {
+//   "instances": [
+//     {
+//       "a": 1.0,
+//       "b": true,
+//       "c": "x"
+//     },
+//     {
+//       "a": -2.0,
+//       "b": false,
+//       "c": "y"
+//     }
+//   ]
+// }
+// 
+// Some models have an underlying TensorFlow graph that accepts multiple input +// tensors. In this case, you should use the names of JSON name/value pairs to +// identify the input tensors, as shown in the following exmaples: +// +// For a graph with input tensor aliases "tag" (string) and "image" +// (base64-encoded string): +//
+// {
+//   "instances": [
+//     {
+//       "tag": "beach",
+//       "image": {"b64": "ASa8asdf"}
+//     },
+//     {
+//       "tag": "car",
+//       "image": {"b64": "JLK7ljk3"}
+//     }
+//   ]
+// }
+// 
+// For a graph with input tensor aliases "tag" (string) and "image" +// (3-dimensional array of 8-bit ints): +//
+// {
+//   "instances": [
+//     {
+//       "tag": "beach",
+//       "image": [
+//         [
+//           [138, 30, 66],
+//           [130, 20, 56],
+//           ...
+//         ],
+//         [
+//           [126, 38, 61],
+//           [122, 24, 57],
+//           ...
+//         ],
+//         ...
+//       ]
+//     },
+//     {
+//       "tag": "car",
+//       "image": [
+//         [
+//           [255, 0, 102],
+//           [255, 0, 97],
+//           ...
+//         ],
+//         [
+//           [254, 1, 101],
+//           [254, 2, 93],
+//           ...
+//         ],
+//         ...
+//       ]
+//     },
+//     ...
+//   ]
+// }
+// 
+// If the call is successful, the response body will contain one prediction +// entry per instance in the request body. If prediction fails for any +// instance, the response body will contain no predictions and will contian +// a single error entry instead. +type PredictRequest struct { + // Required. The resource name of a model or a version. + // + // Authorization: requires `Viewer` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // + // Required. The prediction request body. + HttpBody *google_api3.HttpBody `protobuf:"bytes,2,opt,name=http_body,json=httpBody" json:"http_body,omitempty"` +} + +func (m *PredictRequest) Reset() { *m = PredictRequest{} } +func (m *PredictRequest) String() string { return proto.CompactTextString(m) } +func (*PredictRequest) ProtoMessage() {} +func (*PredictRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *PredictRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PredictRequest) GetHttpBody() *google_api3.HttpBody { + if m != nil { + return m.HttpBody + } + return nil +} + +func init() { + proto.RegisterType((*PredictRequest)(nil), "google.cloud.ml.v1.PredictRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for OnlinePredictionService service + +type OnlinePredictionServiceClient interface { + // Performs prediction on the data in the request. 
+ // + // **** REMOVE FROM GENERATED DOCUMENTATION + Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*google_api3.HttpBody, error) +} + +type onlinePredictionServiceClient struct { + cc *grpc.ClientConn +} + +func NewOnlinePredictionServiceClient(cc *grpc.ClientConn) OnlinePredictionServiceClient { + return &onlinePredictionServiceClient{cc} +} + +func (c *onlinePredictionServiceClient) Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*google_api3.HttpBody, error) { + out := new(google_api3.HttpBody) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.OnlinePredictionService/Predict", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for OnlinePredictionService service + +type OnlinePredictionServiceServer interface { + // Performs prediction on the data in the request. + // + // **** REMOVE FROM GENERATED DOCUMENTATION + Predict(context.Context, *PredictRequest) (*google_api3.HttpBody, error) +} + +func RegisterOnlinePredictionServiceServer(s *grpc.Server, srv OnlinePredictionServiceServer) { + s.RegisterService(&_OnlinePredictionService_serviceDesc, srv) +} + +func _OnlinePredictionService_Predict_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PredictRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OnlinePredictionServiceServer).Predict(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.OnlinePredictionService/Predict", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OnlinePredictionServiceServer).Predict(ctx, req.(*PredictRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _OnlinePredictionService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.ml.v1.OnlinePredictionService", + 
HandlerType: (*OnlinePredictionServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Predict", + Handler: _OnlinePredictionService_Predict_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/ml/v1/prediction_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/ml/v1/prediction_service.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 308 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0x4f, 0x4b, 0xfb, 0x30, + 0x18, 0xa6, 0xe3, 0xc7, 0x4f, 0x17, 0xc1, 0x43, 0x10, 0x9d, 0x45, 0x64, 0xd4, 0xcb, 0x9c, 0x90, + 0xd0, 0xe9, 0x69, 0xe2, 0x65, 0x27, 0x6f, 0x96, 0x79, 0x10, 0xbc, 0x8c, 0xac, 0x0d, 0x59, 0x24, + 0xcd, 0x1b, 0xdb, 0xac, 0x30, 0xc4, 0x8b, 0x37, 0xcf, 0x7e, 0x34, 0xbf, 0x82, 0x1f, 0x44, 0xd2, + 0x04, 0x99, 0xd4, 0xdb, 0x4b, 0xde, 0xe7, 0x79, 0x9f, 0x3f, 0x41, 0x17, 0x02, 0x40, 0x28, 0x4e, + 0x73, 0x05, 0xeb, 0x82, 0x96, 0x8a, 0x36, 0x29, 0x35, 0x15, 0x2f, 0x64, 0x6e, 0x25, 0xe8, 0x45, + 0xcd, 0xab, 0x46, 0xe6, 0x9c, 0x98, 0x0a, 0x2c, 0x60, 0xec, 0xc1, 0xa4, 0x05, 0x93, 0x52, 0x91, + 0x26, 0x8d, 0x4f, 0xc2, 0x01, 0x66, 0x24, 0x65, 0x5a, 0x83, 0x65, 0x8e, 0x58, 0x7b, 0x46, 0x7c, + 0xbc, 0xb5, 0x5d, 0x59, 0x6b, 0x96, 0x50, 0x6c, 0xfc, 0x2a, 0x79, 0x40, 0xfb, 0x99, 0x17, 0x9a, + 0xf3, 0xe7, 0x35, 0xaf, 0x2d, 0xc6, 0xe8, 0x9f, 0x66, 0x25, 0x1f, 0x44, 0xc3, 0x68, 0xd4, 0x9f, + 0xb7, 0x33, 0x4e, 0x51, 0xdf, 0xf1, 0x16, 0x8e, 0x38, 0xe8, 0x0d, 0xa3, 0xd1, 0xde, 0xe4, 0x80, + 0x04, 0x1b, 0xcc, 0x48, 0x72, 0x6b, 0xad, 0x99, 0x41, 0xb1, 0x99, 0xef, 0xae, 0xc2, 0x34, 0x79, + 0x8f, 0xd0, 0xd1, 0x9d, 0x56, 0x52, 0xf3, 0xec, 0x27, 0xc8, 0xbd, 0xcf, 0x81, 0x35, 0xda, 0x09, + 0x8f, 0x38, 0x21, 0xdd, 0x34, 0xe4, 0xb7, 0xa3, 0xf8, 0x4f, 0xa9, 0xe4, 0xfc, 0xed, 0xf3, 0xeb, + 0xa3, 0x77, 0x96, 0x9c, 0xba, 0xb2, 0x5e, 0x9c, 0xcd, 0x1b, 0x53, 0xc1, 0x13, 0xcf, 0x6d, 0x4d, + 0xc7, 0xe3, 0xd7, 0x69, 0xe8, 0x6f, 0x1a, 0x8d, 0x67, 0x0a, 0xc5, 0x39, 
0x94, 0x1d, 0x25, 0x77, + 0xae, 0x49, 0x67, 0x87, 0x1d, 0x83, 0x99, 0xab, 0x26, 0x8b, 0x1e, 0xaf, 0x02, 0x43, 0x80, 0x62, + 0x5a, 0x10, 0xa8, 0x04, 0x15, 0x5c, 0xb7, 0xc5, 0x51, 0xbf, 0x62, 0x46, 0xd6, 0xdb, 0xbf, 0x76, + 0x5d, 0xaa, 0xe5, 0xff, 0x16, 0x70, 0xf9, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x81, 0x8e, 0x25, 0xca, + 0xd5, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/project_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/project_service.pb.go new file mode 100644 index 000000000..751bce74e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1/project_service.pb.go @@ -0,0 +1,177 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1/project_service.proto + +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Requests service account information associated with a project. +type GetConfigRequest struct { + // Required. The project name. + // + // Authorization: requires `Viewer` role on the specified project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetConfigRequest) Reset() { *m = GetConfigRequest{} } +func (m *GetConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetConfigRequest) ProtoMessage() {} +func (*GetConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *GetConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Returns service account information associated with a project. 
+type GetConfigResponse struct { + // The service account Cloud ML uses to access resources in the project. + ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"` + // The project number for `service_account`. + ServiceAccountProject int64 `protobuf:"varint,2,opt,name=service_account_project,json=serviceAccountProject" json:"service_account_project,omitempty"` +} + +func (m *GetConfigResponse) Reset() { *m = GetConfigResponse{} } +func (m *GetConfigResponse) String() string { return proto.CompactTextString(m) } +func (*GetConfigResponse) ProtoMessage() {} +func (*GetConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *GetConfigResponse) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + +func (m *GetConfigResponse) GetServiceAccountProject() int64 { + if m != nil { + return m.ServiceAccountProject + } + return 0 +} + +func init() { + proto.RegisterType((*GetConfigRequest)(nil), "google.cloud.ml.v1.GetConfigRequest") + proto.RegisterType((*GetConfigResponse)(nil), "google.cloud.ml.v1.GetConfigResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ProjectManagementService service + +type ProjectManagementServiceClient interface { + // Get the service account information associated with your project. You need + // this information in order to grant the service account persmissions for + // the Google Cloud Storage location where you put your model training code + // for training the model with Google Cloud Machine Learning. 
+ GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) +} + +type projectManagementServiceClient struct { + cc *grpc.ClientConn +} + +func NewProjectManagementServiceClient(cc *grpc.ClientConn) ProjectManagementServiceClient { + return &projectManagementServiceClient{cc} +} + +func (c *projectManagementServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + out := new(GetConfigResponse) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1.ProjectManagementService/GetConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ProjectManagementService service + +type ProjectManagementServiceServer interface { + // Get the service account information associated with your project. You need + // this information in order to grant the service account persmissions for + // the Google Cloud Storage location where you put your model training code + // for training the model with Google Cloud Machine Learning. 
+ GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) +} + +func RegisterProjectManagementServiceServer(s *grpc.Server, srv ProjectManagementServiceServer) { + s.RegisterService(&_ProjectManagementService_serviceDesc, srv) +} + +func _ProjectManagementService_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProjectManagementServiceServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1.ProjectManagementService/GetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProjectManagementServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ProjectManagementService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.ml.v1.ProjectManagementService", + HandlerType: (*ProjectManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetConfig", + Handler: _ProjectManagementService_GetConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/ml/v1/project_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/ml/v1/project_service.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 319 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xbf, 0x4a, 0x43, 0x31, + 0x14, 0xc6, 0xb9, 0x55, 0x84, 0x66, 0xf0, 0x4f, 0x44, 0x2c, 0x45, 0xb0, 0x16, 0xb5, 0xc5, 0x21, + 0xa1, 0x2a, 0x0e, 0x8a, 0x83, 0x75, 0x70, 0x12, 0x4a, 0xdd, 0x5c, 0x4a, 0xbc, 0x1e, 0x43, 0x24, + 0xc9, 0x89, 0x37, 0xe9, 0x5d, 0xc4, 0x41, 0x5f, 0xc1, 0xdd, 0x97, 0xf2, 0x15, 0x7c, 0x10, 0xe9, + 0x4d, 0x94, 0xda, 0x0e, 0x6e, 0x87, 0x73, 0x7e, 0x5f, 0xf2, 0x7d, 0xe7, 
0x90, 0xae, 0x44, 0x94, + 0x1a, 0x78, 0xae, 0x71, 0x7c, 0xcf, 0x8d, 0xe6, 0x65, 0x8f, 0xbb, 0x02, 0x1f, 0x21, 0x0f, 0x23, + 0x0f, 0x45, 0xa9, 0x72, 0x60, 0xae, 0xc0, 0x80, 0x94, 0x46, 0x92, 0x55, 0x24, 0x33, 0x9a, 0x95, + 0xbd, 0xe6, 0x56, 0x52, 0x0b, 0xa7, 0xb8, 0xb0, 0x16, 0x83, 0x08, 0x0a, 0xad, 0x8f, 0x8a, 0xf6, + 0x3e, 0x59, 0xbd, 0x82, 0x70, 0x89, 0xf6, 0x41, 0xc9, 0x21, 0x3c, 0x8d, 0xc1, 0x07, 0x4a, 0xc9, + 0xa2, 0x15, 0x06, 0x1a, 0x59, 0x2b, 0xeb, 0xd6, 0x87, 0x55, 0xdd, 0x0e, 0x64, 0x6d, 0x8a, 0xf3, + 0x0e, 0xad, 0x07, 0xda, 0x21, 0x2b, 0xe9, 0xff, 0x91, 0xc8, 0x73, 0x1c, 0xdb, 0x90, 0x34, 0xcb, + 0xa9, 0x7d, 0x11, 0xbb, 0xf4, 0x84, 0x6c, 0xce, 0x80, 0xa3, 0x14, 0xa0, 0x51, 0x6b, 0x65, 0xdd, + 0x85, 0xe1, 0xc6, 0x5f, 0xc1, 0x20, 0x0e, 0x0f, 0x3f, 0x32, 0xd2, 0x48, 0xf5, 0xb5, 0xb0, 0x42, + 0x82, 0x01, 0x1b, 0x6e, 0x22, 0x4a, 0x5f, 0x33, 0x52, 0xff, 0xf5, 0x44, 0x77, 0xd9, 0x7c, 0x76, + 0x36, 0x1b, 0xad, 0xb9, 0xf7, 0x0f, 0x15, 0x83, 0xb5, 0x3b, 0x6f, 0x9f, 0x5f, 0xef, 0xb5, 0x1d, + 0xba, 0x3d, 0x59, 0xf5, 0xf3, 0x64, 0x01, 0xe7, 0xc9, 0xaf, 0xe7, 0x07, 0x2f, 0xa7, 0xf2, 0x47, + 0xd0, 0x57, 0xa4, 0x99, 0xa3, 0x99, 0x7b, 0x54, 0x38, 0xc5, 0xca, 0x5e, 0x7f, 0x3d, 0x79, 0x4f, + 0x8e, 0x07, 0x93, 0x8d, 0x0f, 0xb2, 0xdb, 0xe3, 0x84, 0x4b, 0xd4, 0xc2, 0x4a, 0x86, 0x85, 0xe4, + 0x12, 0x6c, 0x75, 0x0f, 0x1e, 0x47, 0xc2, 0x29, 0x3f, 0x7d, 0xee, 0x33, 0xa3, 0xef, 0x96, 0x2a, + 0xe0, 0xe8, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xa5, 0x43, 0x33, 0x0e, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/job_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/job_service.pb.go new file mode 100644 index 000000000..df13ca5e7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/job_service.pb.go @@ -0,0 +1,1823 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1beta1/job_service.proto + +/* +Package ml is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/ml/v1beta1/job_service.proto + google/cloud/ml/v1beta1/model_service.proto + google/cloud/ml/v1beta1/operation_metadata.proto + google/cloud/ml/v1beta1/prediction_service.proto + google/cloud/ml/v1beta1/project_service.proto + +It has these top-level messages: + TrainingInput + HyperparameterSpec + ParameterSpec + HyperparameterOutput + TrainingOutput + PredictionInput + PredictionOutput + Job + CreateJobRequest + ListJobsRequest + ListJobsResponse + GetJobRequest + CancelJobRequest + Model + Version + ManualScaling + CreateModelRequest + ListModelsRequest + ListModelsResponse + GetModelRequest + DeleteModelRequest + CreateVersionRequest + ListVersionsRequest + ListVersionsResponse + GetVersionRequest + DeleteVersionRequest + SetDefaultVersionRequest + OperationMetadata + PredictRequest + GetConfigRequest + GetConfigResponse +*/ +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A scale tier is an abstract representation of the resources Cloud ML +// will allocate to a training job. 
When selecting a scale tier for your +// training job, you should consider the size of your training dataset and +// the complexity of your model. As the tiers increase, virtual machines are +// added to handle your job, and the individual machines in the cluster +// generally have more memory and greater processing power than they do at +// lower tiers. The number of training units charged per hour of processing +// increases as tiers get more advanced. Refer to the +// [pricing guide](/ml/pricing) for more details. Note that in addition to +// incurring costs, your use of training resources is constrained by the +// [quota policy](/ml/quota). +type TrainingInput_ScaleTier int32 + +const ( + // A single worker instance. This tier is suitable for learning how to use + // Cloud ML, and for experimenting with new models using small datasets. + TrainingInput_BASIC TrainingInput_ScaleTier = 0 + // Many workers and a few parameter servers. + TrainingInput_STANDARD_1 TrainingInput_ScaleTier = 1 + // A large number of workers with many parameter servers. + TrainingInput_PREMIUM_1 TrainingInput_ScaleTier = 3 + // A single worker instance [with a GPU](ml/docs/how-tos/using-gpus). + TrainingInput_BASIC_GPU TrainingInput_ScaleTier = 6 + // The CUSTOM tier is not a set tier, but rather enables you to use your + // own cluster specification. When you use this tier, set values to + // configure your processing cluster according to these guidelines: + // + // * You _must_ set `TrainingInput.masterType` to specify the type + // of machine to use for your master node. This is the only required + // setting. + // + // * You _may_ set `TrainingInput.workerCount` to specify the number of + // workers to use. If you specify one or more workers, you _must_ also + // set `TrainingInput.workerType` to specify the type of machine to use + // for your worker nodes. + // + // * You _may_ set `TrainingInput.parameterServerCount` to specify the + // number of parameter servers to use. 
If you specify one or more + // parameter servers, you _must_ also set + // `TrainingInput.parameterServerType` to specify the type of machine to + // use for your parameter servers. + // + // Note that all of your workers must use the same machine type, which can + // be different from your parameter server type and master type. Your + // parameter servers must likewise use the same machine type, which can be + // different from your worker type and master type. + TrainingInput_CUSTOM TrainingInput_ScaleTier = 5 +) + +var TrainingInput_ScaleTier_name = map[int32]string{ + 0: "BASIC", + 1: "STANDARD_1", + 3: "PREMIUM_1", + 6: "BASIC_GPU", + 5: "CUSTOM", +} +var TrainingInput_ScaleTier_value = map[string]int32{ + "BASIC": 0, + "STANDARD_1": 1, + "PREMIUM_1": 3, + "BASIC_GPU": 6, + "CUSTOM": 5, +} + +func (x TrainingInput_ScaleTier) String() string { + return proto.EnumName(TrainingInput_ScaleTier_name, int32(x)) +} +func (TrainingInput_ScaleTier) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// The available types of optimization goals. +type HyperparameterSpec_GoalType int32 + +const ( + // Goal Type will default to maximize. + HyperparameterSpec_GOAL_TYPE_UNSPECIFIED HyperparameterSpec_GoalType = 0 + // Maximize the goal metric. + HyperparameterSpec_MAXIMIZE HyperparameterSpec_GoalType = 1 + // Minimize the goal metric. + HyperparameterSpec_MINIMIZE HyperparameterSpec_GoalType = 2 +) + +var HyperparameterSpec_GoalType_name = map[int32]string{ + 0: "GOAL_TYPE_UNSPECIFIED", + 1: "MAXIMIZE", + 2: "MINIMIZE", +} +var HyperparameterSpec_GoalType_value = map[string]int32{ + "GOAL_TYPE_UNSPECIFIED": 0, + "MAXIMIZE": 1, + "MINIMIZE": 2, +} + +func (x HyperparameterSpec_GoalType) String() string { + return proto.EnumName(HyperparameterSpec_GoalType_name, int32(x)) +} +func (HyperparameterSpec_GoalType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +// The type of the parameter. 
+type ParameterSpec_ParameterType int32 + +const ( + // You must specify a valid type. Using this unspecified type will result in + // an error. + ParameterSpec_PARAMETER_TYPE_UNSPECIFIED ParameterSpec_ParameterType = 0 + // Type for real-valued parameters. + ParameterSpec_DOUBLE ParameterSpec_ParameterType = 1 + // Type for integral parameters. + ParameterSpec_INTEGER ParameterSpec_ParameterType = 2 + // The parameter is categorical, with a value chosen from the categories + // field. + ParameterSpec_CATEGORICAL ParameterSpec_ParameterType = 3 + // The parameter is real valued, with a fixed set of feasible points. If + // `type==DISCRETE`, feasible_points must be provided, and + // {`min_value`, `max_value`} will be ignored. + ParameterSpec_DISCRETE ParameterSpec_ParameterType = 4 +) + +var ParameterSpec_ParameterType_name = map[int32]string{ + 0: "PARAMETER_TYPE_UNSPECIFIED", + 1: "DOUBLE", + 2: "INTEGER", + 3: "CATEGORICAL", + 4: "DISCRETE", +} +var ParameterSpec_ParameterType_value = map[string]int32{ + "PARAMETER_TYPE_UNSPECIFIED": 0, + "DOUBLE": 1, + "INTEGER": 2, + "CATEGORICAL": 3, + "DISCRETE": 4, +} + +func (x ParameterSpec_ParameterType) String() string { + return proto.EnumName(ParameterSpec_ParameterType_name, int32(x)) +} +func (ParameterSpec_ParameterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +// The type of scaling that should be applied to this parameter. +type ParameterSpec_ScaleType int32 + +const ( + // By default, no scaling is applied. + ParameterSpec_NONE ParameterSpec_ScaleType = 0 + // Scales the feasible space to (0, 1) linearly. + ParameterSpec_UNIT_LINEAR_SCALE ParameterSpec_ScaleType = 1 + // Scales the feasible space logarithmically to (0, 1). The entire feasible + // space must be strictly positive. + ParameterSpec_UNIT_LOG_SCALE ParameterSpec_ScaleType = 2 + // Scales the feasible space "reverse" logarithmically to (0, 1). 
The result + // is that values close to the top of the feasible space are spread out more + // than points near the bottom. The entire feasible space must be strictly + // positive. + ParameterSpec_UNIT_REVERSE_LOG_SCALE ParameterSpec_ScaleType = 3 +) + +var ParameterSpec_ScaleType_name = map[int32]string{ + 0: "NONE", + 1: "UNIT_LINEAR_SCALE", + 2: "UNIT_LOG_SCALE", + 3: "UNIT_REVERSE_LOG_SCALE", +} +var ParameterSpec_ScaleType_value = map[string]int32{ + "NONE": 0, + "UNIT_LINEAR_SCALE": 1, + "UNIT_LOG_SCALE": 2, + "UNIT_REVERSE_LOG_SCALE": 3, +} + +func (x ParameterSpec_ScaleType) String() string { + return proto.EnumName(ParameterSpec_ScaleType_name, int32(x)) +} +func (ParameterSpec_ScaleType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +// The format used to separate data instances in the source files. +type PredictionInput_DataFormat int32 + +const ( + // Unspecified format. + PredictionInput_DATA_FORMAT_UNSPECIFIED PredictionInput_DataFormat = 0 + // The source file is a text file with instances separated by the + // new-line character. + PredictionInput_TEXT PredictionInput_DataFormat = 1 + // The source file is a TFRecord file. + PredictionInput_TF_RECORD PredictionInput_DataFormat = 2 + // The source file is a GZIP-compressed TFRecord file. + PredictionInput_TF_RECORD_GZIP PredictionInput_DataFormat = 3 +) + +var PredictionInput_DataFormat_name = map[int32]string{ + 0: "DATA_FORMAT_UNSPECIFIED", + 1: "TEXT", + 2: "TF_RECORD", + 3: "TF_RECORD_GZIP", +} +var PredictionInput_DataFormat_value = map[string]int32{ + "DATA_FORMAT_UNSPECIFIED": 0, + "TEXT": 1, + "TF_RECORD": 2, + "TF_RECORD_GZIP": 3, +} + +func (x PredictionInput_DataFormat) String() string { + return proto.EnumName(PredictionInput_DataFormat_name, int32(x)) +} +func (PredictionInput_DataFormat) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 0} +} + +// Describes the job state. 
+type Job_State int32 + +const ( + // The job state is unspecified. + Job_STATE_UNSPECIFIED Job_State = 0 + // The job has been just created and processing has not yet begun. + Job_QUEUED Job_State = 1 + // The service is preparing to run the job. + Job_PREPARING Job_State = 2 + // The job is in progress. + Job_RUNNING Job_State = 3 + // The job completed successfully. + Job_SUCCEEDED Job_State = 4 + // The job failed. + // `error_message` should contain the details of the failure. + Job_FAILED Job_State = 5 + // The job is being cancelled. + // `error_message` should describe the reason for the cancellation. + Job_CANCELLING Job_State = 6 + // The job has been cancelled. + // `error_message` should describe the reason for the cancellation. + Job_CANCELLED Job_State = 7 +) + +var Job_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "QUEUED", + 2: "PREPARING", + 3: "RUNNING", + 4: "SUCCEEDED", + 5: "FAILED", + 6: "CANCELLING", + 7: "CANCELLED", +} +var Job_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "QUEUED": 1, + "PREPARING": 2, + "RUNNING": 3, + "SUCCEEDED": 4, + "FAILED": 5, + "CANCELLING": 6, + "CANCELLED": 7, +} + +func (x Job_State) String() string { + return proto.EnumName(Job_State_name, int32(x)) +} +func (Job_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// Represents input parameters for a training job. +type TrainingInput struct { + // Required. Specifies the machine types, the number of replicas for workers + // and parameter servers. + ScaleTier TrainingInput_ScaleTier `protobuf:"varint,1,opt,name=scale_tier,json=scaleTier,enum=google.cloud.ml.v1beta1.TrainingInput_ScaleTier" json:"scale_tier,omitempty"` + // Optional. Specifies the type of virtual machine to use for your training + // job's master worker. + // + // The following types are supported: + // + //
+ //
standard
+ //
+ // A basic machine configuration suitable for training simple models with + // small to moderate datasets. + //
+ //
large_model
+ //
+ // A machine with a lot of memory, specially suited for parameter servers + // when your model is large (having many hidden layers or layers with very + // large numbers of nodes). + //
+ //
complex_model_s
+ //
+ // A machine suitable for the master and workers of the cluster when your + // model requires more computation than the standard machine can handle + // satisfactorily. + //
+ //
complex_model_m
+ //
+ // A machine with roughly twice the number of cores and roughly double the + // memory of complex_model_s. + //
+ //
complex_model_l
+ //
+ // A machine with roughly twice the number of cores and roughly double the + // memory of complex_model_m. + //
+ //
standard_gpu
+ //
+ // A machine equivalent to standard that + // also includes a + // + // GPU that you can use in your trainer. + //
+ //
complex_model_m_gpu
+ //
+ // A machine equivalent to + // coplex_model_m that also includes + // four GPUs. + //
+ //
+ // + // You must set this value when `scaleTier` is set to `CUSTOM`. + MasterType string `protobuf:"bytes,2,opt,name=master_type,json=masterType" json:"master_type,omitempty"` + // Optional. Specifies the type of virtual machine to use for your training + // job's worker nodes. + // + // The supported values are the same as those described in the entry for + // `masterType`. + // + // This value must be present when `scaleTier` is set to `CUSTOM` and + // `workerCount` is greater than zero. + WorkerType string `protobuf:"bytes,3,opt,name=worker_type,json=workerType" json:"worker_type,omitempty"` + // Optional. Specifies the type of virtual machine to use for your training + // job's parameter server. + // + // The supported values are the same as those described in the entry for + // `master_type`. + // + // This value must be present when `scaleTier` is set to `CUSTOM` and + // `parameter_server_count` is greater than zero. + ParameterServerType string `protobuf:"bytes,4,opt,name=parameter_server_type,json=parameterServerType" json:"parameter_server_type,omitempty"` + // Optional. The number of worker replicas to use for the training job. Each + // replica in the cluster will be of the type specified in `worker_type`. + // + // This value can only be used when `scale_tier` is set to `CUSTOM`. If you + // set this value, you must also set `worker_type`. + WorkerCount int64 `protobuf:"varint,5,opt,name=worker_count,json=workerCount" json:"worker_count,omitempty"` + // Optional. The number of parameter server replicas to use for the training + // job. Each replica in the cluster will be of the type specified in + // `parameter_server_type`. + // + // This value can only be used when `scale_tier` is set to `CUSTOM`.If you + // set this value, you must also set `parameter_server_type`. + ParameterServerCount int64 `protobuf:"varint,6,opt,name=parameter_server_count,json=parameterServerCount" json:"parameter_server_count,omitempty"` + // Required. 
The Google Cloud Storage location of the packages with + // the training program and any additional dependencies. + PackageUris []string `protobuf:"bytes,7,rep,name=package_uris,json=packageUris" json:"package_uris,omitempty"` + // Required. The Python module name to run after installing the packages. + PythonModule string `protobuf:"bytes,8,opt,name=python_module,json=pythonModule" json:"python_module,omitempty"` + // Optional. Command line arguments to pass to the program. + Args []string `protobuf:"bytes,10,rep,name=args" json:"args,omitempty"` + // Optional. The set of Hyperparameters to tune. + Hyperparameters *HyperparameterSpec `protobuf:"bytes,12,opt,name=hyperparameters" json:"hyperparameters,omitempty"` + // Required. The Google Compute Engine region to run the training job in. + Region string `protobuf:"bytes,14,opt,name=region" json:"region,omitempty"` + // Optional. A Google Cloud Storage path in which to store training outputs + // and other data needed for training. This path is passed to your TensorFlow + // program as the 'job_dir' command-line argument. The benefit of specifying + // this field is that Cloud ML validates the path for use in training. + JobDir string `protobuf:"bytes,16,opt,name=job_dir,json=jobDir" json:"job_dir,omitempty"` + // Optional. The Google Cloud ML runtime version to use for training. If not + // set, Google Cloud ML will choose the latest stable version. 
+ RuntimeVersion string `protobuf:"bytes,15,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` +} + +func (m *TrainingInput) Reset() { *m = TrainingInput{} } +func (m *TrainingInput) String() string { return proto.CompactTextString(m) } +func (*TrainingInput) ProtoMessage() {} +func (*TrainingInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *TrainingInput) GetScaleTier() TrainingInput_ScaleTier { + if m != nil { + return m.ScaleTier + } + return TrainingInput_BASIC +} + +func (m *TrainingInput) GetMasterType() string { + if m != nil { + return m.MasterType + } + return "" +} + +func (m *TrainingInput) GetWorkerType() string { + if m != nil { + return m.WorkerType + } + return "" +} + +func (m *TrainingInput) GetParameterServerType() string { + if m != nil { + return m.ParameterServerType + } + return "" +} + +func (m *TrainingInput) GetWorkerCount() int64 { + if m != nil { + return m.WorkerCount + } + return 0 +} + +func (m *TrainingInput) GetParameterServerCount() int64 { + if m != nil { + return m.ParameterServerCount + } + return 0 +} + +func (m *TrainingInput) GetPackageUris() []string { + if m != nil { + return m.PackageUris + } + return nil +} + +func (m *TrainingInput) GetPythonModule() string { + if m != nil { + return m.PythonModule + } + return "" +} + +func (m *TrainingInput) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *TrainingInput) GetHyperparameters() *HyperparameterSpec { + if m != nil { + return m.Hyperparameters + } + return nil +} + +func (m *TrainingInput) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *TrainingInput) GetJobDir() string { + if m != nil { + return m.JobDir + } + return "" +} + +func (m *TrainingInput) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +// Represents a set of hyperparameters to optimize. 
+type HyperparameterSpec struct { + // Required. The type of goal to use for tuning. Available types are + // `MAXIMIZE` and `MINIMIZE`. + // + // Defaults to `MAXIMIZE`. + Goal HyperparameterSpec_GoalType `protobuf:"varint,1,opt,name=goal,enum=google.cloud.ml.v1beta1.HyperparameterSpec_GoalType" json:"goal,omitempty"` + // Required. The set of parameters to tune. + Params []*ParameterSpec `protobuf:"bytes,2,rep,name=params" json:"params,omitempty"` + // Optional. How many training trials should be attempted to optimize + // the specified hyperparameters. + // + // Defaults to one. + MaxTrials int32 `protobuf:"varint,3,opt,name=max_trials,json=maxTrials" json:"max_trials,omitempty"` + // Optional. The number of training trials to run concurrently. + // You can reduce the time it takes to perform hyperparameter tuning by adding + // trials in parallel. However, each trail only benefits from the information + // gained in completed trials. That means that a trial does not get access to + // the results of trials running at the same time, which could reduce the + // quality of the overall optimization. + // + // Each trial will use the same scale tier and machine types. + // + // Defaults to one. + MaxParallelTrials int32 `protobuf:"varint,4,opt,name=max_parallel_trials,json=maxParallelTrials" json:"max_parallel_trials,omitempty"` + // Optional. The Tensorflow summary tag name to use for optimizing trials. For + // current versions of Tensorflow, this tag name should exactly match what is + // shown in Tensorboard, including all scopes. For versions of Tensorflow + // prior to 0.12, this should be only the tag passed to tf.Summary. + // By default, "training/hptuning/metric" will be used. 
+ HyperparameterMetricTag string `protobuf:"bytes,5,opt,name=hyperparameter_metric_tag,json=hyperparameterMetricTag" json:"hyperparameter_metric_tag,omitempty"` +} + +func (m *HyperparameterSpec) Reset() { *m = HyperparameterSpec{} } +func (m *HyperparameterSpec) String() string { return proto.CompactTextString(m) } +func (*HyperparameterSpec) ProtoMessage() {} +func (*HyperparameterSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *HyperparameterSpec) GetGoal() HyperparameterSpec_GoalType { + if m != nil { + return m.Goal + } + return HyperparameterSpec_GOAL_TYPE_UNSPECIFIED +} + +func (m *HyperparameterSpec) GetParams() []*ParameterSpec { + if m != nil { + return m.Params + } + return nil +} + +func (m *HyperparameterSpec) GetMaxTrials() int32 { + if m != nil { + return m.MaxTrials + } + return 0 +} + +func (m *HyperparameterSpec) GetMaxParallelTrials() int32 { + if m != nil { + return m.MaxParallelTrials + } + return 0 +} + +func (m *HyperparameterSpec) GetHyperparameterMetricTag() string { + if m != nil { + return m.HyperparameterMetricTag + } + return "" +} + +// Represents a single hyperparameter to optimize. +type ParameterSpec struct { + // Required. The parameter name must be unique amongst all ParameterConfigs in + // a HyperparameterSpec message. E.g., "learning_rate". + ParameterName string `protobuf:"bytes,1,opt,name=parameter_name,json=parameterName" json:"parameter_name,omitempty"` + // Required. The type of the parameter. + Type ParameterSpec_ParameterType `protobuf:"varint,4,opt,name=type,enum=google.cloud.ml.v1beta1.ParameterSpec_ParameterType" json:"type,omitempty"` + // Required if type is `DOUBLE` or `INTEGER`. This field + // should be unset if type is `CATEGORICAL`. This value should be integers if + // type is INTEGER. + MinValue float64 `protobuf:"fixed64,2,opt,name=min_value,json=minValue" json:"min_value,omitempty"` + // Required if typeis `DOUBLE` or `INTEGER`. 
This field + // should be unset if type is `CATEGORICAL`. This value should be integers if + // type is `INTEGER`. + MaxValue float64 `protobuf:"fixed64,3,opt,name=max_value,json=maxValue" json:"max_value,omitempty"` + // Required if type is `CATEGORICAL`. The list of possible categories. + CategoricalValues []string `protobuf:"bytes,5,rep,name=categorical_values,json=categoricalValues" json:"categorical_values,omitempty"` + // Required if type is `DISCRETE`. + // A list of feasible points. + // The list should be in strictly increasing order. For instance, this + // parameter might have possible settings of 1.5, 2.5, and 4.0. This list + // should not contain more than 1,000 values. + DiscreteValues []float64 `protobuf:"fixed64,6,rep,packed,name=discrete_values,json=discreteValues" json:"discrete_values,omitempty"` + // Optional. How the parameter should be scaled to the hypercube. + // Leave unset for categorical parameters. + // Some kind of scaling is strongly recommended for real or integral + // parameters (e.g., `UNIT_LINEAR_SCALE`). 
+ ScaleType ParameterSpec_ScaleType `protobuf:"varint,7,opt,name=scale_type,json=scaleType,enum=google.cloud.ml.v1beta1.ParameterSpec_ScaleType" json:"scale_type,omitempty"` +} + +func (m *ParameterSpec) Reset() { *m = ParameterSpec{} } +func (m *ParameterSpec) String() string { return proto.CompactTextString(m) } +func (*ParameterSpec) ProtoMessage() {} +func (*ParameterSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ParameterSpec) GetParameterName() string { + if m != nil { + return m.ParameterName + } + return "" +} + +func (m *ParameterSpec) GetType() ParameterSpec_ParameterType { + if m != nil { + return m.Type + } + return ParameterSpec_PARAMETER_TYPE_UNSPECIFIED +} + +func (m *ParameterSpec) GetMinValue() float64 { + if m != nil { + return m.MinValue + } + return 0 +} + +func (m *ParameterSpec) GetMaxValue() float64 { + if m != nil { + return m.MaxValue + } + return 0 +} + +func (m *ParameterSpec) GetCategoricalValues() []string { + if m != nil { + return m.CategoricalValues + } + return nil +} + +func (m *ParameterSpec) GetDiscreteValues() []float64 { + if m != nil { + return m.DiscreteValues + } + return nil +} + +func (m *ParameterSpec) GetScaleType() ParameterSpec_ScaleType { + if m != nil { + return m.ScaleType + } + return ParameterSpec_NONE +} + +// Represents the result of a single hyperparameter tuning trial from a +// training job. The TrainingOutput object that is returned on successful +// completion of a training job with hyperparameter tuning includes a list +// of HyperparameterOutput objects, one for each successful trial. +type HyperparameterOutput struct { + // The trial id for these results. + TrialId string `protobuf:"bytes,1,opt,name=trial_id,json=trialId" json:"trial_id,omitempty"` + // The hyperparameters given to this trial. 
+ Hyperparameters map[string]string `protobuf:"bytes,2,rep,name=hyperparameters" json:"hyperparameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The final objective metric seen for this trial. + FinalMetric *HyperparameterOutput_HyperparameterMetric `protobuf:"bytes,3,opt,name=final_metric,json=finalMetric" json:"final_metric,omitempty"` + // All recorded object metrics for this trial. + AllMetrics []*HyperparameterOutput_HyperparameterMetric `protobuf:"bytes,4,rep,name=all_metrics,json=allMetrics" json:"all_metrics,omitempty"` +} + +func (m *HyperparameterOutput) Reset() { *m = HyperparameterOutput{} } +func (m *HyperparameterOutput) String() string { return proto.CompactTextString(m) } +func (*HyperparameterOutput) ProtoMessage() {} +func (*HyperparameterOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *HyperparameterOutput) GetTrialId() string { + if m != nil { + return m.TrialId + } + return "" +} + +func (m *HyperparameterOutput) GetHyperparameters() map[string]string { + if m != nil { + return m.Hyperparameters + } + return nil +} + +func (m *HyperparameterOutput) GetFinalMetric() *HyperparameterOutput_HyperparameterMetric { + if m != nil { + return m.FinalMetric + } + return nil +} + +func (m *HyperparameterOutput) GetAllMetrics() []*HyperparameterOutput_HyperparameterMetric { + if m != nil { + return m.AllMetrics + } + return nil +} + +// An observed value of a metric. +type HyperparameterOutput_HyperparameterMetric struct { + // The global training step for this metric. + TrainingStep int64 `protobuf:"varint,1,opt,name=training_step,json=trainingStep" json:"training_step,omitempty"` + // The objective value at this training step. 
+ ObjectiveValue float64 `protobuf:"fixed64,2,opt,name=objective_value,json=objectiveValue" json:"objective_value,omitempty"` +} + +func (m *HyperparameterOutput_HyperparameterMetric) Reset() { + *m = HyperparameterOutput_HyperparameterMetric{} +} +func (m *HyperparameterOutput_HyperparameterMetric) String() string { return proto.CompactTextString(m) } +func (*HyperparameterOutput_HyperparameterMetric) ProtoMessage() {} +func (*HyperparameterOutput_HyperparameterMetric) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 0} +} + +func (m *HyperparameterOutput_HyperparameterMetric) GetTrainingStep() int64 { + if m != nil { + return m.TrainingStep + } + return 0 +} + +func (m *HyperparameterOutput_HyperparameterMetric) GetObjectiveValue() float64 { + if m != nil { + return m.ObjectiveValue + } + return 0 +} + +// Represents results of a training job. Output only. +type TrainingOutput struct { + // The number of hyperparameter tuning trials that completed successfully. + // Only set for hyperparameter tuning jobs. + CompletedTrialCount int64 `protobuf:"varint,1,opt,name=completed_trial_count,json=completedTrialCount" json:"completed_trial_count,omitempty"` + // Results for individual Hyperparameter trials. + // Only set for hyperparameter tuning jobs. + Trials []*HyperparameterOutput `protobuf:"bytes,2,rep,name=trials" json:"trials,omitempty"` + // The amount of ML units consumed by the job. + ConsumedMlUnits float64 `protobuf:"fixed64,3,opt,name=consumed_ml_units,json=consumedMlUnits" json:"consumed_ml_units,omitempty"` + // Whether this job is a hyperparameter tuning job. 
+ IsHyperparameterTuningJob bool `protobuf:"varint,4,opt,name=is_hyperparameter_tuning_job,json=isHyperparameterTuningJob" json:"is_hyperparameter_tuning_job,omitempty"` +} + +func (m *TrainingOutput) Reset() { *m = TrainingOutput{} } +func (m *TrainingOutput) String() string { return proto.CompactTextString(m) } +func (*TrainingOutput) ProtoMessage() {} +func (*TrainingOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *TrainingOutput) GetCompletedTrialCount() int64 { + if m != nil { + return m.CompletedTrialCount + } + return 0 +} + +func (m *TrainingOutput) GetTrials() []*HyperparameterOutput { + if m != nil { + return m.Trials + } + return nil +} + +func (m *TrainingOutput) GetConsumedMlUnits() float64 { + if m != nil { + return m.ConsumedMlUnits + } + return 0 +} + +func (m *TrainingOutput) GetIsHyperparameterTuningJob() bool { + if m != nil { + return m.IsHyperparameterTuningJob + } + return false +} + +// Represents input parameters for a prediction job. +type PredictionInput struct { + // Required. The model or the version to use for prediction. + // + // Types that are valid to be assigned to ModelVersion: + // *PredictionInput_ModelName + // *PredictionInput_VersionName + // *PredictionInput_Uri + ModelVersion isPredictionInput_ModelVersion `protobuf_oneof:"model_version"` + // Required. The format of the input data files. + DataFormat PredictionInput_DataFormat `protobuf:"varint,3,opt,name=data_format,json=dataFormat,enum=google.cloud.ml.v1beta1.PredictionInput_DataFormat" json:"data_format,omitempty"` + // Required. The Google Cloud Storage location of the input data files. + // May contain wildcards. + InputPaths []string `protobuf:"bytes,4,rep,name=input_paths,json=inputPaths" json:"input_paths,omitempty"` + // Required. The output Google Cloud Storage location. + OutputPath string `protobuf:"bytes,5,opt,name=output_path,json=outputPath" json:"output_path,omitempty"` + // Optional. 
The maximum number of workers to be used for parallel processing. + // Defaults to 10 if not specified. + MaxWorkerCount int64 `protobuf:"varint,6,opt,name=max_worker_count,json=maxWorkerCount" json:"max_worker_count,omitempty"` + // Required. The Google Compute Engine region to run the prediction job in. + Region string `protobuf:"bytes,7,opt,name=region" json:"region,omitempty"` + // Optional. The Google Cloud ML runtime version to use for this batch + // prediction. If not set, Google Cloud ML will pick the runtime version used + // during the CreateVersion request for this model version, or choose the + // latest stable version when model version information is not available + // such as when the model is specified by uri. + RuntimeVersion string `protobuf:"bytes,8,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` +} + +func (m *PredictionInput) Reset() { *m = PredictionInput{} } +func (m *PredictionInput) String() string { return proto.CompactTextString(m) } +func (*PredictionInput) ProtoMessage() {} +func (*PredictionInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type isPredictionInput_ModelVersion interface { + isPredictionInput_ModelVersion() +} + +type PredictionInput_ModelName struct { + ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,oneof"` +} +type PredictionInput_VersionName struct { + VersionName string `protobuf:"bytes,2,opt,name=version_name,json=versionName,oneof"` +} +type PredictionInput_Uri struct { + Uri string `protobuf:"bytes,9,opt,name=uri,oneof"` +} + +func (*PredictionInput_ModelName) isPredictionInput_ModelVersion() {} +func (*PredictionInput_VersionName) isPredictionInput_ModelVersion() {} +func (*PredictionInput_Uri) isPredictionInput_ModelVersion() {} + +func (m *PredictionInput) GetModelVersion() isPredictionInput_ModelVersion { + if m != nil { + return m.ModelVersion + } + return nil +} + +func (m *PredictionInput) GetModelName() string { + if x, ok := 
m.GetModelVersion().(*PredictionInput_ModelName); ok { + return x.ModelName + } + return "" +} + +func (m *PredictionInput) GetVersionName() string { + if x, ok := m.GetModelVersion().(*PredictionInput_VersionName); ok { + return x.VersionName + } + return "" +} + +func (m *PredictionInput) GetUri() string { + if x, ok := m.GetModelVersion().(*PredictionInput_Uri); ok { + return x.Uri + } + return "" +} + +func (m *PredictionInput) GetDataFormat() PredictionInput_DataFormat { + if m != nil { + return m.DataFormat + } + return PredictionInput_DATA_FORMAT_UNSPECIFIED +} + +func (m *PredictionInput) GetInputPaths() []string { + if m != nil { + return m.InputPaths + } + return nil +} + +func (m *PredictionInput) GetOutputPath() string { + if m != nil { + return m.OutputPath + } + return "" +} + +func (m *PredictionInput) GetMaxWorkerCount() int64 { + if m != nil { + return m.MaxWorkerCount + } + return 0 +} + +func (m *PredictionInput) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *PredictionInput) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PredictionInput) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PredictionInput_OneofMarshaler, _PredictionInput_OneofUnmarshaler, _PredictionInput_OneofSizer, []interface{}{ + (*PredictionInput_ModelName)(nil), + (*PredictionInput_VersionName)(nil), + (*PredictionInput_Uri)(nil), + } +} + +func _PredictionInput_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PredictionInput) + // model_version + switch x := m.ModelVersion.(type) { + case *PredictionInput_ModelName: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ModelName) + case *PredictionInput_VersionName: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.VersionName) + case *PredictionInput_Uri: + b.EncodeVarint(9<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Uri) + case nil: + default: + return fmt.Errorf("PredictionInput.ModelVersion has unexpected type %T", x) + } + return nil +} + +func _PredictionInput_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PredictionInput) + switch tag { + case 1: // model_version.model_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ModelVersion = &PredictionInput_ModelName{x} + return true, err + case 2: // model_version.version_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ModelVersion = &PredictionInput_VersionName{x} + return true, err + case 9: // model_version.uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ModelVersion = &PredictionInput_Uri{x} + return true, err + default: + return false, nil + } +} + +func _PredictionInput_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PredictionInput) 
+ // model_version + switch x := m.ModelVersion.(type) { + case *PredictionInput_ModelName: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ModelName))) + n += len(x.ModelName) + case *PredictionInput_VersionName: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.VersionName))) + n += len(x.VersionName) + case *PredictionInput_Uri: + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Uri))) + n += len(x.Uri) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents results of a prediction job. +type PredictionOutput struct { + // The output Google Cloud Storage location provided at the job creation time. + OutputPath string `protobuf:"bytes,1,opt,name=output_path,json=outputPath" json:"output_path,omitempty"` + // The number of generated predictions. + PredictionCount int64 `protobuf:"varint,2,opt,name=prediction_count,json=predictionCount" json:"prediction_count,omitempty"` + // The number of data instances which resulted in errors. + ErrorCount int64 `protobuf:"varint,3,opt,name=error_count,json=errorCount" json:"error_count,omitempty"` + // Node hours used by the batch prediction job. 
+ NodeHours float64 `protobuf:"fixed64,4,opt,name=node_hours,json=nodeHours" json:"node_hours,omitempty"` +} + +func (m *PredictionOutput) Reset() { *m = PredictionOutput{} } +func (m *PredictionOutput) String() string { return proto.CompactTextString(m) } +func (*PredictionOutput) ProtoMessage() {} +func (*PredictionOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PredictionOutput) GetOutputPath() string { + if m != nil { + return m.OutputPath + } + return "" +} + +func (m *PredictionOutput) GetPredictionCount() int64 { + if m != nil { + return m.PredictionCount + } + return 0 +} + +func (m *PredictionOutput) GetErrorCount() int64 { + if m != nil { + return m.ErrorCount + } + return 0 +} + +func (m *PredictionOutput) GetNodeHours() float64 { + if m != nil { + return m.NodeHours + } + return 0 +} + +// Represents a training or prediction job. +type Job struct { + // Required. The user-specified id of the job. + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId" json:"job_id,omitempty"` + // Required. Parameters to create a job. + // + // Types that are valid to be assigned to Input: + // *Job_TrainingInput + // *Job_PredictionInput + Input isJob_Input `protobuf_oneof:"input"` + // Output only. When the job was created. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Output only. When the job processing was started. + StartTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Output only. When the job processing was completed. + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Output only. The detailed state of a job. + State Job_State `protobuf:"varint,7,opt,name=state,enum=google.cloud.ml.v1beta1.Job_State" json:"state,omitempty"` + // Output only. The details of a failure or a cancellation. 
+ ErrorMessage string `protobuf:"bytes,8,opt,name=error_message,json=errorMessage" json:"error_message,omitempty"` + // Output only. The current result of the job. + // + // Types that are valid to be assigned to Output: + // *Job_TrainingOutput + // *Job_PredictionOutput + Output isJob_Output `protobuf_oneof:"output"` +} + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isJob_Input interface { + isJob_Input() +} +type isJob_Output interface { + isJob_Output() +} + +type Job_TrainingInput struct { + TrainingInput *TrainingInput `protobuf:"bytes,2,opt,name=training_input,json=trainingInput,oneof"` +} +type Job_PredictionInput struct { + PredictionInput *PredictionInput `protobuf:"bytes,3,opt,name=prediction_input,json=predictionInput,oneof"` +} +type Job_TrainingOutput struct { + TrainingOutput *TrainingOutput `protobuf:"bytes,9,opt,name=training_output,json=trainingOutput,oneof"` +} +type Job_PredictionOutput struct { + PredictionOutput *PredictionOutput `protobuf:"bytes,10,opt,name=prediction_output,json=predictionOutput,oneof"` +} + +func (*Job_TrainingInput) isJob_Input() {} +func (*Job_PredictionInput) isJob_Input() {} +func (*Job_TrainingOutput) isJob_Output() {} +func (*Job_PredictionOutput) isJob_Output() {} + +func (m *Job) GetInput() isJob_Input { + if m != nil { + return m.Input + } + return nil +} +func (m *Job) GetOutput() isJob_Output { + if m != nil { + return m.Output + } + return nil +} + +func (m *Job) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func (m *Job) GetTrainingInput() *TrainingInput { + if x, ok := m.GetInput().(*Job_TrainingInput); ok { + return x.TrainingInput + } + return nil +} + +func (m *Job) GetPredictionInput() *PredictionInput { + if x, ok := m.GetInput().(*Job_PredictionInput); ok { + return x.PredictionInput + } + return 
nil +} + +func (m *Job) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Job) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Job) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Job) GetState() Job_State { + if m != nil { + return m.State + } + return Job_STATE_UNSPECIFIED +} + +func (m *Job) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func (m *Job) GetTrainingOutput() *TrainingOutput { + if x, ok := m.GetOutput().(*Job_TrainingOutput); ok { + return x.TrainingOutput + } + return nil +} + +func (m *Job) GetPredictionOutput() *PredictionOutput { + if x, ok := m.GetOutput().(*Job_PredictionOutput); ok { + return x.PredictionOutput + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Job_OneofMarshaler, _Job_OneofUnmarshaler, _Job_OneofSizer, []interface{}{ + (*Job_TrainingInput)(nil), + (*Job_PredictionInput)(nil), + (*Job_TrainingOutput)(nil), + (*Job_PredictionOutput)(nil), + } +} + +func _Job_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Job) + // input + switch x := m.Input.(type) { + case *Job_TrainingInput: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TrainingInput); err != nil { + return err + } + case *Job_PredictionInput: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PredictionInput); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Job.Input has unexpected type %T", x) + } + // output + switch x := m.Output.(type) { + case *Job_TrainingOutput: + b.EncodeVarint(9<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.TrainingOutput); err != nil { + return err + } + case *Job_PredictionOutput: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PredictionOutput); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Job.Output has unexpected type %T", x) + } + return nil +} + +func _Job_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Job) + switch tag { + case 2: // input.training_input + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TrainingInput) + err := b.DecodeMessage(msg) + m.Input = &Job_TrainingInput{msg} + return true, err + case 3: // input.prediction_input + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PredictionInput) + err := b.DecodeMessage(msg) + m.Input = &Job_PredictionInput{msg} + return true, err + case 9: // output.training_output + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TrainingOutput) + err := b.DecodeMessage(msg) + m.Output = &Job_TrainingOutput{msg} + return true, err + case 10: // output.prediction_output + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PredictionOutput) + err := b.DecodeMessage(msg) + m.Output = &Job_PredictionOutput{msg} + return true, err + default: + return false, nil + } +} + +func _Job_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Job) + // input + switch x := m.Input.(type) { + case *Job_TrainingInput: + s := proto.Size(x.TrainingInput) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_PredictionInput: + s := proto.Size(x.PredictionInput) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // output + switch x := m.Output.(type) 
{ + case *Job_TrainingOutput: + s := proto.Size(x.TrainingOutput) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_PredictionOutput: + s := proto.Size(x.PredictionOutput) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message for the CreateJob method. +type CreateJobRequest struct { + // Required. The project name. + // + // Authorization: requires `Editor` role on the specified project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The job to create. + Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` +} + +func (m *CreateJobRequest) Reset() { *m = CreateJobRequest{} } +func (m *CreateJobRequest) String() string { return proto.CompactTextString(m) } +func (*CreateJobRequest) ProtoMessage() {} +func (*CreateJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CreateJobRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateJobRequest) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +// Request message for the ListJobs method. +type ListJobsRequest struct { + // Required. The name of the project for which to list jobs. + // + // Authorization: requires `Viewer` role on the specified project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. Specifies the subset of jobs to retrieve. + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // Optional. A page token to request the next page of results. + // + // You get the token from the `next_page_token` field of the response from + // the previous call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. 
The number of jobs to retrieve per "page" of results. If there + // are more remaining results than this number, the response message will + // contain a valid value in the `next_page_token` field. + // + // The default value is 20, and the maximum page size is 100. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListJobsRequest) Reset() { *m = ListJobsRequest{} } +func (m *ListJobsRequest) String() string { return proto.CompactTextString(m) } +func (*ListJobsRequest) ProtoMessage() {} +func (*ListJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ListJobsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListJobsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListJobsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListJobsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for the ListJobs method. +type ListJobsResponse struct { + // The list of jobs. + Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"` + // Optional. Pass this token as the `page_token` field of the request for a + // subsequent call. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListJobsResponse) Reset() { *m = ListJobsResponse{} } +func (m *ListJobsResponse) String() string { return proto.CompactTextString(m) } +func (*ListJobsResponse) ProtoMessage() {} +func (*ListJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ListJobsResponse) GetJobs() []*Job { + if m != nil { + return m.Jobs + } + return nil +} + +func (m *ListJobsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for the GetJob method. +type GetJobRequest struct { + // Required. The name of the job to get the description of. + // + // Authorization: requires `Viewer` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetJobRequest) Reset() { *m = GetJobRequest{} } +func (m *GetJobRequest) String() string { return proto.CompactTextString(m) } +func (*GetJobRequest) ProtoMessage() {} +func (*GetJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *GetJobRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for the CancelJob method. +type CancelJobRequest struct { + // Required. The name of the job to cancel. + // + // Authorization: requires `Editor` role on the parent project. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *CancelJobRequest) Reset() { *m = CancelJobRequest{} } +func (m *CancelJobRequest) String() string { return proto.CompactTextString(m) } +func (*CancelJobRequest) ProtoMessage() {} +func (*CancelJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *CancelJobRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*TrainingInput)(nil), "google.cloud.ml.v1beta1.TrainingInput") + proto.RegisterType((*HyperparameterSpec)(nil), "google.cloud.ml.v1beta1.HyperparameterSpec") + proto.RegisterType((*ParameterSpec)(nil), "google.cloud.ml.v1beta1.ParameterSpec") + proto.RegisterType((*HyperparameterOutput)(nil), "google.cloud.ml.v1beta1.HyperparameterOutput") + proto.RegisterType((*HyperparameterOutput_HyperparameterMetric)(nil), "google.cloud.ml.v1beta1.HyperparameterOutput.HyperparameterMetric") + proto.RegisterType((*TrainingOutput)(nil), "google.cloud.ml.v1beta1.TrainingOutput") + proto.RegisterType((*PredictionInput)(nil), "google.cloud.ml.v1beta1.PredictionInput") + proto.RegisterType((*PredictionOutput)(nil), "google.cloud.ml.v1beta1.PredictionOutput") + proto.RegisterType((*Job)(nil), "google.cloud.ml.v1beta1.Job") + proto.RegisterType((*CreateJobRequest)(nil), "google.cloud.ml.v1beta1.CreateJobRequest") + proto.RegisterType((*ListJobsRequest)(nil), "google.cloud.ml.v1beta1.ListJobsRequest") + proto.RegisterType((*ListJobsResponse)(nil), "google.cloud.ml.v1beta1.ListJobsResponse") + proto.RegisterType((*GetJobRequest)(nil), "google.cloud.ml.v1beta1.GetJobRequest") + proto.RegisterType((*CancelJobRequest)(nil), "google.cloud.ml.v1beta1.CancelJobRequest") + proto.RegisterEnum("google.cloud.ml.v1beta1.TrainingInput_ScaleTier", TrainingInput_ScaleTier_name, TrainingInput_ScaleTier_value) + proto.RegisterEnum("google.cloud.ml.v1beta1.HyperparameterSpec_GoalType", 
HyperparameterSpec_GoalType_name, HyperparameterSpec_GoalType_value) + proto.RegisterEnum("google.cloud.ml.v1beta1.ParameterSpec_ParameterType", ParameterSpec_ParameterType_name, ParameterSpec_ParameterType_value) + proto.RegisterEnum("google.cloud.ml.v1beta1.ParameterSpec_ScaleType", ParameterSpec_ScaleType_name, ParameterSpec_ScaleType_value) + proto.RegisterEnum("google.cloud.ml.v1beta1.PredictionInput_DataFormat", PredictionInput_DataFormat_name, PredictionInput_DataFormat_value) + proto.RegisterEnum("google.cloud.ml.v1beta1.Job_State", Job_State_name, Job_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for JobService service + +type JobServiceClient interface { + // Creates a training or a batch prediction job. + CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*Job, error) + // Lists the jobs in the project. + ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) + // Describes a job. + GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) + // Cancels a running job. + CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) +} + +type jobServiceClient struct { + cc *grpc.ClientConn +} + +func NewJobServiceClient(cc *grpc.ClientConn) JobServiceClient { + return &jobServiceClient{cc} +} + +func (c *jobServiceClient) CreateJob(ctx context.Context, in *CreateJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.JobService/CreateJob", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobServiceClient) ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) { + out := new(ListJobsResponse) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.JobService/ListJobs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobServiceClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.JobService/GetJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobServiceClient) CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.JobService/CancelJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for JobService service + +type JobServiceServer interface { + // Creates a training or a batch prediction job. + CreateJob(context.Context, *CreateJobRequest) (*Job, error) + // Lists the jobs in the project. + ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) + // Describes a job. + GetJob(context.Context, *GetJobRequest) (*Job, error) + // Cancels a running job. 
+ CancelJob(context.Context, *CancelJobRequest) (*google_protobuf1.Empty, error) +} + +func RegisterJobServiceServer(s *grpc.Server, srv JobServiceServer) { + s.RegisterService(&_JobService_serviceDesc, srv) +} + +func _JobService_CreateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobServiceServer).CreateJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.JobService/CreateJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobServiceServer).CreateJob(ctx, req.(*CreateJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobService_ListJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobServiceServer).ListJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.JobService/ListJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobServiceServer).ListJobs(ctx, req.(*ListJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobService_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobServiceServer).GetJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.JobService/GetJob", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(JobServiceServer).GetJob(ctx, req.(*GetJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobService_CancelJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobServiceServer).CancelJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.JobService/CancelJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobServiceServer).CancelJob(ctx, req.(*CancelJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _JobService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.ml.v1beta1.JobService", + HandlerType: (*JobServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateJob", + Handler: _JobService_CreateJob_Handler, + }, + { + MethodName: "ListJobs", + Handler: _JobService_ListJobs_Handler, + }, + { + MethodName: "GetJob", + Handler: _JobService_GetJob_Handler, + }, + { + MethodName: "CancelJob", + Handler: _JobService_CancelJob_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/ml/v1beta1/job_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/ml/v1beta1/job_service.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2082 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, + 0x11, 0x16, 0x9f, 0x22, 0x8b, 0x12, 0x39, 0x6e, 0x5b, 0x36, 0x2d, 0x7b, 0x63, 0x79, 0x8c, 0x78, + 0x65, 0x07, 0x26, 0xd7, 0xf2, 0x06, 0xd8, 0xf5, 0x22, 0x09, 0x28, 0x72, 0x2c, 0x51, 0x10, 0x1f, + 0x69, 0x0e, 0x1d, 0xc7, 0x08, 0x30, 0x68, 0x92, 0x6d, 0x7a, 0xe4, 0x99, 0xe9, 0xc9, 0x4c, 0xd3, + 0x91, 0x36, 0x30, 0x10, 0x24, 0x39, 
0xe5, 0x90, 0x4b, 0x90, 0x1c, 0x03, 0xe4, 0x9a, 0xbf, 0x93, + 0x43, 0xfe, 0x40, 0x8e, 0xf9, 0x01, 0x39, 0x06, 0xfd, 0xe0, 0x53, 0x96, 0xe4, 0x45, 0x72, 0x63, + 0x7f, 0xf5, 0x55, 0x55, 0x77, 0x55, 0x75, 0x4d, 0x35, 0xe1, 0xd1, 0x98, 0xb1, 0xb1, 0x47, 0xab, + 0x43, 0x8f, 0x4d, 0x46, 0x55, 0xdf, 0xab, 0xbe, 0x7f, 0x3a, 0xa0, 0x9c, 0x3c, 0xad, 0x9e, 0xb0, + 0x81, 0x13, 0xd3, 0xe8, 0xbd, 0x3b, 0xa4, 0x95, 0x30, 0x62, 0x9c, 0xa1, 0x5b, 0x8a, 0x5a, 0x91, + 0xd4, 0x8a, 0xef, 0x55, 0x34, 0x75, 0xfb, 0xae, 0xb6, 0x41, 0x42, 0xb7, 0x4a, 0x82, 0x80, 0x71, + 0xc2, 0x5d, 0x16, 0xc4, 0x4a, 0x6d, 0x7b, 0x6b, 0x51, 0x3a, 0xe1, 0x6f, 0x35, 0x7c, 0x47, 0xc3, + 0x72, 0x35, 0x98, 0xbc, 0xa9, 0x52, 0x3f, 0xe4, 0x67, 0x5a, 0x78, 0x6f, 0x55, 0xc8, 0x5d, 0x9f, + 0xc6, 0x9c, 0xf8, 0xa1, 0x22, 0x98, 0x7f, 0xcc, 0xc0, 0xa6, 0x1d, 0x11, 0x37, 0x70, 0x83, 0x71, + 0x33, 0x08, 0x27, 0x1c, 0x75, 0x00, 0xe2, 0x21, 0xf1, 0xa8, 0xc3, 0x5d, 0x1a, 0x95, 0x13, 0x3b, + 0x89, 0xdd, 0xe2, 0xde, 0x17, 0x95, 0x0b, 0xb6, 0x5c, 0x59, 0xd2, 0xad, 0xf4, 0x84, 0xa2, 0xed, + 0xd2, 0x08, 0xe7, 0xe3, 0xe9, 0x4f, 0x74, 0x0f, 0x0a, 0x3e, 0x89, 0x39, 0x8d, 0x1c, 0x7e, 0x16, + 0xd2, 0x72, 0x72, 0x27, 0xb1, 0x9b, 0xc7, 0xa0, 0x20, 0xfb, 0x2c, 0xa4, 0x82, 0xf0, 0x2b, 0x16, + 0xbd, 0x9b, 0x12, 0x52, 0x8a, 0xa0, 0x20, 0x49, 0xd8, 0x83, 0xad, 0x90, 0x44, 0xc4, 0xa7, 0xc2, + 0x88, 0x88, 0xe5, 0x94, 0x9a, 0x96, 0xd4, 0xeb, 0x33, 0x61, 0x4f, 0xca, 0xa4, 0xce, 0x7d, 0xd8, + 0xd0, 0x46, 0x87, 0x6c, 0x12, 0xf0, 0x72, 0x66, 0x27, 0xb1, 0x9b, 0xc2, 0xda, 0x51, 0x5d, 0x40, + 0xe8, 0x4b, 0xb8, 0x79, 0xce, 0xac, 0x22, 0x67, 0x25, 0xf9, 0xc6, 0x8a, 0x5d, 0xa5, 0x75, 0x1f, + 0x36, 0x42, 0x32, 0x7c, 0x47, 0xc6, 0xd4, 0x99, 0x44, 0x6e, 0x5c, 0x5e, 0xdf, 0x49, 0xed, 0xe6, + 0x71, 0x41, 0x63, 0xfd, 0xc8, 0x8d, 0xd1, 0x03, 0xd8, 0x0c, 0xcf, 0xf8, 0x5b, 0x16, 0x38, 0x3e, + 0x1b, 0x4d, 0x3c, 0x5a, 0xce, 0xc9, 0x7d, 0x6e, 0x28, 0xb0, 0x25, 0x31, 0x84, 0x20, 0x4d, 0xa2, + 0x71, 0x5c, 0x06, 0xa9, 0x2f, 0x7f, 0xa3, 0x3e, 0x94, 0xde, 0x9e, 0x85, 
0x34, 0x9a, 0x39, 0x8e, + 0xcb, 0x1b, 0x3b, 0x89, 0xdd, 0xc2, 0xde, 0x0f, 0x2e, 0x4c, 0xc0, 0xe1, 0x12, 0xbf, 0x17, 0xd2, + 0x21, 0x5e, 0xb5, 0x81, 0x6e, 0x42, 0x36, 0xa2, 0x63, 0x97, 0x05, 0xe5, 0xa2, 0xdc, 0x88, 0x5e, + 0xa1, 0x5b, 0xb0, 0x2e, 0xaa, 0x73, 0xe4, 0x46, 0x65, 0x43, 0x09, 0x4e, 0xd8, 0xa0, 0xe1, 0x46, + 0xe8, 0x73, 0x28, 0x45, 0x93, 0x40, 0xd4, 0x8a, 0xf3, 0x9e, 0x46, 0xb1, 0xd0, 0x2c, 0x49, 0x42, + 0x51, 0xc3, 0x2f, 0x15, 0x6a, 0x76, 0x21, 0x3f, 0xcb, 0x39, 0xca, 0x43, 0x66, 0xbf, 0xd6, 0x6b, + 0xd6, 0x8d, 0x35, 0x54, 0x04, 0xe8, 0xd9, 0xb5, 0x76, 0xa3, 0x86, 0x1b, 0xce, 0x53, 0x23, 0x81, + 0x36, 0x21, 0xdf, 0xc5, 0x56, 0xab, 0xd9, 0x6f, 0x39, 0x4f, 0x8d, 0x94, 0x58, 0x4a, 0xa6, 0x73, + 0xd0, 0xed, 0x1b, 0x59, 0x04, 0x90, 0xad, 0xf7, 0x7b, 0x76, 0xa7, 0x65, 0x64, 0xcc, 0x7f, 0x27, + 0x01, 0x9d, 0x3f, 0x13, 0x3a, 0x84, 0xf4, 0x98, 0x11, 0x4f, 0xd7, 0xe3, 0x97, 0xdf, 0x21, 0x1c, + 0x95, 0x03, 0x46, 0x3c, 0x51, 0x12, 0x58, 0x5a, 0x40, 0x3f, 0x86, 0xac, 0x94, 0xc7, 0xe5, 0xe4, + 0x4e, 0x6a, 0xb7, 0xb0, 0xf7, 0xf0, 0x42, 0x5b, 0xdd, 0xa5, 0xa8, 0x6a, 0x2d, 0xf4, 0x19, 0x80, + 0x4f, 0x4e, 0x1d, 0x1e, 0xb9, 0xc4, 0x8b, 0x65, 0xb1, 0x66, 0x70, 0xde, 0x27, 0xa7, 0xb6, 0x04, + 0x50, 0x05, 0xae, 0x0b, 0xb1, 0x20, 0x7b, 0x1e, 0xf5, 0xa6, 0xbc, 0xb4, 0xe4, 0x5d, 0xf3, 0xc9, + 0x69, 0x57, 0x4b, 0x34, 0xff, 0x39, 0xdc, 0x5e, 0x4e, 0x97, 0xe3, 0x53, 0x1e, 0xb9, 0x43, 0x87, + 0x93, 0xb1, 0x2c, 0xda, 0x3c, 0xbe, 0xb5, 0x4c, 0x68, 0x49, 0xb9, 0x4d, 0xc6, 0x66, 0x0d, 0x72, + 0xd3, 0xc3, 0xa1, 0xdb, 0xb0, 0x75, 0xd0, 0xa9, 0x1d, 0x3b, 0xf6, 0xcf, 0xbb, 0x96, 0xd3, 0x6f, + 0xf7, 0xba, 0x56, 0xbd, 0xf9, 0xa2, 0x69, 0x35, 0x8c, 0x35, 0xb4, 0x01, 0xb9, 0x56, 0xed, 0x55, + 0xb3, 0xd5, 0x7c, 0x6d, 0x19, 0x09, 0xb9, 0x6a, 0xb6, 0xd5, 0x2a, 0x69, 0xfe, 0x3d, 0x0d, 0x9b, + 0x4b, 0xe7, 0x44, 0xdf, 0x87, 0xe2, 0x7c, 0x2f, 0x01, 0xf1, 0xa9, 0x8c, 0x79, 0x1e, 0x6f, 0xce, + 0xd0, 0x36, 0xf1, 0xa9, 0x48, 0xc8, 0xec, 0x0a, 0x5e, 0x96, 0x90, 0x25, 0xe3, 0xf3, 0x95, 0x4a, + 0x88, 0xb0, 
0x80, 0xee, 0x40, 0xde, 0x77, 0x03, 0xe7, 0x3d, 0xf1, 0x26, 0xaa, 0x3b, 0x24, 0x70, + 0xce, 0x77, 0x83, 0x97, 0x62, 0x2d, 0x85, 0xe4, 0x54, 0x0b, 0x53, 0x5a, 0x48, 0x4e, 0x95, 0xf0, + 0x09, 0xa0, 0x21, 0xe1, 0x74, 0xcc, 0x22, 0x77, 0x48, 0x3c, 0x45, 0x8a, 0xcb, 0x19, 0x79, 0xa1, + 0xae, 0x2d, 0x48, 0x24, 0x3b, 0x16, 0x55, 0x3d, 0x72, 0xe3, 0x61, 0x44, 0x39, 0x9d, 0x72, 0xb3, + 0x3b, 0xa9, 0xdd, 0x04, 0x2e, 0x4e, 0x61, 0x4d, 0x9c, 0xb7, 0x40, 0x71, 0xc2, 0xf5, 0x2b, 0x5a, + 0xe0, 0xf2, 0x09, 0xd5, 0x75, 0x10, 0xa7, 0xd3, 0x2d, 0xf0, 0x2c, 0xa4, 0xe6, 0x78, 0x21, 0xc8, + 0x32, 0x5b, 0xdf, 0x83, 0xed, 0x6e, 0x0d, 0xd7, 0x5a, 0x96, 0x6d, 0xe1, 0x8f, 0xa5, 0x0c, 0x20, + 0xdb, 0xe8, 0xf4, 0xf7, 0x8f, 0x45, 0xc2, 0x0a, 0xb0, 0xde, 0x6c, 0xdb, 0xd6, 0x81, 0x85, 0x8d, + 0x24, 0x2a, 0x41, 0xa1, 0x5e, 0xb3, 0xad, 0x83, 0x0e, 0x6e, 0xd6, 0x6b, 0xc7, 0x46, 0x4a, 0xa4, + 0xb3, 0xd1, 0xec, 0xd5, 0xb1, 0x65, 0x5b, 0x46, 0xda, 0xfc, 0xc5, 0xf4, 0x3e, 0x0a, 0x27, 0x39, + 0x48, 0xb7, 0x3b, 0x6d, 0xcb, 0x58, 0x43, 0x5b, 0x70, 0xad, 0xdf, 0x6e, 0xda, 0xce, 0x71, 0xb3, + 0x6d, 0xd5, 0xb0, 0xd3, 0xab, 0xd7, 0xa4, 0x65, 0x04, 0x45, 0x05, 0x77, 0x0e, 0x34, 0x96, 0x44, + 0xdb, 0x70, 0x53, 0x62, 0xd8, 0x7a, 0x69, 0xe1, 0x9e, 0xb5, 0x20, 0x4b, 0x99, 0x7f, 0x4e, 0xc3, + 0x8d, 0xe5, 0x0b, 0xd6, 0x99, 0x70, 0xf1, 0xcd, 0xb8, 0x0d, 0x39, 0x59, 0xe7, 0x8e, 0x3b, 0xd2, + 0xd5, 0xb2, 0x2e, 0xd7, 0xcd, 0x11, 0xf2, 0xce, 0xb7, 0x34, 0x75, 0xef, 0xf6, 0x3f, 0xf1, 0x0e, + 0x2b, 0x17, 0x2b, 0x60, 0x6c, 0x05, 0x3c, 0x3a, 0x3b, 0xdf, 0xe9, 0x28, 0x6c, 0xbc, 0x71, 0x03, + 0xe2, 0xe9, 0x4b, 0x24, 0x2b, 0xe6, 0x7f, 0x74, 0xa5, 0xae, 0x1b, 0x2e, 0x48, 0xbb, 0x6a, 0x81, + 0x86, 0x50, 0x20, 0xde, 0xd4, 0x89, 0xb8, 0xdc, 0xa9, 0xff, 0x93, 0x17, 0x20, 0x9e, 0xf6, 0x11, + 0x6f, 0x8f, 0x56, 0x83, 0xad, 0x9d, 0x3f, 0x80, 0x4d, 0xae, 0xbf, 0xba, 0x4e, 0xcc, 0x69, 0x28, + 0x23, 0x9e, 0xc2, 0x1b, 0x53, 0xb0, 0xc7, 0x69, 0x28, 0x6a, 0x9d, 0x0d, 0x4e, 0xe8, 0x90, 0xbb, + 0xef, 0xe9, 0xd2, 0xd5, 0x2a, 0xce, 0x60, 0x59, 
0xec, 0xdb, 0xfb, 0xab, 0x5e, 0x54, 0x68, 0x91, + 0x01, 0xa9, 0x77, 0xf4, 0x4c, 0x67, 0x53, 0xfc, 0x44, 0x37, 0x20, 0x33, 0x37, 0x94, 0xc7, 0x6a, + 0xf1, 0x3c, 0xf9, 0x55, 0xc2, 0xfc, 0x4f, 0x02, 0x8a, 0xd3, 0x41, 0x40, 0x57, 0xc4, 0x1e, 0x6c, + 0x0d, 0x99, 0x1f, 0x7a, 0x94, 0xd3, 0x91, 0xea, 0x81, 0xfa, 0xd3, 0xaa, 0x36, 0x7b, 0x7d, 0x26, + 0x94, 0x6d, 0x50, 0x7d, 0x59, 0x2d, 0xc8, 0xea, 0x6e, 0xa9, 0x2a, 0xe4, 0xc9, 0x77, 0x0a, 0x28, + 0xd6, 0xca, 0xe8, 0x31, 0x5c, 0x1b, 0xb2, 0x20, 0x9e, 0xf8, 0x74, 0xe4, 0xf8, 0x9e, 0x33, 0x09, + 0x5c, 0x1e, 0xeb, 0xd6, 0x51, 0x9a, 0x0a, 0x5a, 0x5e, 0x5f, 0xc0, 0xe8, 0x27, 0x70, 0xd7, 0x8d, + 0x9d, 0x95, 0x06, 0xcc, 0x27, 0x32, 0xb6, 0x27, 0x6c, 0x20, 0xbb, 0x5b, 0x0e, 0xdf, 0x76, 0xe3, + 0x65, 0x8f, 0xb6, 0x64, 0x1c, 0xb1, 0x81, 0xf9, 0xcf, 0x14, 0x94, 0xba, 0x11, 0x1d, 0xb9, 0x43, + 0x31, 0xaa, 0xa9, 0x09, 0xea, 0x1e, 0x80, 0xcf, 0x46, 0xd4, 0x5b, 0xe8, 0x9e, 0x87, 0x6b, 0x38, + 0x2f, 0x31, 0xd9, 0x3b, 0x1f, 0xc0, 0x86, 0xfe, 0xac, 0x2a, 0x4a, 0x52, 0x53, 0x0a, 0x1a, 0x95, + 0x24, 0x04, 0xa9, 0x49, 0xe4, 0x96, 0xf3, 0x5a, 0x26, 0x16, 0xc8, 0x86, 0xc2, 0x88, 0x70, 0xe2, + 0xbc, 0x61, 0x91, 0x4f, 0xb8, 0x3c, 0x54, 0x71, 0xef, 0xd9, 0xc5, 0x9d, 0x69, 0x79, 0x63, 0x95, + 0x06, 0xe1, 0xe4, 0x85, 0x54, 0xc5, 0x30, 0x9a, 0xfd, 0x16, 0xf3, 0x97, 0x2b, 0xe4, 0x4e, 0x48, + 0xf8, 0x5b, 0x55, 0xcd, 0x79, 0x0c, 0x12, 0xea, 0x0a, 0x44, 0x10, 0x98, 0x8c, 0xb1, 0x64, 0xe8, + 0xaf, 0x12, 0x28, 0x48, 0x30, 0xd0, 0x2e, 0x18, 0xa2, 0x4b, 0x2f, 0x0d, 0x5c, 0x6a, 0x86, 0x2a, + 0xfa, 0xe4, 0xf4, 0x67, 0x0b, 0x33, 0xd7, 0x7c, 0x14, 0x59, 0x5f, 0x1a, 0x45, 0x3e, 0x32, 0x71, + 0xe4, 0x3e, 0x3a, 0x71, 0xbc, 0x04, 0x98, 0x1f, 0x03, 0xdd, 0x81, 0x5b, 0x8d, 0x9a, 0x5d, 0x73, + 0x5e, 0x74, 0x70, 0xab, 0x66, 0xaf, 0x34, 0xd1, 0x1c, 0xa4, 0x6d, 0xeb, 0x95, 0xad, 0xc6, 0x0f, + 0xfb, 0x85, 0x83, 0xad, 0x7a, 0x07, 0x37, 0x8c, 0xa4, 0xe8, 0x7b, 0xb3, 0xa5, 0x73, 0xf0, 0xba, + 0xd9, 0x35, 0x52, 0xfb, 0x25, 0xd8, 0x54, 0x49, 0xd3, 0xee, 0xcd, 0xbf, 0x26, 0xc0, 
0x98, 0x07, + 0x50, 0x97, 0xf5, 0x4a, 0x24, 0x12, 0xe7, 0x22, 0xf1, 0x08, 0x8c, 0x70, 0xa6, 0xa4, 0x23, 0x91, + 0x94, 0x91, 0x28, 0xcd, 0x71, 0x15, 0x8a, 0x7b, 0x50, 0xa0, 0x51, 0xc4, 0xa6, 0xf1, 0x4a, 0x49, + 0x16, 0x48, 0x48, 0x11, 0x3e, 0x03, 0x08, 0xd8, 0x88, 0x3a, 0x6f, 0xd9, 0x24, 0x52, 0x13, 0x44, + 0x02, 0xe7, 0x05, 0x72, 0x28, 0x00, 0xf3, 0x2f, 0x59, 0x48, 0x1d, 0xb1, 0x01, 0xda, 0x02, 0x31, + 0xb6, 0xcd, 0x5b, 0x6f, 0xe6, 0x84, 0x0d, 0x9a, 0x23, 0xd4, 0x81, 0xe2, 0xac, 0x4d, 0xc8, 0x5c, + 0xca, 0x7d, 0x5c, 0x36, 0xef, 0x2c, 0xcd, 0xf2, 0x87, 0x6b, 0x78, 0xd6, 0x66, 0x54, 0x59, 0xf7, + 0x97, 0x8e, 0xa6, 0x4c, 0xaa, 0xfe, 0xba, 0xfb, 0xa9, 0x15, 0x78, 0xb8, 0xb6, 0x18, 0x06, 0x65, + 0xf6, 0x1b, 0x28, 0x0c, 0x23, 0x4a, 0xb8, 0x78, 0x70, 0xf8, 0x6a, 0x9e, 0x28, 0xec, 0x6d, 0x4f, + 0x2d, 0x4e, 0x1f, 0x2e, 0x15, 0x7b, 0xfa, 0x70, 0xc1, 0xa0, 0xe8, 0x02, 0x40, 0x5f, 0x03, 0xc4, + 0x9c, 0x44, 0x5c, 0xe9, 0x66, 0xae, 0xd4, 0xcd, 0x4b, 0xb6, 0x54, 0xfd, 0x21, 0xe4, 0x68, 0x30, + 0x52, 0x8a, 0xd9, 0x2b, 0x15, 0xd7, 0x69, 0x30, 0x92, 0x6a, 0x5f, 0x41, 0x26, 0xe6, 0x84, 0x4f, + 0xc7, 0x02, 0xf3, 0xc2, 0xa3, 0x1f, 0xb1, 0x41, 0xa5, 0x27, 0x98, 0x58, 0x29, 0x88, 0xbe, 0xad, + 0xf2, 0xed, 0xd3, 0x38, 0x26, 0xe3, 0xd9, 0xab, 0x40, 0x82, 0x2d, 0x85, 0x21, 0x0c, 0xa5, 0x59, + 0xd6, 0x54, 0x59, 0xc9, 0x0e, 0x50, 0xd8, 0xfb, 0xfc, 0xca, 0xb4, 0xa9, 0x12, 0x3d, 0x4c, 0xe0, + 0x59, 0xde, 0x75, 0xd1, 0xbe, 0x82, 0x6b, 0x0b, 0x89, 0xd3, 0x56, 0x41, 0x5a, 0x7d, 0xf4, 0x09, + 0x99, 0x9b, 0xd9, 0x5d, 0x48, 0xbf, 0xc2, 0xcc, 0xdf, 0x24, 0x20, 0x23, 0xcf, 0x28, 0x26, 0x8c, + 0x9e, 0x5d, 0xb3, 0x3f, 0x32, 0xc7, 0xfc, 0xb4, 0x6f, 0xf5, 0xad, 0xc6, 0xec, 0x0d, 0xd0, 0xad, + 0xe1, 0x66, 0xfb, 0xc0, 0x48, 0x8a, 0xb1, 0x06, 0xf7, 0xdb, 0x6d, 0xb1, 0x90, 0x0f, 0x82, 0x5e, + 0xbf, 0x5e, 0xb7, 0xac, 0x86, 0xd5, 0x30, 0xd2, 0x42, 0xed, 0x45, 0xad, 0x79, 0x6c, 0x35, 0x8c, + 0x8c, 0x78, 0x4a, 0xd4, 0x6b, 0xed, 0xba, 0x75, 0x7c, 0x2c, 0xa8, 0x59, 0x41, 0xd5, 0x6b, 0xab, + 0x61, 0xac, 0xef, 0xaf, 
0x43, 0x46, 0x96, 0xe2, 0x7e, 0x0e, 0xb2, 0xea, 0x68, 0xe6, 0x6b, 0x30, + 0xea, 0xb2, 0x44, 0x8e, 0xd8, 0x00, 0xd3, 0x5f, 0x4e, 0x68, 0x2c, 0xfb, 0x4e, 0x48, 0x22, 0xaa, + 0x3f, 0x40, 0x79, 0xac, 0x57, 0xa8, 0x02, 0x29, 0xd1, 0xe7, 0xd5, 0xd5, 0xb8, 0x7b, 0x59, 0x32, + 0xb1, 0x20, 0x9a, 0x1f, 0xa0, 0x74, 0xec, 0xc6, 0xfc, 0x88, 0x0d, 0xe2, 0xab, 0x4c, 0xdf, 0x84, + 0xec, 0x1b, 0xd7, 0xe3, 0x34, 0xd2, 0x1f, 0x4c, 0xbd, 0x12, 0xd7, 0x3a, 0x14, 0xaf, 0x47, 0xce, + 0xde, 0xd1, 0x40, 0x3f, 0x61, 0xf3, 0x02, 0xb1, 0x05, 0x20, 0x26, 0x5e, 0x29, 0x8e, 0xdd, 0x6f, + 0x55, 0x45, 0x67, 0x70, 0x4e, 0x00, 0x3d, 0xf7, 0x5b, 0x6a, 0x7a, 0x60, 0xcc, 0xdd, 0xc7, 0x21, + 0x0b, 0x62, 0x8a, 0xbe, 0x80, 0xf4, 0x09, 0x1b, 0xc4, 0xe5, 0x84, 0xfc, 0x68, 0x5e, 0x7e, 0x06, + 0xc9, 0x44, 0x0f, 0xa1, 0x14, 0xd0, 0x53, 0xd1, 0xc3, 0x66, 0xdb, 0x50, 0x5b, 0xdc, 0x14, 0x70, + 0x77, 0xba, 0x15, 0xf3, 0x01, 0x6c, 0x1e, 0x50, 0xbe, 0x10, 0x45, 0x04, 0xe9, 0x85, 0x17, 0x81, + 0xfc, 0x6d, 0x3e, 0x04, 0xa3, 0x4e, 0x82, 0x21, 0xf5, 0x2e, 0xe7, 0xed, 0xfd, 0x2d, 0x0d, 0x70, + 0xc4, 0x06, 0x3d, 0xf5, 0x57, 0x08, 0xfa, 0x7d, 0x02, 0xf2, 0xb3, 0x2c, 0xa1, 0x8b, 0xeb, 0x70, + 0x35, 0x93, 0xdb, 0x97, 0x1e, 0xd0, 0xac, 0xfc, 0xf6, 0x1f, 0xff, 0xfa, 0x53, 0x72, 0xd7, 0xbc, + 0x3f, 0xfb, 0xff, 0xe5, 0xd7, 0x2a, 0x1d, 0x3f, 0x0a, 0x23, 0x26, 0x06, 0x9f, 0xb8, 0xfa, 0xf8, + 0x43, 0x55, 0x44, 0xe1, 0xb9, 0xc8, 0x27, 0xfa, 0x43, 0x02, 0x72, 0xd3, 0x88, 0xa2, 0x8b, 0xfb, + 0xd8, 0x4a, 0xce, 0xb7, 0x1f, 0x7d, 0x02, 0x53, 0xa5, 0xc7, 0x7c, 0x24, 0x77, 0xf4, 0x00, 0x5d, + 0xbd, 0x23, 0x74, 0x06, 0x59, 0x15, 0x6f, 0x74, 0x71, 0x93, 0x5e, 0x4a, 0xc8, 0x15, 0xc1, 0xf8, + 0x88, 0x6b, 0x91, 0x8a, 0x05, 0xc7, 0xd2, 0x6f, 0xf5, 0xf1, 0x07, 0xf4, 0x3b, 0x91, 0x8e, 0x69, + 0x1a, 0x2f, 0x4b, 0xc7, 0x4a, 0xaa, 0xb7, 0x6f, 0x9e, 0x6b, 0x9a, 0x96, 0x1f, 0xf2, 0x33, 0xf3, + 0x99, 0xf4, 0xfd, 0xc4, 0xdc, 0xbd, 0xd2, 0xf7, 0xf3, 0xa1, 0xb4, 0xf9, 0x3c, 0xf1, 0x78, 0x9f, + 0xc1, 0xbd, 0x21, 0xf3, 0xcf, 0x39, 0x27, 0xa1, 0x3b, 0xdd, 
0xc0, 0x7e, 0x69, 0x5e, 0x43, 0x5d, + 0xe1, 0xb1, 0x9b, 0x78, 0xfd, 0xb5, 0xe6, 0x8f, 0x99, 0x47, 0x82, 0x71, 0x85, 0x45, 0xe3, 0xea, + 0x98, 0x06, 0x72, 0x3f, 0x55, 0x25, 0x22, 0xa1, 0x1b, 0x9f, 0xfb, 0x67, 0xee, 0x1b, 0xdf, 0x1b, + 0x64, 0x25, 0xeb, 0xd9, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x10, 0x62, 0xa8, 0xbe, 0x13, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/model_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/model_service.pb.go new file mode 100644 index 000000000..486865db6 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/model_service.pb.go @@ -0,0 +1,1050 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1beta1/model_service.proto + +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Represents a machine learning solution. +// +// A model can have multiple versions, each of which is a deployed, trained +// model ready to receive prediction requests. The model itself is just a +// container. +type Model struct { + // Required. The name specified for the model when it was created. + // + // The model name must be unique within the project it is created in. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. The description specified for the model when it was created. 
+ Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Output only. The default version of the model. This version will be used to + // handle prediction requests that do not specify a version. + // + // You can change the default version by calling + // [projects.methods.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault). + DefaultVersion *Version `protobuf:"bytes,3,opt,name=default_version,json=defaultVersion" json:"default_version,omitempty"` + // Optional. The list of regions where the model is going to be deployed. + // Currently only one region per model is supported. + // Defaults to 'us-central1' if nothing is set. + Regions []string `protobuf:"bytes,4,rep,name=regions" json:"regions,omitempty"` + // Optional. If true, enables StackDriver Logging for online prediction. + // Default is false. + OnlinePredictionLogging bool `protobuf:"varint,5,opt,name=online_prediction_logging,json=onlinePredictionLogging" json:"online_prediction_logging,omitempty"` +} + +func (m *Model) Reset() { *m = Model{} } +func (m *Model) String() string { return proto.CompactTextString(m) } +func (*Model) ProtoMessage() {} +func (*Model) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Model) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Model) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Model) GetDefaultVersion() *Version { + if m != nil { + return m.DefaultVersion + } + return nil +} + +func (m *Model) GetRegions() []string { + if m != nil { + return m.Regions + } + return nil +} + +func (m *Model) GetOnlinePredictionLogging() bool { + if m != nil { + return m.OnlinePredictionLogging + } + return false +} + +// Represents a version of the model. +// +// Each version is a trained model deployed in the cloud, ready to handle +// prediction requests. A model can have multiple versions. 
You can get +// information about all of the versions of a given model by calling +// [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list). +type Version struct { + // Required.The name specified for the version when it was created. + // + // The version name must be unique within the model it is created in. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. The description specified for the version when it was created. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Output only. If true, this version will be used to handle prediction + // requests that do not specify a version. + // + // You can change the default version by calling + // [projects.methods.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault). + IsDefault bool `protobuf:"varint,3,opt,name=is_default,json=isDefault" json:"is_default,omitempty"` + // Required. The Google Cloud Storage location of the trained model used to + // create the version. See the + // [overview of model deployment](/ml/docs/concepts/deployment-overview) for + // more informaiton. + // + // When passing Version to + // [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create) + // the model service uses the specified location as the source of the model. + // Once deployed, the model version is hosted by the prediction service, so + // this location is useful only as a historical record. + DeploymentUri string `protobuf:"bytes,4,opt,name=deployment_uri,json=deploymentUri" json:"deployment_uri,omitempty"` + // Output only. The time the version was created. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Output only. The time the version was last used for prediction. 
+ LastUseTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=last_use_time,json=lastUseTime" json:"last_use_time,omitempty"` + // Optional. The Google Cloud ML runtime version to use for this deployment. + // If not set, Google Cloud ML will choose a version. + RuntimeVersion string `protobuf:"bytes,8,opt,name=runtime_version,json=runtimeVersion" json:"runtime_version,omitempty"` + // Optional. Manually select the number of nodes to use for serving the + // model. If unset (i.e., by default), the number of nodes used to serve + // the model automatically scales with traffic. However, care should be + // taken to ramp up traffic according to the model's ability to scale. If + // your model needs to handle bursts of traffic beyond it's ability to + // scale, it is recommended you set this field appropriately. + ManualScaling *ManualScaling `protobuf:"bytes,9,opt,name=manual_scaling,json=manualScaling" json:"manual_scaling,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Version) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Version) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Version) GetIsDefault() bool { + if m != nil { + return m.IsDefault + } + return false +} + +func (m *Version) GetDeploymentUri() string { + if m != nil { + return m.DeploymentUri + } + return "" +} + +func (m *Version) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Version) GetLastUseTime() *google_protobuf2.Timestamp { + if m != nil { + return m.LastUseTime + } + return nil +} + +func (m *Version) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +func (m *Version) 
GetManualScaling() *ManualScaling { + if m != nil { + return m.ManualScaling + } + return nil +} + +// Options for manually scaling a model. +type ManualScaling struct { + // The number of nodes to allocate for this model. These nodes are always up, + // starting from the time the model is deployed, so the cost of operating + // this model will be proportional to nodes * number of hours since + // deployment. + Nodes int32 `protobuf:"varint,1,opt,name=nodes" json:"nodes,omitempty"` +} + +func (m *ManualScaling) Reset() { *m = ManualScaling{} } +func (m *ManualScaling) String() string { return proto.CompactTextString(m) } +func (*ManualScaling) ProtoMessage() {} +func (*ManualScaling) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ManualScaling) GetNodes() int32 { + if m != nil { + return m.Nodes + } + return 0 +} + +// Request message for the CreateModel method. +type CreateModelRequest struct { + // Required. The project name. + // + // Authorization: requires `Editor` role on the specified project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The model to create. + Model *Model `protobuf:"bytes,2,opt,name=model" json:"model,omitempty"` +} + +func (m *CreateModelRequest) Reset() { *m = CreateModelRequest{} } +func (m *CreateModelRequest) String() string { return proto.CompactTextString(m) } +func (*CreateModelRequest) ProtoMessage() {} +func (*CreateModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *CreateModelRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateModelRequest) GetModel() *Model { + if m != nil { + return m.Model + } + return nil +} + +// Request message for the ListModels method. +type ListModelsRequest struct { + // Required. The name of the project whose models are to be listed. + // + // Authorization: requires `Viewer` role on the specified project. 
+ Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. A page token to request the next page of results. + // + // You get the token from the `next_page_token` field of the response from + // the previous call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. The number of models to retrieve per "page" of results. If there + // are more remaining results than this number, the response message will + // contain a valid value in the `next_page_token` field. + // + // The default value is 20, and the maximum page size is 100. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListModelsRequest) Reset() { *m = ListModelsRequest{} } +func (m *ListModelsRequest) String() string { return proto.CompactTextString(m) } +func (*ListModelsRequest) ProtoMessage() {} +func (*ListModelsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *ListModelsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListModelsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListModelsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for the ListModels method. +type ListModelsResponse struct { + // The list of models. + Models []*Model `protobuf:"bytes,1,rep,name=models" json:"models,omitempty"` + // Optional. Pass this token as the `page_token` field of the request for a + // subsequent call. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListModelsResponse) Reset() { *m = ListModelsResponse{} } +func (m *ListModelsResponse) String() string { return proto.CompactTextString(m) } +func (*ListModelsResponse) ProtoMessage() {} +func (*ListModelsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *ListModelsResponse) GetModels() []*Model { + if m != nil { + return m.Models + } + return nil +} + +func (m *ListModelsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for the GetModel method. +type GetModelRequest struct { + // Required. The name of the model. + // + // Authorization: requires `Viewer` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetModelRequest) Reset() { *m = GetModelRequest{} } +func (m *GetModelRequest) String() string { return proto.CompactTextString(m) } +func (*GetModelRequest) ProtoMessage() {} +func (*GetModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *GetModelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for the DeleteModel method. +type DeleteModelRequest struct { + // Required. The name of the model. + // + // Authorization: requires `Editor` role on the parent project. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteModelRequest) Reset() { *m = DeleteModelRequest{} } +func (m *DeleteModelRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteModelRequest) ProtoMessage() {} +func (*DeleteModelRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *DeleteModelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Uploads the provided trained model version to Cloud Machine Learning. +type CreateVersionRequest struct { + // Required. The name of the model. + // + // Authorization: requires `Editor` role on the parent project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The version details. + Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *CreateVersionRequest) Reset() { *m = CreateVersionRequest{} } +func (m *CreateVersionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVersionRequest) ProtoMessage() {} +func (*CreateVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *CreateVersionRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateVersionRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +// Request message for the ListVersions method. +type ListVersionsRequest struct { + // Required. The name of the model for which to list the version. + // + // Authorization: requires `Viewer` role on the parent project. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. A page token to request the next page of results. + // + // You get the token from the `next_page_token` field of the response from + // the previous call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. The number of versions to retrieve per "page" of results. If + // there are more remaining results than this number, the response message + // will contain a valid value in the `next_page_token` field. + // + // The default value is 20, and the maximum page size is 100. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListVersionsRequest) Reset() { *m = ListVersionsRequest{} } +func (m *ListVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListVersionsRequest) ProtoMessage() {} +func (*ListVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *ListVersionsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListVersionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListVersionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for the ListVersions method. +type ListVersionsResponse struct { + // The list of versions. + Versions []*Version `protobuf:"bytes,1,rep,name=versions" json:"versions,omitempty"` + // Optional. Pass this token as the `page_token` field of the request for a + // subsequent call. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListVersionsResponse) Reset() { *m = ListVersionsResponse{} } +func (m *ListVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListVersionsResponse) ProtoMessage() {} +func (*ListVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *ListVersionsResponse) GetVersions() []*Version { + if m != nil { + return m.Versions + } + return nil +} + +func (m *ListVersionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for the GetVersion method. +type GetVersionRequest struct { + // Required. The name of the version. + // + // Authorization: requires `Viewer` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } +func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionRequest) ProtoMessage() {} +func (*GetVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *GetVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for the DeleteVerionRequest method. +type DeleteVersionRequest struct { + // Required. The name of the version. You can get the names of all the + // versions of a model by calling + // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list). + // + // Authorization: requires `Editor` role on the parent project. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteVersionRequest) Reset() { *m = DeleteVersionRequest{} } +func (m *DeleteVersionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVersionRequest) ProtoMessage() {} +func (*DeleteVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *DeleteVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for the SetDefaultVersion request. +type SetDefaultVersionRequest struct { + // Required. The name of the version to make the default for the model. You + // can get the names of all the versions of a model by calling + // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list). + // + // Authorization: requires `Editor` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *SetDefaultVersionRequest) Reset() { *m = SetDefaultVersionRequest{} } +func (m *SetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*SetDefaultVersionRequest) ProtoMessage() {} +func (*SetDefaultVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *SetDefaultVersionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*Model)(nil), "google.cloud.ml.v1beta1.Model") + proto.RegisterType((*Version)(nil), "google.cloud.ml.v1beta1.Version") + proto.RegisterType((*ManualScaling)(nil), "google.cloud.ml.v1beta1.ManualScaling") + proto.RegisterType((*CreateModelRequest)(nil), "google.cloud.ml.v1beta1.CreateModelRequest") + proto.RegisterType((*ListModelsRequest)(nil), "google.cloud.ml.v1beta1.ListModelsRequest") + proto.RegisterType((*ListModelsResponse)(nil), "google.cloud.ml.v1beta1.ListModelsResponse") + proto.RegisterType((*GetModelRequest)(nil), 
"google.cloud.ml.v1beta1.GetModelRequest") + proto.RegisterType((*DeleteModelRequest)(nil), "google.cloud.ml.v1beta1.DeleteModelRequest") + proto.RegisterType((*CreateVersionRequest)(nil), "google.cloud.ml.v1beta1.CreateVersionRequest") + proto.RegisterType((*ListVersionsRequest)(nil), "google.cloud.ml.v1beta1.ListVersionsRequest") + proto.RegisterType((*ListVersionsResponse)(nil), "google.cloud.ml.v1beta1.ListVersionsResponse") + proto.RegisterType((*GetVersionRequest)(nil), "google.cloud.ml.v1beta1.GetVersionRequest") + proto.RegisterType((*DeleteVersionRequest)(nil), "google.cloud.ml.v1beta1.DeleteVersionRequest") + proto.RegisterType((*SetDefaultVersionRequest)(nil), "google.cloud.ml.v1beta1.SetDefaultVersionRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ModelService service + +type ModelServiceClient interface { + // Creates a model which will later contain one or more versions. + // + // You must add at least one version before you can request predictions from + // the model. Add versions by calling + // [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create). + CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*Model, error) + // Lists the models in a project. + // + // Each project can contain multiple models, and each model can have multiple + // versions. + ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error) + // Gets information about a model, including its name, the description (if + // set), and the default version (if at least one version of the model has + // been deployed). 
+ GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error) + // Deletes a model. + // + // You can only delete a model if there are no versions in it. You can delete + // versions by calling + // [projects.models.versions.delete](/ml/reference/rest/v1beta1/projects.models.versions/delete). + DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Creates a new version of a model from a trained TensorFlow model. + // + // If the version created in the cloud by this call is the first deployed + // version of the specified model, it will be made the default version of the + // model. When you add a version to a model that already has one or more + // versions, the default version does not automatically change. If you want a + // new version to be the default, you must call + // [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault). + CreateVersion(ctx context.Context, in *CreateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets basic information about all the versions of a model. + // + // If you expect that a model has a lot of versions, or if you need to handle + // only a limited number of results at a time, you can request that the list + // be retrieved in batches (called pages): + ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) + // Gets information about a model version. + // + // Models can have multiple versions. You can call + // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list) + // to get the same information that this method returns for all of the + // versions of a model. + GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*Version, error) + // Deletes a model version. 
+ // + // Each model can have multiple versions deployed and in use at any given + // time. Use this method to remove a single version. + // + // Note: You cannot delete the version that is set as the default version + // of the model unless it is the only remaining version. + DeleteVersion(ctx context.Context, in *DeleteVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Designates a version to be the default for the model. + // + // The default version is used for prediction requests made against the model + // that don't specify a version. + // + // The first version to be created for a model is automatically set as the + // default. You must make any subsequent changes to the default version + // setting manually using this method. + SetDefaultVersion(ctx context.Context, in *SetDefaultVersionRequest, opts ...grpc.CallOption) (*Version, error) +} + +type modelServiceClient struct { + cc *grpc.ClientConn +} + +func NewModelServiceClient(cc *grpc.ClientConn) ModelServiceClient { + return &modelServiceClient{cc} +} + +func (c *modelServiceClient) CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*Model, error) { + out := new(Model) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/CreateModel", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error) { + out := new(ListModelsResponse) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/ListModels", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error) { + out := new(Model) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/GetModel", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/DeleteModel", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) CreateVersion(ctx context.Context, in *CreateVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/CreateVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) { + out := new(ListVersionsResponse) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/ListVersions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*Version, error) { + out := new(Version) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/GetVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) DeleteVersion(ctx context.Context, in *DeleteVersionRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/DeleteVersion", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *modelServiceClient) SetDefaultVersion(ctx context.Context, in *SetDefaultVersionRequest, opts ...grpc.CallOption) (*Version, error) { + out := new(Version) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ModelService/SetDefaultVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ModelService service + +type ModelServiceServer interface { + // Creates a model which will later contain one or more versions. + // + // You must add at least one version before you can request predictions from + // the model. Add versions by calling + // [projects.models.versions.create](/ml/reference/rest/v1beta1/projects.models.versions/create). + CreateModel(context.Context, *CreateModelRequest) (*Model, error) + // Lists the models in a project. + // + // Each project can contain multiple models, and each model can have multiple + // versions. + ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error) + // Gets information about a model, including its name, the description (if + // set), and the default version (if at least one version of the model has + // been deployed). + GetModel(context.Context, *GetModelRequest) (*Model, error) + // Deletes a model. + // + // You can only delete a model if there are no versions in it. You can delete + // versions by calling + // [projects.models.versions.delete](/ml/reference/rest/v1beta1/projects.models.versions/delete). + DeleteModel(context.Context, *DeleteModelRequest) (*google_longrunning.Operation, error) + // Creates a new version of a model from a trained TensorFlow model. + // + // If the version created in the cloud by this call is the first deployed + // version of the specified model, it will be made the default version of the + // model. When you add a version to a model that already has one or more + // versions, the default version does not automatically change. 
If you want a + // new version to be the default, you must call + // [projects.models.versions.setDefault](/ml/reference/rest/v1beta1/projects.models.versions/setDefault). + CreateVersion(context.Context, *CreateVersionRequest) (*google_longrunning.Operation, error) + // Gets basic information about all the versions of a model. + // + // If you expect that a model has a lot of versions, or if you need to handle + // only a limited number of results at a time, you can request that the list + // be retrieved in batches (called pages): + ListVersions(context.Context, *ListVersionsRequest) (*ListVersionsResponse, error) + // Gets information about a model version. + // + // Models can have multiple versions. You can call + // [projects.models.versions.list](/ml/reference/rest/v1beta1/projects.models.versions/list) + // to get the same information that this method returns for all of the + // versions of a model. + GetVersion(context.Context, *GetVersionRequest) (*Version, error) + // Deletes a model version. + // + // Each model can have multiple versions deployed and in use at any given + // time. Use this method to remove a single version. + // + // Note: You cannot delete the version that is set as the default version + // of the model unless it is the only remaining version. + DeleteVersion(context.Context, *DeleteVersionRequest) (*google_longrunning.Operation, error) + // Designates a version to be the default for the model. + // + // The default version is used for prediction requests made against the model + // that don't specify a version. + // + // The first version to be created for a model is automatically set as the + // default. You must make any subsequent changes to the default version + // setting manually using this method. 
+ SetDefaultVersion(context.Context, *SetDefaultVersionRequest) (*Version, error) +} + +func RegisterModelServiceServer(s *grpc.Server, srv ModelServiceServer) { + s.RegisterService(&_ModelService_serviceDesc, srv) +} + +func _ModelService_CreateModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateModelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).CreateModel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ModelService/CreateModel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).CreateModel(ctx, req.(*CreateModelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_ListModels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListModelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).ListModels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ModelService/ListModels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).ListModels(ctx, req.(*ListModelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_GetModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetModelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).GetModel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.cloud.ml.v1beta1.ModelService/GetModel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).GetModel(ctx, req.(*GetModelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_DeleteModel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteModelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).DeleteModel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ModelService/DeleteModel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).DeleteModel(ctx, req.(*DeleteModelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_CreateVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).CreateVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ModelService/CreateVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).CreateVersion(ctx, req.(*CreateVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_ListVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).ListVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ 
+ Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ModelService/ListVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).ListVersions(ctx, req.(*ListVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).GetVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ModelService/GetVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).GetVersion(ctx, req.(*GetVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_DeleteVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModelServiceServer).DeleteVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ModelService/DeleteVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).DeleteVersion(ctx, req.(*DeleteVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModelService_SetDefaultVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetDefaultVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ModelServiceServer).SetDefaultVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ModelService/SetDefaultVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModelServiceServer).SetDefaultVersion(ctx, req.(*SetDefaultVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ModelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.ml.v1beta1.ModelService", + HandlerType: (*ModelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateModel", + Handler: _ModelService_CreateModel_Handler, + }, + { + MethodName: "ListModels", + Handler: _ModelService_ListModels_Handler, + }, + { + MethodName: "GetModel", + Handler: _ModelService_GetModel_Handler, + }, + { + MethodName: "DeleteModel", + Handler: _ModelService_DeleteModel_Handler, + }, + { + MethodName: "CreateVersion", + Handler: _ModelService_CreateVersion_Handler, + }, + { + MethodName: "ListVersions", + Handler: _ModelService_ListVersions_Handler, + }, + { + MethodName: "GetVersion", + Handler: _ModelService_GetVersion_Handler, + }, + { + MethodName: "DeleteVersion", + Handler: _ModelService_DeleteVersion_Handler, + }, + { + MethodName: "SetDefaultVersion", + Handler: _ModelService_SetDefaultVersion_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/ml/v1beta1/model_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/ml/v1beta1/model_service.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1013 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xd6, 0x26, 0x71, 0x62, 0x3f, 0xd7, 0x89, 0x32, 0x04, 0x6a, 0x0c, 0xa1, 0xd6, 0x56, 0x69, + 0x2d, 0xa7, 0xdd, 0x25, 0x06, 0x55, 0x8a, 0x0b, 0x45, 0x2a, 0x91, 0x2a, 0xa4, 0x46, 0x44, 0x9b, + 0x96, 0x03, 0x97, 0xd5, 0xc6, 0x9e, 0x2e, 0x53, 0x76, 0x67, 0xb6, 
0x3b, 0xb3, 0x11, 0x14, 0x7a, + 0x80, 0x03, 0x47, 0x0e, 0x20, 0xae, 0x5c, 0xb8, 0xf3, 0xcf, 0x70, 0xe7, 0x84, 0xf8, 0x23, 0x38, + 0xa1, 0xf9, 0xb1, 0xce, 0x3a, 0xfe, 0xb1, 0x1b, 0x24, 0x6e, 0x9e, 0x37, 0xdf, 0x9b, 0xf7, 0xcd, + 0xfb, 0xde, 0x7b, 0x3b, 0x86, 0xfd, 0x90, 0xb1, 0x30, 0xc2, 0xee, 0x28, 0x62, 0xd9, 0xd8, 0x8d, + 0x23, 0xf7, 0xfc, 0xe0, 0x0c, 0x8b, 0xe0, 0xc0, 0x8d, 0xd9, 0x18, 0x47, 0x3e, 0xc7, 0xe9, 0x39, + 0x19, 0x61, 0x27, 0x49, 0x99, 0x60, 0xe8, 0xba, 0x06, 0x3b, 0x0a, 0xec, 0xc4, 0x91, 0x63, 0xc0, + 0x9d, 0xb7, 0xcd, 0x29, 0x41, 0x42, 0xdc, 0x80, 0x52, 0x26, 0x02, 0x41, 0x18, 0xe5, 0xda, 0xad, + 0xf3, 0x7a, 0x71, 0x37, 0x13, 0x5f, 0x18, 0xf3, 0x4d, 0x63, 0x8e, 0x18, 0x0d, 0xd3, 0x8c, 0x52, + 0x42, 0x43, 0x97, 0x25, 0x38, 0x9d, 0xf2, 0xbd, 0x61, 0x40, 0x6a, 0x75, 0x96, 0x3d, 0x73, 0x05, + 0x89, 0x31, 0x17, 0x41, 0x9c, 0x68, 0x80, 0xfd, 0xa7, 0x05, 0xb5, 0x63, 0xc9, 0x15, 0x21, 0x58, + 0xa3, 0x41, 0x8c, 0xdb, 0x56, 0xd7, 0xea, 0x35, 0x3c, 0xf5, 0x1b, 0x75, 0xa1, 0x39, 0xc6, 0x7c, + 0x94, 0x92, 0x44, 0x1e, 0xda, 0x5e, 0x51, 0x5b, 0x45, 0x13, 0xfa, 0x04, 0xb6, 0xc6, 0xf8, 0x59, + 0x90, 0x45, 0xc2, 0x3f, 0xc7, 0x29, 0x97, 0xa8, 0xd5, 0xae, 0xd5, 0x6b, 0x0e, 0xba, 0xce, 0x82, + 0xdb, 0x3a, 0x9f, 0x69, 0x9c, 0xb7, 0x69, 0x1c, 0xcd, 0x1a, 0xb5, 0x61, 0x23, 0xc5, 0xa1, 0x24, + 0xdf, 0x5e, 0xeb, 0xae, 0xf6, 0x1a, 0x5e, 0xbe, 0x44, 0x43, 0x78, 0x93, 0xd1, 0x88, 0x50, 0xec, + 0x27, 0x29, 0x1e, 0x93, 0x91, 0x8c, 0xec, 0x47, 0x2c, 0x0c, 0x09, 0x0d, 0xdb, 0xb5, 0xae, 0xd5, + 0xab, 0x7b, 0xd7, 0x35, 0xe0, 0x64, 0xb2, 0xff, 0x58, 0x6f, 0xdb, 0xff, 0xac, 0xc0, 0x46, 0x1e, + 0xe1, 0xbf, 0x5d, 0x71, 0x17, 0x80, 0x70, 0xdf, 0x90, 0x55, 0xb7, 0xab, 0x7b, 0x0d, 0xc2, 0x8f, + 0xb4, 0x01, 0xed, 0xc1, 0xe6, 0x18, 0x27, 0x11, 0xfb, 0x3a, 0xc6, 0x54, 0xf8, 0x59, 0x4a, 0xda, + 0x6b, 0xea, 0x8c, 0xd6, 0x85, 0xf5, 0x69, 0x4a, 0xd0, 0x7d, 0x68, 0x8e, 0x52, 0x1c, 0x08, 0xec, + 0x4b, 0x09, 0x14, 0xeb, 0xe6, 0xa0, 0x93, 0x27, 0x29, 0xd7, 0xc7, 0x79, 0x92, 0xeb, 0xe3, 0x81, + 0x86, 
0x4b, 0x03, 0x7a, 0x00, 0xad, 0x28, 0xe0, 0xc2, 0xcf, 0xb8, 0x71, 0x5f, 0x2f, 0x75, 0x6f, + 0x4a, 0x87, 0xa7, 0x5c, 0xfb, 0xdf, 0x86, 0xad, 0x34, 0xa3, 0xd2, 0x73, 0xa2, 0x52, 0x5d, 0x91, + 0xdc, 0x34, 0xe6, 0x3c, 0x43, 0xc7, 0xb0, 0x19, 0x07, 0x34, 0x0b, 0x22, 0x9f, 0x8f, 0x82, 0x48, + 0xa6, 0xb7, 0xa1, 0x22, 0xdd, 0x5a, 0xa8, 0xe6, 0xb1, 0x82, 0x9f, 0x6a, 0xb4, 0xd7, 0x8a, 0x8b, + 0x4b, 0x7b, 0x0f, 0x5a, 0x53, 0xfb, 0x68, 0x07, 0x6a, 0x94, 0x8d, 0x31, 0x57, 0x12, 0xd4, 0x3c, + 0xbd, 0xb0, 0xcf, 0x00, 0x7d, 0xac, 0x2e, 0xab, 0x2a, 0xd1, 0xc3, 0x2f, 0x32, 0xcc, 0x05, 0x7a, + 0x03, 0xd6, 0x93, 0x20, 0xc5, 0x54, 0x18, 0xbd, 0xcc, 0x0a, 0xbd, 0x0f, 0x35, 0xd5, 0x5d, 0x4a, + 0xab, 0xe6, 0xe0, 0x9d, 0xc5, 0xd4, 0xd4, 0x69, 0x1a, 0x6c, 0x87, 0xb0, 0xfd, 0x98, 0x70, 0xa1, + 0x6c, 0xbc, 0x2c, 0xc4, 0x2e, 0x40, 0x12, 0x84, 0xd8, 0x17, 0xec, 0x4b, 0x4c, 0x8d, 0x9e, 0x0d, + 0x69, 0x79, 0x22, 0x0d, 0xe8, 0x2d, 0x50, 0x0b, 0x9f, 0x93, 0x97, 0x5a, 0xc9, 0x9a, 0x57, 0x97, + 0x86, 0x53, 0xf2, 0x12, 0xdb, 0x02, 0x50, 0x31, 0x10, 0x4f, 0x18, 0xe5, 0x18, 0xdd, 0x83, 0x75, + 0xc5, 0x43, 0xde, 0x7c, 0xb5, 0x02, 0x6b, 0x83, 0x46, 0xb7, 0x60, 0x8b, 0xe2, 0xaf, 0x84, 0x5f, + 0xa0, 0xa3, 0x4b, 0xb4, 0x25, 0xcd, 0x27, 0x39, 0x25, 0x7b, 0x0f, 0xb6, 0x1e, 0x61, 0x31, 0x95, + 0xbf, 0x39, 0xd5, 0x6e, 0xf7, 0x00, 0x1d, 0xe1, 0x08, 0x5f, 0xca, 0xf4, 0x3c, 0xe4, 0x73, 0xd8, + 0xd1, 0x9a, 0xe4, 0xed, 0x5a, 0x92, 0xb2, 0x21, 0x6c, 0xe4, 0xa5, 0xb5, 0x52, 0x71, 0x00, 0xe4, + 0x0e, 0x36, 0x81, 0xd7, 0x64, 0xca, 0x8c, 0xfd, 0x7f, 0x55, 0xe7, 0x5b, 0xd8, 0x99, 0x0e, 0x65, + 0xf4, 0xf9, 0x00, 0xea, 0x86, 0x4d, 0xae, 0x50, 0x39, 0xff, 0x89, 0x47, 0x65, 0x95, 0x6e, 0xc3, + 0xf6, 0x23, 0x2c, 0x2e, 0x65, 0x74, 0x5e, 0xf6, 0xfb, 0xb0, 0xa3, 0x75, 0xaa, 0x80, 0x75, 0xa0, + 0x7d, 0x8a, 0xc5, 0xd1, 0xd4, 0x30, 0x5d, 0x82, 0x1f, 0xfc, 0x0d, 0x70, 0x4d, 0xc9, 0x7f, 0xaa, + 0xbf, 0x4e, 0xe8, 0x47, 0x0b, 0x9a, 0x85, 0xfe, 0x43, 0xfb, 0x0b, 0x6f, 0x3e, 0xdb, 0xa5, 0x9d, + 0x92, 0x42, 0xb6, 0x07, 0xdf, 0xff, 0xf1, 
0xd7, 0xcf, 0x2b, 0x77, 0xec, 0x9b, 0x93, 0x4f, 0xe3, + 0x37, 0x5a, 0xc6, 0x0f, 0x93, 0x94, 0x3d, 0xc7, 0x23, 0xc1, 0xdd, 0xfe, 0x2b, 0xfd, 0xb9, 0xe4, + 0x43, 0xdd, 0xab, 0xe8, 0x27, 0x0b, 0xe0, 0xa2, 0x87, 0x50, 0x7f, 0x61, 0x88, 0x99, 0x8e, 0xee, + 0xec, 0x57, 0xc2, 0x6a, 0xd1, 0xed, 0x7d, 0xc5, 0x6d, 0x0f, 0x55, 0xe1, 0x86, 0xbe, 0xb3, 0xa0, + 0x9e, 0xb7, 0x18, 0xea, 0x2d, 0x0c, 0x73, 0xa9, 0x0b, 0x4b, 0xf3, 0x33, 0x87, 0x83, 0x54, 0xa9, + 0xc0, 0xc0, 0x10, 0x70, 0xfb, 0xaf, 0xd0, 0x0f, 0x16, 0x34, 0x0b, 0xfd, 0xbb, 0x44, 0xa9, 0xd9, + 0x2e, 0xef, 0xec, 0xe6, 0xe0, 0xc2, 0x8b, 0xc1, 0xf9, 0x34, 0x7f, 0x31, 0xe4, 0x44, 0xfa, 0x95, + 0x88, 0xfc, 0x6a, 0x41, 0x6b, 0x6a, 0x3c, 0xa0, 0xbb, 0x25, 0x45, 0x33, 0x5d, 0x98, 0x65, 0x64, + 0x3e, 0x52, 0x64, 0x0e, 0x6d, 0x67, 0x89, 0x32, 0x17, 0x74, 0xdc, 0xbc, 0x11, 0x87, 0xf9, 0x48, + 0x41, 0xbf, 0x59, 0x70, 0xad, 0xd8, 0xe8, 0xe8, 0xce, 0xd2, 0xc2, 0xb8, 0x34, 0x7a, 0x3a, 0x77, + 0x2b, 0xa2, 0x4d, 0x21, 0xdd, 0x53, 0x74, 0xdf, 0x45, 0x57, 0xa4, 0xab, 0x0a, 0xfd, 0x62, 0x20, + 0x2c, 0x29, 0xf4, 0x99, 0xa9, 0xd1, 0x29, 0x1d, 0x4f, 0xf3, 0x48, 0x2d, 0x12, 0x74, 0xc2, 0x48, + 0x6a, 0xfb, 0x8b, 0x05, 0xad, 0xa9, 0xe1, 0xb3, 0x44, 0xdb, 0x79, 0x43, 0xaa, 0x4c, 0x5b, 0xc3, + 0xab, 0x7f, 0x55, 0x5e, 0xbf, 0x5b, 0xb0, 0x3d, 0x33, 0xe8, 0xd0, 0xc1, 0x42, 0x6e, 0x8b, 0x86, + 0x62, 0x85, 0xd4, 0x1d, 0x29, 0x8a, 0x0f, 0xec, 0xc3, 0xab, 0x51, 0x1c, 0xf2, 0x49, 0xc8, 0xa1, + 0xd5, 0x7f, 0xf8, 0x02, 0x6e, 0x8c, 0x58, 0x3c, 0x13, 0x2c, 0x48, 0x48, 0x1e, 0xf0, 0xe1, 0x76, + 0x71, 0x10, 0x9f, 0xc8, 0x57, 0xdc, 0x89, 0xf5, 0xf9, 0xa1, 0xf1, 0x08, 0x59, 0x14, 0xd0, 0xd0, + 0x61, 0x69, 0xe8, 0x86, 0x98, 0xaa, 0x37, 0x9e, 0xab, 0xb7, 0x82, 0x84, 0xf0, 0x99, 0xff, 0x1c, + 0xf7, 0xe3, 0xe8, 0x6c, 0x5d, 0xa1, 0xde, 0xfb, 0x37, 0x00, 0x00, 0xff, 0xff, 0x04, 0x39, 0xff, + 0x08, 0x98, 0x0c, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/operation_metadata.pb.go 
b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/operation_metadata.pb.go new file mode 100644 index 000000000..293077731 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/operation_metadata.pb.go @@ -0,0 +1,161 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1beta1/operation_metadata.proto + +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The operation type. +type OperationMetadata_OperationType int32 + +const ( + // Unspecified operation type. + OperationMetadata_OPERATION_TYPE_UNSPECIFIED OperationMetadata_OperationType = 0 + // An operation to create a new version. + OperationMetadata_CREATE_VERSION OperationMetadata_OperationType = 1 + // An operation to delete an existing version. + OperationMetadata_DELETE_VERSION OperationMetadata_OperationType = 2 + // An operation to delete an existing model. + OperationMetadata_DELETE_MODEL OperationMetadata_OperationType = 3 +) + +var OperationMetadata_OperationType_name = map[int32]string{ + 0: "OPERATION_TYPE_UNSPECIFIED", + 1: "CREATE_VERSION", + 2: "DELETE_VERSION", + 3: "DELETE_MODEL", +} +var OperationMetadata_OperationType_value = map[string]int32{ + "OPERATION_TYPE_UNSPECIFIED": 0, + "CREATE_VERSION": 1, + "DELETE_VERSION": 2, + "DELETE_MODEL": 3, +} + +func (x OperationMetadata_OperationType) String() string { + return proto.EnumName(OperationMetadata_OperationType_name, int32(x)) +} +func (OperationMetadata_OperationType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 0} +} + +// Represents the metadata of the long-running operation. 
+type OperationMetadata struct { + // The time the operation was submitted. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The time operation processing started. + StartTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time operation processing completed. + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Indicates whether a request to cancel this operation has been made. + IsCancellationRequested bool `protobuf:"varint,4,opt,name=is_cancellation_requested,json=isCancellationRequested" json:"is_cancellation_requested,omitempty"` + // The operation type. + OperationType OperationMetadata_OperationType `protobuf:"varint,5,opt,name=operation_type,json=operationType,enum=google.cloud.ml.v1beta1.OperationMetadata_OperationType" json:"operation_type,omitempty"` + // Contains the name of the model associated with the operation. + ModelName string `protobuf:"bytes,6,opt,name=model_name,json=modelName" json:"model_name,omitempty"` + // Contains the version associated with the operation. 
+ Version *Version `protobuf:"bytes,7,opt,name=version" json:"version,omitempty"` +} + +func (m *OperationMetadata) Reset() { *m = OperationMetadata{} } +func (m *OperationMetadata) String() string { return proto.CompactTextString(m) } +func (*OperationMetadata) ProtoMessage() {} +func (*OperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *OperationMetadata) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *OperationMetadata) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *OperationMetadata) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *OperationMetadata) GetIsCancellationRequested() bool { + if m != nil { + return m.IsCancellationRequested + } + return false +} + +func (m *OperationMetadata) GetOperationType() OperationMetadata_OperationType { + if m != nil { + return m.OperationType + } + return OperationMetadata_OPERATION_TYPE_UNSPECIFIED +} + +func (m *OperationMetadata) GetModelName() string { + if m != nil { + return m.ModelName + } + return "" +} + +func (m *OperationMetadata) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func init() { + proto.RegisterType((*OperationMetadata)(nil), "google.cloud.ml.v1beta1.OperationMetadata") + proto.RegisterEnum("google.cloud.ml.v1beta1.OperationMetadata_OperationType", OperationMetadata_OperationType_name, OperationMetadata_OperationType_value) +} + +func init() { proto.RegisterFile("google/cloud/ml/v1beta1/operation_metadata.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x5f, 0x6b, 0xdb, 0x30, + 0x14, 0xc5, 0xe7, 0xb6, 0x6b, 0x1b, 0x75, 0x0d, 0x99, 0x1f, 0x56, 0x2f, 0x6c, 0xab, 0xe9, 0x53, + 0x60, 0x60, 0xaf, 0x1d, 
0x83, 0x75, 0x7d, 0x6a, 0x13, 0x0d, 0x02, 0x6d, 0x6c, 0x54, 0xaf, 0xb0, + 0xbd, 0x18, 0xc5, 0xbe, 0x33, 0x02, 0xfd, 0xf1, 0x2c, 0x25, 0xd0, 0x0f, 0xb4, 0xef, 0x39, 0x22, + 0xd9, 0x34, 0x23, 0x84, 0x3e, 0xea, 0xdc, 0xf3, 0xbb, 0xf7, 0xf8, 0x5e, 0xa3, 0x4f, 0x95, 0x52, + 0x15, 0x87, 0xb8, 0xe0, 0x6a, 0x51, 0xc6, 0x82, 0xc7, 0xcb, 0xf3, 0x39, 0x18, 0x7a, 0x1e, 0xab, + 0x1a, 0x1a, 0x6a, 0x98, 0x92, 0xb9, 0x00, 0x43, 0x4b, 0x6a, 0x68, 0x54, 0x37, 0xca, 0x28, 0xff, + 0xc4, 0x11, 0x91, 0x25, 0x22, 0xc1, 0xa3, 0x96, 0x18, 0xbe, 0x6b, 0x5b, 0xd1, 0x9a, 0xc5, 0x54, + 0x4a, 0x65, 0x2c, 0xae, 0x1d, 0x36, 0xfc, 0xb8, 0x6d, 0x90, 0x50, 0x25, 0xf0, 0x5c, 0x43, 0xb3, + 0x64, 0x05, 0xb4, 0xe6, 0xd3, 0xd6, 0x6c, 0x5f, 0xf3, 0xc5, 0xef, 0xd8, 0x30, 0x01, 0xda, 0x50, + 0x51, 0x3b, 0xc3, 0xd9, 0xdf, 0x3d, 0xf4, 0x3a, 0xe9, 0x12, 0xde, 0xb5, 0x01, 0xfd, 0x2b, 0x74, + 0x54, 0x34, 0x40, 0x0d, 0xe4, 0x2b, 0x7f, 0xe0, 0x85, 0xde, 0xe8, 0xe8, 0x62, 0x18, 0xb5, 0x81, + 0xbb, 0x66, 0x51, 0xd6, 0x35, 0x23, 0xc8, 0xd9, 0x57, 0x82, 0x7f, 0x89, 0x90, 0x36, 0xb4, 0x31, + 0x8e, 0xdd, 0x79, 0x96, 0xed, 0x59, 0xb7, 0x45, 0xbf, 0xa0, 0x43, 0x90, 0xa5, 0x03, 0x77, 0x9f, + 0x05, 0x0f, 0x40, 0x96, 0x16, 0xfb, 0x86, 0xde, 0x32, 0x9d, 0x17, 0x54, 0x16, 0xc0, 0xb9, 0xdb, + 0x75, 0x03, 0x7f, 0x16, 0xa0, 0x0d, 0x94, 0xc1, 0x5e, 0xe8, 0x8d, 0x0e, 0xc9, 0x09, 0xd3, 0xe3, + 0xb5, 0x3a, 0xe9, 0xca, 0x7e, 0x8e, 0xfa, 0x4f, 0x17, 0x32, 0x8f, 0x35, 0x04, 0x2f, 0x43, 0x6f, + 0xd4, 0xbf, 0xf8, 0x1a, 0x6d, 0x39, 0x4f, 0xb4, 0xb1, 0xae, 0x27, 0x25, 0x7b, 0xac, 0x81, 0x1c, + 0xab, 0xf5, 0xa7, 0xff, 0x1e, 0x21, 0x77, 0x19, 0x49, 0x05, 0x04, 0xfb, 0xa1, 0x37, 0xea, 0x91, + 0x9e, 0x55, 0x66, 0xd4, 0x66, 0x3f, 0x58, 0x42, 0xa3, 0x99, 0x92, 0xc1, 0x81, 0xfd, 0xe2, 0x70, + 0xeb, 0xe0, 0x07, 0xe7, 0x23, 0x1d, 0x70, 0xc6, 0xd0, 0xf1, 0x7f, 0xa3, 0xfd, 0x0f, 0x68, 0x98, + 0xa4, 0x98, 0x5c, 0x67, 0xd3, 0x64, 0x96, 0x67, 0x3f, 0x53, 0x9c, 0xff, 0x98, 0xdd, 0xa7, 0x78, + 0x3c, 0xfd, 0x3e, 0xc5, 0x93, 0xc1, 0x0b, 0xdf, 0x47, 0xfd, 
0x31, 0xc1, 0xd7, 0x19, 0xce, 0x1f, + 0x30, 0xb9, 0x9f, 0x26, 0xb3, 0x81, 0xb7, 0xd2, 0x26, 0xf8, 0x16, 0xaf, 0x69, 0x3b, 0xfe, 0x00, + 0xbd, 0x6a, 0xb5, 0xbb, 0x64, 0x82, 0x6f, 0x07, 0xbb, 0x37, 0x4b, 0x74, 0x5a, 0x28, 0xb1, 0x11, + 0x8d, 0xd6, 0xac, 0x8b, 0x77, 0xf3, 0x66, 0x63, 0x31, 0xe9, 0xea, 0x66, 0xa9, 0xf7, 0xeb, 0xb2, + 0xc5, 0x2a, 0xc5, 0xa9, 0xac, 0x22, 0xd5, 0x54, 0x71, 0x05, 0xd2, 0x5e, 0x34, 0x76, 0x25, 0x5a, + 0x33, 0xbd, 0xf1, 0x47, 0x5f, 0x09, 0x3e, 0xdf, 0xb7, 0xae, 0xcf, 0xff, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x89, 0xf8, 0x21, 0xa7, 0x5f, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/prediction_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/prediction_service.pb.go new file mode 100644 index 000000000..7d6a8ada9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/prediction_service.pb.go @@ -0,0 +1,343 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1beta1/prediction_service.proto + +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api3 "google.golang.org/genproto/googleapis/api/httpbody" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request for predictions to be issued against a trained model. +// +// The body of the request is a single JSON object with a single top-level +// field: +// +//
+//
instances
+//
A JSON array containing values representing the instances to use for +// prediction.
+//
+// +// The structure of each element of the instances list is determined by your +// model's input definition. Instances can include named inputs or can contain +// only unlabeled values. +// +// Not all data includes named inputs. Some instances will be simple +// JSON values (boolean, number, or string). However, instances are often lists +// of simple values, or complex nested lists. Here are some examples of request +// bodies: +// +// CSV data with each row encoded as a string value: +//
+// {"instances": ["1.0,true,\\"x\\"", "-2.0,false,\\"y\\""]}
+// 
+// Plain text: +//
+// {"instances": ["the quick brown fox", "la bruja le dio"]}
+// 
+// Sentences encoded as lists of words (vectors of strings): +//
+// {
+//   "instances": [
+//     ["the","quick","brown"],
+//     ["la","bruja","le"],
+//     ...
+//   ]
+// }
+// 
+// Floating point scalar values: +//
+// {"instances": [0.0, 1.1, 2.2]}
+// 
+// Vectors of integers: +//
+// {
+//   "instances": [
+//     [0, 1, 2],
+//     [3, 4, 5],
+//     ...
+//   ]
+// }
+// 
+// Tensors (in this case, two-dimensional tensors): +//
+// {
+//   "instances": [
+//     [
+//       [0, 1, 2],
+//       [3, 4, 5]
+//     ],
+//     ...
+//   ]
+// }
+// 
+// Images can be represented different ways. In this encoding scheme the first +// two dimensions represent the rows and columns of the image, and the third +// contains lists (vectors) of the R, G, and B values for each pixel. +//
+// {
+//   "instances": [
+//     [
+//       [
+//         [138, 30, 66],
+//         [130, 20, 56],
+//         ...
+//       ],
+//       [
+//         [126, 38, 61],
+//         [122, 24, 57],
+//         ...
+//       ],
+//       ...
+//     ],
+//     ...
+//   ]
+// }
+// 
+// JSON strings must be encoded as UTF-8. To send binary data, you must +// base64-encode the data and mark it as binary. To mark a JSON string +// as binary, replace it with a JSON object with a single attribute named `b64`: +//
{"b64": "..."} 
+// For example: +// +// Two Serialized tf.Examples (fake data, for illustrative purposes only): +//
+// {"instances": [{"b64": "X5ad6u"}, {"b64": "IA9j4nx"}]}
+// 
+// Two JPEG image byte strings (fake data, for illustrative purposes only): +//
+// {"instances": [{"b64": "ASa8asdf"}, {"b64": "JLK7ljk3"}]}
+// 
+// If your data includes named references, format each instance as a JSON object +// with the named references as the keys: +// +// JSON input data to be preprocessed: +//
+// {
+//   "instances": [
+//     {
+//       "a": 1.0,
+//       "b": true,
+//       "c": "x"
+//     },
+//     {
+//       "a": -2.0,
+//       "b": false,
+//       "c": "y"
+//     }
+//   ]
+// }
+// 
+// Some models have an underlying TensorFlow graph that accepts multiple input +// tensors. In this case, you should use the names of JSON name/value pairs to +// identify the input tensors, as shown in the following exmaples: +// +// For a graph with input tensor aliases "tag" (string) and "image" +// (base64-encoded string): +//
+// {
+//   "instances": [
+//     {
+//       "tag": "beach",
+//       "image": {"b64": "ASa8asdf"}
+//     },
+//     {
+//       "tag": "car",
+//       "image": {"b64": "JLK7ljk3"}
+//     }
+//   ]
+// }
+// 
+// For a graph with input tensor aliases "tag" (string) and "image" +// (3-dimensional array of 8-bit ints): +//
+// {
+//   "instances": [
+//     {
+//       "tag": "beach",
+//       "image": [
+//         [
+//           [138, 30, 66],
+//           [130, 20, 56],
+//           ...
+//         ],
+//         [
+//           [126, 38, 61],
+//           [122, 24, 57],
+//           ...
+//         ],
+//         ...
+//       ]
+//     },
+//     {
+//       "tag": "car",
+//       "image": [
+//         [
+//           [255, 0, 102],
+//           [255, 0, 97],
+//           ...
+//         ],
+//         [
+//           [254, 1, 101],
+//           [254, 2, 93],
+//           ...
+//         ],
+//         ...
+//       ]
+//     },
+//     ...
+//   ]
+// }
+// 
+// If the call is successful, the response body will contain one prediction +// entry per instance in the request body. If prediction fails for any +// instance, the response body will contain no predictions and will contian +// a single error entry instead. +type PredictRequest struct { + // Required. The resource name of a model or a version. + // + // Authorization: requires `Viewer` role on the parent project. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // + // Required. The prediction request body. + HttpBody *google_api3.HttpBody `protobuf:"bytes,2,opt,name=http_body,json=httpBody" json:"http_body,omitempty"` +} + +func (m *PredictRequest) Reset() { *m = PredictRequest{} } +func (m *PredictRequest) String() string { return proto.CompactTextString(m) } +func (*PredictRequest) ProtoMessage() {} +func (*PredictRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *PredictRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PredictRequest) GetHttpBody() *google_api3.HttpBody { + if m != nil { + return m.HttpBody + } + return nil +} + +func init() { + proto.RegisterType((*PredictRequest)(nil), "google.cloud.ml.v1beta1.PredictRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for OnlinePredictionService service + +type OnlinePredictionServiceClient interface { + // Performs prediction on the data in the request. 
+ // + // **** REMOVE FROM GENERATED DOCUMENTATION + Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*google_api3.HttpBody, error) +} + +type onlinePredictionServiceClient struct { + cc *grpc.ClientConn +} + +func NewOnlinePredictionServiceClient(cc *grpc.ClientConn) OnlinePredictionServiceClient { + return &onlinePredictionServiceClient{cc} +} + +func (c *onlinePredictionServiceClient) Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*google_api3.HttpBody, error) { + out := new(google_api3.HttpBody) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.OnlinePredictionService/Predict", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for OnlinePredictionService service + +type OnlinePredictionServiceServer interface { + // Performs prediction on the data in the request. + // + // **** REMOVE FROM GENERATED DOCUMENTATION + Predict(context.Context, *PredictRequest) (*google_api3.HttpBody, error) +} + +func RegisterOnlinePredictionServiceServer(s *grpc.Server, srv OnlinePredictionServiceServer) { + s.RegisterService(&_OnlinePredictionService_serviceDesc, srv) +} + +func _OnlinePredictionService_Predict_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PredictRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OnlinePredictionServiceServer).Predict(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.OnlinePredictionService/Predict", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OnlinePredictionServiceServer).Predict(ctx, req.(*PredictRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _OnlinePredictionService_serviceDesc = grpc.ServiceDesc{ + ServiceName: 
"google.cloud.ml.v1beta1.OnlinePredictionService", + HandlerType: (*OnlinePredictionServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Predict", + Handler: _OnlinePredictionService_Predict_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/ml/v1beta1/prediction_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/ml/v1beta1/prediction_service.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 312 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4a, 0x03, 0x31, + 0x14, 0xc7, 0x49, 0x11, 0xb5, 0x11, 0x5c, 0x04, 0xb1, 0xb5, 0x08, 0x96, 0xba, 0xb0, 0x74, 0x91, + 0xd8, 0xba, 0xb2, 0xe2, 0xa6, 0x2b, 0x77, 0x0e, 0x75, 0x21, 0xb8, 0x29, 0xe9, 0x4c, 0x48, 0x23, + 0x99, 0xbc, 0x38, 0x93, 0x16, 0x8b, 0xb8, 0xf1, 0x0a, 0x3d, 0x9a, 0x57, 0xf0, 0x20, 0x92, 0x49, + 0x28, 0xca, 0xe8, 0xee, 0x31, 0x6f, 0x7e, 0xef, 0xff, 0x11, 0x7c, 0x29, 0x01, 0xa4, 0x16, 0x2c, + 0xd5, 0xb0, 0xcc, 0x58, 0xae, 0xd9, 0x6a, 0x38, 0x17, 0x8e, 0x0f, 0x99, 0x2d, 0x44, 0xa6, 0x52, + 0xa7, 0xc0, 0xcc, 0x4a, 0x51, 0xac, 0x54, 0x2a, 0xa8, 0x2d, 0xc0, 0x01, 0x69, 0x05, 0x82, 0x56, + 0x04, 0xcd, 0x35, 0x8d, 0x44, 0xe7, 0x34, 0x9e, 0xe2, 0x56, 0x31, 0x6e, 0x0c, 0x38, 0xee, 0xe9, + 0x32, 0x60, 0x9d, 0x93, 0x1f, 0xdb, 0x85, 0x73, 0x76, 0x0e, 0xd9, 0x3a, 0xac, 0x7a, 0x8f, 0xf8, + 0x30, 0x09, 0x6a, 0x53, 0xf1, 0xb2, 0x14, 0xa5, 0x23, 0x04, 0xef, 0x18, 0x9e, 0x8b, 0x36, 0xea, + 0xa2, 0x7e, 0x73, 0x5a, 0xcd, 0x64, 0x88, 0x9b, 0x9e, 0x9b, 0x79, 0xb0, 0xdd, 0xe8, 0xa2, 0xfe, + 0xc1, 0xe8, 0x88, 0x46, 0x2f, 0xdc, 0x2a, 0x7a, 0xe7, 0x9c, 0x9d, 0x40, 0xb6, 0x9e, 0xee, 0x2f, + 0xe2, 0x34, 0xda, 0x20, 0xdc, 0xba, 0x37, 0x5a, 0x19, 0x91, 0x6c, 0xd3, 0x3c, 0x84, 0x30, 0xe4, + 0x15, 0xef, 0xc5, 0x8f, 0xe4, 0x82, 0xfe, 0x13, 0x89, 0xfe, 0xb6, 0xd5, 0xf9, 0x53, 0xaf, 0x47, + 0x3f, 0x3e, 0xbf, 0x36, 0x8d, 0x7e, 0xef, 0x7c, 0xdb, 0xdd, 0x9b, 0x37, 0x7c, 0x6b, 0x0b, 0x78, + 0x16, 0xa9, 
0x2b, 0xd9, 0x60, 0xf0, 0x3e, 0x8e, 0x75, 0x8e, 0xd1, 0x60, 0xb2, 0xc2, 0x67, 0x29, + 0xe4, 0x35, 0x4d, 0x7f, 0x33, 0x1e, 0x98, 0x1c, 0xd7, 0xfc, 0x26, 0xbe, 0xa9, 0x04, 0x3d, 0x5d, + 0x47, 0x4c, 0x82, 0xe6, 0x46, 0x52, 0x28, 0x24, 0x93, 0xc2, 0x54, 0x3d, 0xb2, 0xb0, 0xe2, 0x56, + 0x95, 0xb5, 0xe7, 0xbc, 0xc9, 0xf5, 0x7c, 0xb7, 0xfa, 0xeb, 0xea, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0x00, 0x26, 0x25, 0x67, 0xf3, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/project_service.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/project_service.pb.go new file mode 100644 index 000000000..cdf5bbc57 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/project_service.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/ml/v1beta1/project_service.proto + +package ml + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Requests service account information associated with a project. +type GetConfigRequest struct { + // Required. The project name. + // + // Authorization: requires `Viewer` role on the specified project. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetConfigRequest) Reset() { *m = GetConfigRequest{} } +func (m *GetConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetConfigRequest) ProtoMessage() {} +func (*GetConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *GetConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Returns service account information associated with a project. +type GetConfigResponse struct { + // The service account Cloud ML uses to access resources in the project. + ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"` + // The project number for `service_account`. + ServiceAccountProject int64 `protobuf:"varint,2,opt,name=service_account_project,json=serviceAccountProject" json:"service_account_project,omitempty"` +} + +func (m *GetConfigResponse) Reset() { *m = GetConfigResponse{} } +func (m *GetConfigResponse) String() string { return proto.CompactTextString(m) } +func (*GetConfigResponse) ProtoMessage() {} +func (*GetConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *GetConfigResponse) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + +func (m *GetConfigResponse) GetServiceAccountProject() int64 { + if m != nil { + return m.ServiceAccountProject + } + return 0 +} + +func init() { + proto.RegisterType((*GetConfigRequest)(nil), "google.cloud.ml.v1beta1.GetConfigRequest") + proto.RegisterType((*GetConfigResponse)(nil), "google.cloud.ml.v1beta1.GetConfigResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for ProjectManagementService service + +type ProjectManagementServiceClient interface { + // Get the service account information associated with your project. You need + // this information in order to grant the service account persmissions for + // the Google Cloud Storage location where you put your model training code + // for training the model with Google Cloud Machine Learning. + GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) +} + +type projectManagementServiceClient struct { + cc *grpc.ClientConn +} + +func NewProjectManagementServiceClient(cc *grpc.ClientConn) ProjectManagementServiceClient { + return &projectManagementServiceClient{cc} +} + +func (c *projectManagementServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + out := new(GetConfigResponse) + err := grpc.Invoke(ctx, "/google.cloud.ml.v1beta1.ProjectManagementService/GetConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ProjectManagementService service + +type ProjectManagementServiceServer interface { + // Get the service account information associated with your project. You need + // this information in order to grant the service account persmissions for + // the Google Cloud Storage location where you put your model training code + // for training the model with Google Cloud Machine Learning. 
+ GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) +} + +func RegisterProjectManagementServiceServer(s *grpc.Server, srv ProjectManagementServiceServer) { + s.RegisterService(&_ProjectManagementService_serviceDesc, srv) +} + +func _ProjectManagementService_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProjectManagementServiceServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.ml.v1beta1.ProjectManagementService/GetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProjectManagementServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ProjectManagementService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.ml.v1beta1.ProjectManagementService", + HandlerType: (*ProjectManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetConfig", + Handler: _ProjectManagementService_GetConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/ml/v1beta1/project_service.proto", +} + +func init() { proto.RegisterFile("google/cloud/ml/v1beta1/project_service.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 327 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x4a, 0x43, 0x31, + 0x10, 0xc6, 0x79, 0x55, 0x84, 0x66, 0xe1, 0x9f, 0x88, 0xb4, 0x14, 0xc1, 0x52, 0xa4, 0xd6, 0xa2, + 0x09, 0x55, 0x10, 0x54, 0x5c, 0x58, 0x17, 0xae, 0x84, 0x52, 0x77, 0x6e, 0x4a, 0xfa, 0x1c, 0xc3, + 0x93, 0x24, 0x13, 0x5f, 0xd2, 0x6e, 0xc4, 0x8d, 0x27, 0x10, 0x3c, 0x87, 0xa7, 0xf1, 0x0a, 0x1e, + 0x44, 0xfa, 0x92, 0x16, 0x6d, 0x11, 0xdc, 0x0d, 
0x33, 0xbf, 0x6f, 0x32, 0xdf, 0x4c, 0xc8, 0xa1, + 0x44, 0x94, 0x0a, 0x78, 0xaa, 0x70, 0x74, 0xcf, 0xb5, 0xe2, 0xe3, 0xce, 0x10, 0xbc, 0xe8, 0x70, + 0x9b, 0xe3, 0x23, 0xa4, 0x7e, 0xe0, 0x20, 0x1f, 0x67, 0x29, 0x30, 0x9b, 0xa3, 0x47, 0x5a, 0x09, + 0x38, 0x2b, 0x70, 0xa6, 0x15, 0x8b, 0x78, 0x6d, 0x3b, 0xf6, 0x11, 0x36, 0xe3, 0xc2, 0x18, 0xf4, + 0xc2, 0x67, 0x68, 0x5c, 0x90, 0x35, 0x9a, 0x64, 0xfd, 0x1a, 0xfc, 0x15, 0x9a, 0x87, 0x4c, 0xf6, + 0xe1, 0x69, 0x04, 0xce, 0x53, 0x4a, 0x96, 0x8d, 0xd0, 0x50, 0x4d, 0xea, 0x49, 0xab, 0xdc, 0x2f, + 0xe2, 0x86, 0x27, 0x1b, 0x3f, 0x38, 0x67, 0xd1, 0x38, 0xa0, 0x7b, 0x64, 0x2d, 0x0e, 0x31, 0x10, + 0x69, 0x8a, 0x23, 0xe3, 0xa3, 0x66, 0x35, 0xa6, 0x2f, 0x43, 0x96, 0x9e, 0x90, 0xca, 0x1c, 0x38, + 0x88, 0x2e, 0xaa, 0xa5, 0x7a, 0xd2, 0x5a, 0xea, 0x6f, 0xfd, 0x16, 0xf4, 0x42, 0xf1, 0xe8, 0x23, + 0x21, 0xd5, 0x18, 0xdf, 0x08, 0x23, 0x24, 0x68, 0x30, 0xfe, 0x36, 0xa0, 0xf4, 0x2d, 0x21, 0xe5, + 0xd9, 0x4c, 0x74, 0x9f, 0xfd, 0xb1, 0x00, 0x36, 0xef, 0xaf, 0xd6, 0xfe, 0x0f, 0x1a, 0x2c, 0x36, + 0x0e, 0x5e, 0x3f, 0xbf, 0xde, 0x4b, 0x4d, 0xba, 0x3b, 0x5b, 0xff, 0xf3, 0x64, 0x1f, 0x17, 0x71, + 0x7c, 0xc7, 0xdb, 0x2f, 0x67, 0x72, 0xaa, 0xea, 0x3a, 0xb2, 0x93, 0xa2, 0x5e, 0x68, 0x2f, 0x6c, + 0x36, 0x7d, 0xa2, 0xbb, 0x19, 0xfd, 0x44, 0x17, 0xbd, 0xc9, 0x15, 0x7a, 0xc9, 0xdd, 0x69, 0xd4, + 0x48, 0x54, 0xc2, 0x48, 0x86, 0xb9, 0xe4, 0x12, 0x4c, 0x71, 0x23, 0x1e, 0x4a, 0xc2, 0x66, 0x6e, + 0xe1, 0x33, 0x9c, 0x6b, 0x35, 0x5c, 0x29, 0xa8, 0xe3, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x76, + 0x59, 0xc4, 0x91, 0x31, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1/resources.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1/resources.pb.go new file mode 100644 index 000000000..74e09fa95 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1/resources.pb.go @@ -0,0 +1,590 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/cloud/runtimeconfig/v1beta1/resources.proto + +/* +Package runtimeconfig is a generated protocol buffer package. + +It is generated from these files: + google/cloud/runtimeconfig/v1beta1/resources.proto + google/cloud/runtimeconfig/v1beta1/runtimeconfig.proto + +It has these top-level messages: + RuntimeConfig + Variable + EndCondition + Waiter + ListConfigsRequest + ListConfigsResponse + GetConfigRequest + CreateConfigRequest + UpdateConfigRequest + DeleteConfigRequest + ListVariablesRequest + ListVariablesResponse + WatchVariableRequest + GetVariableRequest + CreateVariableRequest + UpdateVariableRequest + DeleteVariableRequest + ListWaitersRequest + ListWaitersResponse + GetWaiterRequest + CreateWaiterRequest + DeleteWaiterRequest +*/ +package runtimeconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `VariableState` describes the last known state of the variable and is +// used during a `variables().watch` call to distinguish the state of the +// variable. +type VariableState int32 + +const ( + // Default variable state. + VariableState_VARIABLE_STATE_UNSPECIFIED VariableState = 0 + // The variable was updated, while `variables().watch` was executing. 
+ VariableState_UPDATED VariableState = 1 + // The variable was deleted, while `variables().watch` was executing. + VariableState_DELETED VariableState = 2 +) + +var VariableState_name = map[int32]string{ + 0: "VARIABLE_STATE_UNSPECIFIED", + 1: "UPDATED", + 2: "DELETED", +} +var VariableState_value = map[string]int32{ + "VARIABLE_STATE_UNSPECIFIED": 0, + "UPDATED": 1, + "DELETED": 2, +} + +func (x VariableState) String() string { + return proto.EnumName(VariableState_name, int32(x)) +} +func (VariableState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// A RuntimeConfig resource is the primary resource in the Cloud RuntimeConfig +// service. A RuntimeConfig resource consists of metadata and a hierarchy of +// variables. +type RuntimeConfig struct { + // The resource name of a runtime config. The name must have the format: + // + // projects/[PROJECT_ID]/configs/[CONFIG_NAME] + // + // The `[PROJECT_ID]` must be a valid project ID, and `[CONFIG_NAME]` is an + // arbitrary name that matches RFC 1035 segment specification. The length of + // `[CONFIG_NAME]` must be less than 64 bytes. + // + // You pick the RuntimeConfig resource name, but the server will validate that + // the name adheres to this format. After you create the resource, you cannot + // change the resource's name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // An optional description of the RuntimeConfig object. 
+ Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *RuntimeConfig) Reset() { *m = RuntimeConfig{} } +func (m *RuntimeConfig) String() string { return proto.CompactTextString(m) } +func (*RuntimeConfig) ProtoMessage() {} +func (*RuntimeConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *RuntimeConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RuntimeConfig) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Describes a single variable within a RuntimeConfig resource. +// The name denotes the hierarchical variable name. For example, +// `ports/serving_port` is a valid variable name. The variable value is an +// opaque string and only leaf variables can have values (that is, variables +// that do not have any child variables). +type Variable struct { + // The name of the variable resource, in the format: + // + // projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME] + // + // The `[PROJECT_ID]` must be a valid project ID, `[CONFIG_NAME]` must be a + // valid RuntimeConfig reource and `[VARIABLE_NAME]` follows Unix file system + // file path naming. + // + // The `[VARIABLE_NAME]` can contain ASCII letters, numbers, slashes and + // dashes. Slashes are used as path element separators and are not part of the + // `[VARIABLE_NAME]` itself, so `[VARIABLE_NAME]` must contain at least one + // non-slash character. Multiple slashes are coalesced into single slash + // character. Each path segment should follow RFC 1035 segment specification. + // The length of a `[VARIABLE_NAME]` must be less than 256 bytes. + // + // Once you create a variable, you cannot change the variable name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The the value of the variable. It can be either a binary or a string + // value. You must specify one of either `value` or `text`. 
Specifying both + // will cause the server to return an error. + // + // Types that are valid to be assigned to Contents: + // *Variable_Value + // *Variable_Text + Contents isVariable_Contents `protobuf_oneof:"contents"` + // [Output Only] The time of the last variable update. + UpdateTime *google_protobuf2.Timestamp `protobuf:"bytes,3,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // [Ouput only] The current state of the variable. The variable state indicates + // the outcome of the `variables().watch` call and is visible through the + // `get` and `list` calls. + State VariableState `protobuf:"varint,4,opt,name=state,enum=google.cloud.runtimeconfig.v1beta1.VariableState" json:"state,omitempty"` +} + +func (m *Variable) Reset() { *m = Variable{} } +func (m *Variable) String() string { return proto.CompactTextString(m) } +func (*Variable) ProtoMessage() {} +func (*Variable) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type isVariable_Contents interface { + isVariable_Contents() +} + +type Variable_Value struct { + Value []byte `protobuf:"bytes,2,opt,name=value,proto3,oneof"` +} +type Variable_Text struct { + Text string `protobuf:"bytes,5,opt,name=text,oneof"` +} + +func (*Variable_Value) isVariable_Contents() {} +func (*Variable_Text) isVariable_Contents() {} + +func (m *Variable) GetContents() isVariable_Contents { + if m != nil { + return m.Contents + } + return nil +} + +func (m *Variable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Variable) GetValue() []byte { + if x, ok := m.GetContents().(*Variable_Value); ok { + return x.Value + } + return nil +} + +func (m *Variable) GetText() string { + if x, ok := m.GetContents().(*Variable_Text); ok { + return x.Text + } + return "" +} + +func (m *Variable) GetUpdateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *Variable) GetState() VariableState { + if m != nil { + return 
m.State + } + return VariableState_VARIABLE_STATE_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Variable) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Variable_OneofMarshaler, _Variable_OneofUnmarshaler, _Variable_OneofSizer, []interface{}{ + (*Variable_Value)(nil), + (*Variable_Text)(nil), + } +} + +func _Variable_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Variable) + // contents + switch x := m.Contents.(type) { + case *Variable_Value: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Value) + case *Variable_Text: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Text) + case nil: + default: + return fmt.Errorf("Variable.Contents has unexpected type %T", x) + } + return nil +} + +func _Variable_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Variable) + switch tag { + case 2: // contents.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Contents = &Variable_Value{x} + return true, err + case 5: // contents.text + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Contents = &Variable_Text{x} + return true, err + default: + return false, nil + } +} + +func _Variable_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Variable) + // contents + switch x := m.Contents.(type) { + case *Variable_Value: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Value))) + n += len(x.Value) + case *Variable_Text: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Text))) + n += len(x.Text) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + 
return n +} + +// The condition that a Waiter resource is waiting for. +type EndCondition struct { + // The condition oneof holds the available condition types for this + // EndCondition. Currently, the only available type is Cardinality. + // + // Types that are valid to be assigned to Condition: + // *EndCondition_Cardinality_ + Condition isEndCondition_Condition `protobuf_oneof:"condition"` +} + +func (m *EndCondition) Reset() { *m = EndCondition{} } +func (m *EndCondition) String() string { return proto.CompactTextString(m) } +func (*EndCondition) ProtoMessage() {} +func (*EndCondition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isEndCondition_Condition interface { + isEndCondition_Condition() +} + +type EndCondition_Cardinality_ struct { + Cardinality *EndCondition_Cardinality `protobuf:"bytes,1,opt,name=cardinality,oneof"` +} + +func (*EndCondition_Cardinality_) isEndCondition_Condition() {} + +func (m *EndCondition) GetCondition() isEndCondition_Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *EndCondition) GetCardinality() *EndCondition_Cardinality { + if x, ok := m.GetCondition().(*EndCondition_Cardinality_); ok { + return x.Cardinality + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*EndCondition) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _EndCondition_OneofMarshaler, _EndCondition_OneofUnmarshaler, _EndCondition_OneofSizer, []interface{}{ + (*EndCondition_Cardinality_)(nil), + } +} + +func _EndCondition_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*EndCondition) + // condition + switch x := m.Condition.(type) { + case *EndCondition_Cardinality_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cardinality); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("EndCondition.Condition has unexpected type %T", x) + } + return nil +} + +func _EndCondition_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*EndCondition) + switch tag { + case 1: // condition.cardinality + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(EndCondition_Cardinality) + err := b.DecodeMessage(msg) + m.Condition = &EndCondition_Cardinality_{msg} + return true, err + default: + return false, nil + } +} + +func _EndCondition_OneofSizer(msg proto.Message) (n int) { + m := msg.(*EndCondition) + // condition + switch x := m.Condition.(type) { + case *EndCondition_Cardinality_: + s := proto.Size(x.Cardinality) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cardinality condition for the Waiter resource. A cardinality condition is +// met when the number of variables under a specified path prefix reaches a +// predefined number. 
For example, if you set a Cardinality condition where +// the `path` is set to `/foo` and the number of paths is set to 2, the +// following variables would meet the condition in a RuntimeConfig resource: +// +// + `/foo/variable1 = "value1"` +// + `/foo/variable2 = "value2"` +// + `/bar/variable3 = "value3"` +// +// It would not would not satisify the same condition with the `number` set to +// 3, however, because there is only 2 paths that start with `/foo`. +// Cardinality conditions are recursive; all subtrees under the specific +// path prefix are counted. +type EndCondition_Cardinality struct { + // The root of the variable subtree to monitor. For example, `/foo`. + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + // The number variables under the `path` that must exist to meet this + // condition. Defaults to 1 if not specified. + Number int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` +} + +func (m *EndCondition_Cardinality) Reset() { *m = EndCondition_Cardinality{} } +func (m *EndCondition_Cardinality) String() string { return proto.CompactTextString(m) } +func (*EndCondition_Cardinality) ProtoMessage() {} +func (*EndCondition_Cardinality) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *EndCondition_Cardinality) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *EndCondition_Cardinality) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +// A Waiter resource waits for some end condition within a RuntimeConfig resource +// to be met before it returns. For example, assume you have a distributed +// system where each node writes to a Variable resource indidicating the node's +// readiness as part of the startup process. +// +// You then configure a Waiter resource with the success condition set to wait +// until some number of nodes have checked in. 
Afterwards, your application +// runs some arbitrary code after the condition has been met and the waiter +// returns successfully. +// +// Once created, a Waiter resource is immutable. +// +// To learn more about using waiters, read the +// [Creating a Waiter](/deployment-manager/runtime-configurator/creating-a-waiter) +// documentation. +type Waiter struct { + // The name of the Waiter resource, in the format: + // + // projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME] + // + // The `[PROJECT_ID]` must be a valid Google Cloud project ID, + // the `[CONFIG_NAME]` must be a valid RuntimeConfig resource, the + // `[WAITER_NAME]` must match RFC 1035 segment specification, and the length + // of `[WAITER_NAME]` must be less than 64 bytes. + // + // After you create a Waiter resource, you cannot change the resource name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // [Required] Specifies the timeout of the waiter in seconds, beginning from + // the instant that `waiters().create` method is called. If this time elapses + // before the success or failure conditions are met, the waiter fails and sets + // the `error` code to `DEADLINE_EXCEEDED`. + Timeout *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=timeout" json:"timeout,omitempty"` + // [Optional] The failure condition of this waiter. If this condition is met, + // `done` will be set to `true` and the `error` code will be set to `ABORTED`. + // The failure condition takes precedence over the success condition. If both + // conditions are met, a failure will be indicated. This value is optional; if + // no failure condition is set, the only failure scenario will be a timeout. + Failure *EndCondition `protobuf:"bytes,3,opt,name=failure" json:"failure,omitempty"` + // [Required] The success condition. If this condition is met, `done` will be + // set to `true` and the `error` value will remain unset. 
The failure condition + // takes precedence over the success condition. If both conditions are met, a + // failure will be indicated. + Success *EndCondition `protobuf:"bytes,4,opt,name=success" json:"success,omitempty"` + // [Output Only] The instant at which this Waiter resource was created. Adding + // the value of `timeout` to this instant yields the timeout deadline for the + // waiter. + CreateTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // [Output Only] If the value is `false`, it means the waiter is still waiting + // for one of its conditions to be met. + // + // If true, the waiter has finished. If the waiter finished due to a timeout + // or failure, `error` will be set. + Done bool `protobuf:"varint,6,opt,name=done" json:"done,omitempty"` + // [Output Only] If the waiter ended due to a failure or timeout, this value + // will be set. + Error *google_rpc.Status `protobuf:"bytes,7,opt,name=error" json:"error,omitempty"` +} + +func (m *Waiter) Reset() { *m = Waiter{} } +func (m *Waiter) String() string { return proto.CompactTextString(m) } +func (*Waiter) ProtoMessage() {} +func (*Waiter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Waiter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Waiter) GetTimeout() *google_protobuf1.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *Waiter) GetFailure() *EndCondition { + if m != nil { + return m.Failure + } + return nil +} + +func (m *Waiter) GetSuccess() *EndCondition { + if m != nil { + return m.Success + } + return nil +} + +func (m *Waiter) GetCreateTime() *google_protobuf2.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Waiter) GetDone() bool { + if m != nil { + return m.Done + } + return false +} + +func (m *Waiter) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + 
+func init() { + proto.RegisterType((*RuntimeConfig)(nil), "google.cloud.runtimeconfig.v1beta1.RuntimeConfig") + proto.RegisterType((*Variable)(nil), "google.cloud.runtimeconfig.v1beta1.Variable") + proto.RegisterType((*EndCondition)(nil), "google.cloud.runtimeconfig.v1beta1.EndCondition") + proto.RegisterType((*EndCondition_Cardinality)(nil), "google.cloud.runtimeconfig.v1beta1.EndCondition.Cardinality") + proto.RegisterType((*Waiter)(nil), "google.cloud.runtimeconfig.v1beta1.Waiter") + proto.RegisterEnum("google.cloud.runtimeconfig.v1beta1.VariableState", VariableState_name, VariableState_value) +} + +func init() { proto.RegisterFile("google/cloud/runtimeconfig/v1beta1/resources.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 615 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x5d, 0x6f, 0xd3, 0x3c, + 0x14, 0xc7, 0x9b, 0x3e, 0x7d, 0xd9, 0x4e, 0xb6, 0x47, 0x93, 0x85, 0x46, 0xa8, 0xd0, 0xa8, 0x7a, + 0x81, 0x2a, 0x2e, 0x12, 0xda, 0x5d, 0xa1, 0x71, 0xd3, 0x97, 0xb0, 0x15, 0x4d, 0x30, 0xb9, 0xdd, + 0x90, 0xb8, 0x19, 0xae, 0xe3, 0x85, 0x48, 0xa9, 0x1d, 0x39, 0xce, 0x04, 0xdf, 0x86, 0x6b, 0x3e, + 0x01, 0x9f, 0x86, 0x2b, 0x3e, 0x08, 0xb2, 0xe3, 0x40, 0x0b, 0x13, 0x1b, 0xdc, 0xf9, 0xf8, 0xfc, + 0xcf, 0xef, 0xbc, 0xf8, 0x24, 0x30, 0x8c, 0x85, 0x88, 0x53, 0x16, 0xd0, 0x54, 0x14, 0x51, 0x20, + 0x0b, 0xae, 0x92, 0x15, 0xa3, 0x82, 0x5f, 0x25, 0x71, 0x70, 0x3d, 0x58, 0x32, 0x45, 0x06, 0x81, + 0x64, 0xb9, 0x28, 0x24, 0x65, 0xb9, 0x9f, 0x49, 0xa1, 0x04, 0xea, 0x95, 0x31, 0xbe, 0x89, 0xf1, + 0x37, 0x62, 0x7c, 0x1b, 0xd3, 0x79, 0x68, 0xb9, 0x24, 0x4b, 0x02, 0xc2, 0xb9, 0x50, 0x44, 0x25, + 0x82, 0x5b, 0x42, 0xe7, 0xc0, 0x7a, 0x8d, 0xb5, 0x2c, 0xae, 0x82, 0xa8, 0x90, 0x46, 0x60, 0xfd, + 0x8f, 0x7e, 0xf5, 0xeb, 0x0c, 0xb9, 0x22, 0xab, 0xcc, 0x0a, 0xee, 0x5b, 0x81, 0xcc, 0x68, 0x90, + 0x2b, 0xa2, 0x0a, 0x4b, 0xee, 0x85, 0xb0, 0x8b, 0xcb, 0x82, 0x26, 0xa6, 0x20, 0x84, 0xa0, 0xc1, + 0xc9, 0x8a, 0x79, 0x4e, 
0xd7, 0xe9, 0x6f, 0x63, 0x73, 0x46, 0x5d, 0x70, 0x23, 0x96, 0x53, 0x99, + 0x64, 0x3a, 0xa7, 0x57, 0x37, 0xae, 0xf5, 0xab, 0xde, 0x57, 0x07, 0xb6, 0x2e, 0x88, 0x4c, 0xc8, + 0x32, 0x65, 0x37, 0x22, 0xf6, 0xa1, 0x79, 0x4d, 0xd2, 0x82, 0x99, 0xe0, 0x9d, 0x93, 0x1a, 0x2e, + 0x4d, 0x74, 0x0f, 0x1a, 0x8a, 0x7d, 0x50, 0x5e, 0x53, 0x6b, 0x4f, 0x6a, 0xd8, 0x58, 0xe8, 0x08, + 0xdc, 0x22, 0x8b, 0x88, 0x62, 0x97, 0xba, 0x32, 0xef, 0xbf, 0xae, 0xd3, 0x77, 0x87, 0x1d, 0xdf, + 0xce, 0xb1, 0xea, 0xd2, 0x5f, 0x54, 0x5d, 0x62, 0x28, 0xe5, 0xfa, 0x02, 0x1d, 0x43, 0x53, 0xb7, + 0xc8, 0xbc, 0x46, 0xd7, 0xe9, 0xff, 0x3f, 0x1c, 0xf8, 0xb7, 0x8f, 0xdf, 0xaf, 0x6a, 0x9f, 0xeb, + 0x40, 0x5c, 0xc6, 0x8f, 0x01, 0xb6, 0xa8, 0xe0, 0x8a, 0x71, 0x95, 0xf7, 0xbe, 0x38, 0xb0, 0x13, + 0xf2, 0x68, 0x22, 0x78, 0x94, 0xe8, 0x8e, 0xd1, 0x3b, 0x70, 0x29, 0x91, 0x51, 0xc2, 0x49, 0x9a, + 0xa8, 0x8f, 0xa6, 0x57, 0x77, 0xf8, 0xfc, 0x2e, 0xb9, 0xd6, 0x31, 0xfe, 0xe4, 0x27, 0xe3, 0xa4, + 0x86, 0xd7, 0x91, 0x9d, 0x67, 0xe0, 0xae, 0x79, 0xf5, 0x54, 0x33, 0xa2, 0xde, 0x57, 0x53, 0xd5, + 0x67, 0xb4, 0x0f, 0x2d, 0x5e, 0xac, 0x96, 0x4c, 0x9a, 0xb1, 0x36, 0xb1, 0xb5, 0xc6, 0x2e, 0x6c, + 0xd3, 0x2a, 0x45, 0xef, 0x5b, 0x1d, 0x5a, 0x6f, 0x48, 0xa2, 0x98, 0xbc, 0xf1, 0x65, 0x0e, 0xa1, + 0xad, 0x8b, 0x14, 0x85, 0x32, 0x10, 0x77, 0xf8, 0xe0, 0xb7, 0x39, 0x4f, 0xed, 0xb6, 0xe1, 0x4a, + 0x89, 0x5e, 0x42, 0xfb, 0x8a, 0x24, 0x69, 0x21, 0xab, 0xc7, 0x79, 0xfa, 0xb7, 0x9d, 0xe3, 0x0a, + 0xa0, 0x59, 0x79, 0x41, 0x29, 0xcb, 0x73, 0xf3, 0x62, 0xff, 0xc4, 0xb2, 0x00, 0xbd, 0x38, 0x54, + 0xb2, 0x1f, 0x8b, 0xd3, 0xbc, 0x7d, 0x71, 0x4a, 0xb9, 0x59, 0x1c, 0x04, 0x8d, 0x48, 0x70, 0xe6, + 0xb5, 0xba, 0x4e, 0x7f, 0x0b, 0x9b, 0x33, 0xea, 0x43, 0x93, 0x49, 0x29, 0xa4, 0xd7, 0x36, 0x28, + 0x54, 0xa1, 0x64, 0x46, 0xfd, 0xb9, 0xf9, 0x90, 0x70, 0x29, 0x78, 0x32, 0x83, 0xdd, 0x8d, 0x2d, + 0x42, 0x07, 0xd0, 0xb9, 0x18, 0xe1, 0xd9, 0x68, 0x7c, 0x1a, 0x5e, 0xce, 0x17, 0xa3, 0x45, 0x78, + 0x79, 0xfe, 0x6a, 0x7e, 0x16, 0x4e, 0x66, 0x2f, 0x66, 0xe1, 
0x74, 0xaf, 0x86, 0x5c, 0x68, 0x9f, + 0x9f, 0x4d, 0x47, 0x8b, 0x70, 0xba, 0xe7, 0x68, 0x63, 0x1a, 0x9e, 0x86, 0xda, 0xa8, 0x8f, 0x3f, + 0x39, 0xf0, 0x98, 0x8a, 0xd5, 0x1d, 0xc6, 0x70, 0xe6, 0xbc, 0x7d, 0x6d, 0x55, 0xb1, 0x48, 0x09, + 0x8f, 0x7d, 0x21, 0xe3, 0x20, 0x66, 0xdc, 0xb4, 0x1a, 0x94, 0x2e, 0x92, 0x25, 0xf9, 0x9f, 0x7e, + 0x58, 0x47, 0x1b, 0xb7, 0x9f, 0xeb, 0xbd, 0xe3, 0x92, 0x38, 0x31, 0x79, 0x37, 0x7e, 0x0f, 0xfe, + 0xc5, 0x60, 0xac, 0x43, 0x96, 0x2d, 0x93, 0xe0, 0xf0, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x61, + 0xe4, 0x09, 0x63, 0x10, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1/runtimeconfig.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1/runtimeconfig.pb.go new file mode 100644 index 000000000..359703685 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1/runtimeconfig.pb.go @@ -0,0 +1,1354 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/runtimeconfig/v1beta1/runtimeconfig.proto + +package runtimeconfig + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request for the `ListConfigs()` method. +type ListConfigsRequest struct { + // The [project ID](https://support.google.com/cloud/answer/6158840?hl=en&ref_topic=6158848) + // for this request, in the format `projects/[PROJECT_ID]`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Specifies the number of results to return per page. If there are fewer + // elements than the specified number, returns all elements. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Specifies a page token to use. Set `pageToken` to a `nextPageToken` + // returned by a previous list request to get the next page of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListConfigsRequest) Reset() { *m = ListConfigsRequest{} } +func (m *ListConfigsRequest) String() string { return proto.CompactTextString(m) } +func (*ListConfigsRequest) ProtoMessage() {} +func (*ListConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *ListConfigsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListConfigsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListConfigsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// `ListConfigs()` returns the following response. The order of returned +// objects is arbitrary; that is, it is not ordered in any particular way. +type ListConfigsResponse struct { + // A list of the configurations in the project. The order of returned + // objects is arbitrary; that is, it is not ordered in any particular way. + Configs []*RuntimeConfig `protobuf:"bytes,1,rep,name=configs" json:"configs,omitempty"` + // This token allows you to get the next page of results for list requests. + // If the number of results is larger than `pageSize`, use the `nextPageToken` + // as a value for the query parameter `pageToken` in the next list request. 
+ // Subsequent list requests will have their own `nextPageToken` to continue + // paging through the results + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListConfigsResponse) Reset() { *m = ListConfigsResponse{} } +func (m *ListConfigsResponse) String() string { return proto.CompactTextString(m) } +func (*ListConfigsResponse) ProtoMessage() {} +func (*ListConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *ListConfigsResponse) GetConfigs() []*RuntimeConfig { + if m != nil { + return m.Configs + } + return nil +} + +func (m *ListConfigsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Gets a RuntimeConfig resource. +type GetConfigRequest struct { + // The name of the RuntimeConfig resource to retrieve, in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *GetConfigRequest) Reset() { *m = GetConfigRequest{} } +func (m *GetConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetConfigRequest) ProtoMessage() {} +func (*GetConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *GetConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Creates a RuntimeConfig resource. +type CreateConfigRequest struct { + // The [project ID](https://support.google.com/cloud/answer/6158840?hl=en&ref_topic=6158848) + // for this request, in the format `projects/[PROJECT_ID]`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The RuntimeConfig to create. + Config *RuntimeConfig `protobuf:"bytes,2,opt,name=config" json:"config,omitempty"` + // An optional but recommended unique `request_id`. 
If the server + // receives two `create()` requests with the same + // `request_id`, then the second request will be ignored and the + // first resource created and stored in the backend is returned. + // Empty `request_id` fields are ignored. + // + // It is responsibility of the client to ensure uniqueness of the + // `request_id` strings. + // + // `request_id` strings are limited to 64 characters. + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId" json:"request_id,omitempty"` +} + +func (m *CreateConfigRequest) Reset() { *m = CreateConfigRequest{} } +func (m *CreateConfigRequest) String() string { return proto.CompactTextString(m) } +func (*CreateConfigRequest) ProtoMessage() {} +func (*CreateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *CreateConfigRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateConfigRequest) GetConfig() *RuntimeConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *CreateConfigRequest) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +// Request message for `UpdateConfig()` method. +type UpdateConfigRequest struct { + // The name of the RuntimeConfig resource to update, in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The config resource to update. 
+ Config *RuntimeConfig `protobuf:"bytes,2,opt,name=config" json:"config,omitempty"` +} + +func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } +func (m *UpdateConfigRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateConfigRequest) ProtoMessage() {} +func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *UpdateConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateConfigRequest) GetConfig() *RuntimeConfig { + if m != nil { + return m.Config + } + return nil +} + +// Request for the `DeleteConfig()` method. +type DeleteConfigRequest struct { + // The RuntimeConfig resource to delete, in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteConfigRequest) Reset() { *m = DeleteConfigRequest{} } +func (m *DeleteConfigRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteConfigRequest) ProtoMessage() {} +func (*DeleteConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *DeleteConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for the `ListVariables()` method. +type ListVariablesRequest struct { + // The path to the RuntimeConfig resource for which you want to list variables. + // The configuration must exist beforehand; the path must by in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Filters variables by matching the specified filter. For example: + // + // `projects/example-project/config/[CONFIG_NAME]/variables/example-variable`. + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // Specifies the number of results to return per page. 
If there are fewer + // elements than the specified number, returns all elements. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Specifies a page token to use. Set `pageToken` to a `nextPageToken` + // returned by a previous list request to get the next page of results. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The flag indicates whether the user wants to return values of variables. + // If true, then only those variables that user has IAM GetVariable permission + // will be returned along with their values. + ReturnValues bool `protobuf:"varint,5,opt,name=return_values,json=returnValues" json:"return_values,omitempty"` +} + +func (m *ListVariablesRequest) Reset() { *m = ListVariablesRequest{} } +func (m *ListVariablesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVariablesRequest) ProtoMessage() {} +func (*ListVariablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *ListVariablesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListVariablesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListVariablesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListVariablesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListVariablesRequest) GetReturnValues() bool { + if m != nil { + return m.ReturnValues + } + return false +} + +// Response for the `ListVariables()` method. +type ListVariablesResponse struct { + // A list of variables and their values. The order of returned variable + // objects is arbitrary. + Variables []*Variable `protobuf:"bytes,1,rep,name=variables" json:"variables,omitempty"` + // This token allows you to get the next page of results for list requests. 
+ // If the number of results is larger than `pageSize`, use the `nextPageToken` + // as a value for the query parameter `pageToken` in the next list request. + // Subsequent list requests will have their own `nextPageToken` to continue + // paging through the results + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListVariablesResponse) Reset() { *m = ListVariablesResponse{} } +func (m *ListVariablesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVariablesResponse) ProtoMessage() {} +func (*ListVariablesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *ListVariablesResponse) GetVariables() []*Variable { + if m != nil { + return m.Variables + } + return nil +} + +func (m *ListVariablesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the `WatchVariable()` method. +type WatchVariableRequest struct { + // The name of the variable to watch, in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If specified, checks the current timestamp of the variable and if the + // current timestamp is newer than `newerThan` timestamp, the method returns + // immediately. + // + // If not specified or the variable has an older timestamp, the watcher waits + // for a the value to change before returning. 
+ NewerThan *google_protobuf2.Timestamp `protobuf:"bytes,4,opt,name=newer_than,json=newerThan" json:"newer_than,omitempty"` +} + +func (m *WatchVariableRequest) Reset() { *m = WatchVariableRequest{} } +func (m *WatchVariableRequest) String() string { return proto.CompactTextString(m) } +func (*WatchVariableRequest) ProtoMessage() {} +func (*WatchVariableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *WatchVariableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *WatchVariableRequest) GetNewerThan() *google_protobuf2.Timestamp { + if m != nil { + return m.NewerThan + } + return nil +} + +// Request for the `GetVariable()` method. +type GetVariableRequest struct { + // The name of the variable to return, in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIBLE_NAME]` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetVariableRequest) Reset() { *m = GetVariableRequest{} } +func (m *GetVariableRequest) String() string { return proto.CompactTextString(m) } +func (*GetVariableRequest) ProtoMessage() {} +func (*GetVariableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *GetVariableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for the `CreateVariable()` method. +type CreateVariableRequest struct { + // The path to the RutimeConfig resource that this variable should belong to. + // The configuration must exist beforehand; the path must by in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The variable to create. + Variable *Variable `protobuf:"bytes,2,opt,name=variable" json:"variable,omitempty"` + // An optional but recommended unique `request_id`. 
If the server + // receives two `create()` requests with the same + // `request_id`, then the second request will be ignored and the + // first resource created and stored in the backend is returned. + // Empty `request_id` fields are ignored. + // + // It is responsibility of the client to ensure uniqueness of the + // `request_id` strings. + // + // `request_id` strings are limited to 64 characters. + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId" json:"request_id,omitempty"` +} + +func (m *CreateVariableRequest) Reset() { *m = CreateVariableRequest{} } +func (m *CreateVariableRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVariableRequest) ProtoMessage() {} +func (*CreateVariableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *CreateVariableRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateVariableRequest) GetVariable() *Variable { + if m != nil { + return m.Variable + } + return nil +} + +func (m *CreateVariableRequest) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +// Request for the `UpdateVariable()` method. +type UpdateVariableRequest struct { + // The name of the variable to update, in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The variable to update. 
+ Variable *Variable `protobuf:"bytes,2,opt,name=variable" json:"variable,omitempty"` +} + +func (m *UpdateVariableRequest) Reset() { *m = UpdateVariableRequest{} } +func (m *UpdateVariableRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateVariableRequest) ProtoMessage() {} +func (*UpdateVariableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *UpdateVariableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateVariableRequest) GetVariable() *Variable { + if m != nil { + return m.Variable + } + return nil +} + +// Request for the `DeleteVariable()` method. +type DeleteVariableRequest struct { + // The name of the variable to delete, in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/variables/[VARIABLE_NAME]` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Set to `true` to recursively delete multiple variables with the same + // prefix. + Recursive bool `protobuf:"varint,2,opt,name=recursive" json:"recursive,omitempty"` +} + +func (m *DeleteVariableRequest) Reset() { *m = DeleteVariableRequest{} } +func (m *DeleteVariableRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVariableRequest) ProtoMessage() {} +func (*DeleteVariableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *DeleteVariableRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteVariableRequest) GetRecursive() bool { + if m != nil { + return m.Recursive + } + return false +} + +// Request for the `ListWaiters()` method. +type ListWaitersRequest struct { + // The path to the configuration for which you want to get a list of waiters. 
+ // The configuration must exist beforehand; the path must by in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Specifies the number of results to return per page. If there are fewer + // elements than the specified number, returns all elements. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Specifies a page token to use. Set `pageToken` to a `nextPageToken` + // returned by a previous list request to get the next page of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListWaitersRequest) Reset() { *m = ListWaitersRequest{} } +func (m *ListWaitersRequest) String() string { return proto.CompactTextString(m) } +func (*ListWaitersRequest) ProtoMessage() {} +func (*ListWaitersRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *ListWaitersRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListWaitersRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListWaitersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the `ListWaiters()` method. +// Order of returned waiter objects is arbitrary. +type ListWaitersResponse struct { + // Found waiters in the project. + Waiters []*Waiter `protobuf:"bytes,1,rep,name=waiters" json:"waiters,omitempty"` + // This token allows you to get the next page of results for list requests. + // If the number of results is larger than `pageSize`, use the `nextPageToken` + // as a value for the query parameter `pageToken` in the next list request. 
+ // Subsequent list requests will have their own `nextPageToken` to continue + // paging through the results + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListWaitersResponse) Reset() { *m = ListWaitersResponse{} } +func (m *ListWaitersResponse) String() string { return proto.CompactTextString(m) } +func (*ListWaitersResponse) ProtoMessage() {} +func (*ListWaitersResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +func (m *ListWaitersResponse) GetWaiters() []*Waiter { + if m != nil { + return m.Waiters + } + return nil +} + +func (m *ListWaitersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the `GetWaiter()` method. +type GetWaiterRequest struct { + // The fully-qualified name of the Waiter resource object to retrieve, in the + // format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetWaiterRequest) Reset() { *m = GetWaiterRequest{} } +func (m *GetWaiterRequest) String() string { return proto.CompactTextString(m) } +func (*GetWaiterRequest) ProtoMessage() {} +func (*GetWaiterRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } + +func (m *GetWaiterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for `CreateWaiter()` method. +type CreateWaiterRequest struct { + // The path to the configuration that will own the waiter. + // The configuration must exist beforehand; the path must by in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The Waiter resource to create. 
+ Waiter *Waiter `protobuf:"bytes,2,opt,name=waiter" json:"waiter,omitempty"` + // An optional but recommended unique `request_id`. If the server + // receives two `create()` requests with the same + // `request_id`, then the second request will be ignored and the + // first resource created and stored in the backend is returned. + // Empty `request_id` fields are ignored. + // + // It is responsibility of the client to ensure uniqueness of the + // `request_id` strings. + // + // `request_id` strings are limited to 64 characters. + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId" json:"request_id,omitempty"` +} + +func (m *CreateWaiterRequest) Reset() { *m = CreateWaiterRequest{} } +func (m *CreateWaiterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateWaiterRequest) ProtoMessage() {} +func (*CreateWaiterRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } + +func (m *CreateWaiterRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateWaiterRequest) GetWaiter() *Waiter { + if m != nil { + return m.Waiter + } + return nil +} + +func (m *CreateWaiterRequest) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +// Request for the `DeleteWaiter()` method. 
+type DeleteWaiterRequest struct { + // The Waiter resource to delete, in the format: + // + // `projects/[PROJECT_ID]/configs/[CONFIG_NAME]/waiters/[WAITER_NAME]` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteWaiterRequest) Reset() { *m = DeleteWaiterRequest{} } +func (m *DeleteWaiterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteWaiterRequest) ProtoMessage() {} +func (*DeleteWaiterRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } + +func (m *DeleteWaiterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*ListConfigsRequest)(nil), "google.cloud.runtimeconfig.v1beta1.ListConfigsRequest") + proto.RegisterType((*ListConfigsResponse)(nil), "google.cloud.runtimeconfig.v1beta1.ListConfigsResponse") + proto.RegisterType((*GetConfigRequest)(nil), "google.cloud.runtimeconfig.v1beta1.GetConfigRequest") + proto.RegisterType((*CreateConfigRequest)(nil), "google.cloud.runtimeconfig.v1beta1.CreateConfigRequest") + proto.RegisterType((*UpdateConfigRequest)(nil), "google.cloud.runtimeconfig.v1beta1.UpdateConfigRequest") + proto.RegisterType((*DeleteConfigRequest)(nil), "google.cloud.runtimeconfig.v1beta1.DeleteConfigRequest") + proto.RegisterType((*ListVariablesRequest)(nil), "google.cloud.runtimeconfig.v1beta1.ListVariablesRequest") + proto.RegisterType((*ListVariablesResponse)(nil), "google.cloud.runtimeconfig.v1beta1.ListVariablesResponse") + proto.RegisterType((*WatchVariableRequest)(nil), "google.cloud.runtimeconfig.v1beta1.WatchVariableRequest") + proto.RegisterType((*GetVariableRequest)(nil), "google.cloud.runtimeconfig.v1beta1.GetVariableRequest") + proto.RegisterType((*CreateVariableRequest)(nil), "google.cloud.runtimeconfig.v1beta1.CreateVariableRequest") + proto.RegisterType((*UpdateVariableRequest)(nil), "google.cloud.runtimeconfig.v1beta1.UpdateVariableRequest") + 
proto.RegisterType((*DeleteVariableRequest)(nil), "google.cloud.runtimeconfig.v1beta1.DeleteVariableRequest") + proto.RegisterType((*ListWaitersRequest)(nil), "google.cloud.runtimeconfig.v1beta1.ListWaitersRequest") + proto.RegisterType((*ListWaitersResponse)(nil), "google.cloud.runtimeconfig.v1beta1.ListWaitersResponse") + proto.RegisterType((*GetWaiterRequest)(nil), "google.cloud.runtimeconfig.v1beta1.GetWaiterRequest") + proto.RegisterType((*CreateWaiterRequest)(nil), "google.cloud.runtimeconfig.v1beta1.CreateWaiterRequest") + proto.RegisterType((*DeleteWaiterRequest)(nil), "google.cloud.runtimeconfig.v1beta1.DeleteWaiterRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for RuntimeConfigManager service + +type RuntimeConfigManagerClient interface { + // Lists all the RuntimeConfig resources within project. + ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) + // Gets information about a RuntimeConfig resource. + GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*RuntimeConfig, error) + // Creates a new RuntimeConfig resource. The configuration name must be + // unique within project. + CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*RuntimeConfig, error) + // Updates a RuntimeConfig resource. The configuration must exist beforehand. + UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*RuntimeConfig, error) + // Deletes a RuntimeConfig resource. 
+ DeleteConfig(ctx context.Context, in *DeleteConfigRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Lists variables within given a configuration, matching any provided filters. + // This only lists variable names, not the values, unless `return_values` is + // true, in which case only variables that user has IAM permission to + // GetVariable will be returned. + ListVariables(ctx context.Context, in *ListVariablesRequest, opts ...grpc.CallOption) (*ListVariablesResponse, error) + // Gets information about a single variable. + GetVariable(ctx context.Context, in *GetVariableRequest, opts ...grpc.CallOption) (*Variable, error) + // Watches a specific variable and waits for a change in the variable's value. + // When there is a change, this method returns the new value or times out. + // + // If a variable is deleted while being watched, the `variableState` state is + // set to `DELETED` and the method returns the last known variable `value`. + // + // If you set the deadline for watching to a larger value than internal timeout + // (60 seconds), the current variable value is returned and the `variableState` + // will be `VARIABLE_STATE_UNSPECIFIED`. + // + // To learn more about creating a watcher, read the + // [Watching a Variable for Changes](/deployment-manager/runtime-configurator/watching-a-variable) + // documentation. + WatchVariable(ctx context.Context, in *WatchVariableRequest, opts ...grpc.CallOption) (*Variable, error) + // Creates a variable within the given configuration. You cannot create + // a variable with a name that is a prefix of an existing variable name, or a + // name that has an existing variable name as a prefix. + // + // To learn more about creating a variable, read the + // [Setting and Getting Data](/deployment-manager/runtime-configurator/set-and-get-variables) + // documentation. 
+ CreateVariable(ctx context.Context, in *CreateVariableRequest, opts ...grpc.CallOption) (*Variable, error) + // Updates an existing variable with a new value. + UpdateVariable(ctx context.Context, in *UpdateVariableRequest, opts ...grpc.CallOption) (*Variable, error) + // Deletes a variable or multiple variables. + // + // If you specify a variable name, then that variable is deleted. If you + // specify a prefix and `recursive` is true, then all variables with that + // prefix are deleted. You must set a `recursive` to true if you delete + // variables by prefix. + DeleteVariable(ctx context.Context, in *DeleteVariableRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // List waiters within the given configuration. + ListWaiters(ctx context.Context, in *ListWaitersRequest, opts ...grpc.CallOption) (*ListWaitersResponse, error) + // Gets information about a single waiter. + GetWaiter(ctx context.Context, in *GetWaiterRequest, opts ...grpc.CallOption) (*Waiter, error) + // Creates a Waiter resource. This operation returns a long-running Operation + // resource which can be polled for completion. However, a waiter with the + // given name will exist (and can be retrieved) prior to the operation + // completing. If the operation fails, the failed Waiter resource will + // still exist and must be deleted prior to subsequent creation attempts. + CreateWaiter(ctx context.Context, in *CreateWaiterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes the waiter with the specified name. 
+ DeleteWaiter(ctx context.Context, in *DeleteWaiterRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) +} + +type runtimeConfigManagerClient struct { + cc *grpc.ClientConn +} + +func NewRuntimeConfigManagerClient(cc *grpc.ClientConn) RuntimeConfigManagerClient { + return &runtimeConfigManagerClient{cc} +} + +func (c *runtimeConfigManagerClient) ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) { + out := new(ListConfigsResponse) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/ListConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*RuntimeConfig, error) { + out := new(RuntimeConfig) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/GetConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*RuntimeConfig, error) { + out := new(RuntimeConfig) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/CreateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*RuntimeConfig, error) { + out := new(RuntimeConfig) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/UpdateConfig", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) DeleteConfig(ctx context.Context, in *DeleteConfigRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/DeleteConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) ListVariables(ctx context.Context, in *ListVariablesRequest, opts ...grpc.CallOption) (*ListVariablesResponse, error) { + out := new(ListVariablesResponse) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/ListVariables", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) GetVariable(ctx context.Context, in *GetVariableRequest, opts ...grpc.CallOption) (*Variable, error) { + out := new(Variable) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/GetVariable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) WatchVariable(ctx context.Context, in *WatchVariableRequest, opts ...grpc.CallOption) (*Variable, error) { + out := new(Variable) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/WatchVariable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) CreateVariable(ctx context.Context, in *CreateVariableRequest, opts ...grpc.CallOption) (*Variable, error) { + out := new(Variable) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/CreateVariable", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) UpdateVariable(ctx context.Context, in *UpdateVariableRequest, opts ...grpc.CallOption) (*Variable, error) { + out := new(Variable) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/UpdateVariable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) DeleteVariable(ctx context.Context, in *DeleteVariableRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/DeleteVariable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) ListWaiters(ctx context.Context, in *ListWaitersRequest, opts ...grpc.CallOption) (*ListWaitersResponse, error) { + out := new(ListWaitersResponse) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/ListWaiters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) GetWaiter(ctx context.Context, in *GetWaiterRequest, opts ...grpc.CallOption) (*Waiter, error) { + out := new(Waiter) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/GetWaiter", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) CreateWaiter(ctx context.Context, in *CreateWaiterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/CreateWaiter", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeConfigManagerClient) DeleteWaiter(ctx context.Context, in *DeleteWaiterRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/DeleteWaiter", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for RuntimeConfigManager service + +type RuntimeConfigManagerServer interface { + // Lists all the RuntimeConfig resources within project. + ListConfigs(context.Context, *ListConfigsRequest) (*ListConfigsResponse, error) + // Gets information about a RuntimeConfig resource. + GetConfig(context.Context, *GetConfigRequest) (*RuntimeConfig, error) + // Creates a new RuntimeConfig resource. The configuration name must be + // unique within project. + CreateConfig(context.Context, *CreateConfigRequest) (*RuntimeConfig, error) + // Updates a RuntimeConfig resource. The configuration must exist beforehand. + UpdateConfig(context.Context, *UpdateConfigRequest) (*RuntimeConfig, error) + // Deletes a RuntimeConfig resource. + DeleteConfig(context.Context, *DeleteConfigRequest) (*google_protobuf4.Empty, error) + // Lists variables within given a configuration, matching any provided filters. + // This only lists variable names, not the values, unless `return_values` is + // true, in which case only variables that user has IAM permission to + // GetVariable will be returned. + ListVariables(context.Context, *ListVariablesRequest) (*ListVariablesResponse, error) + // Gets information about a single variable. + GetVariable(context.Context, *GetVariableRequest) (*Variable, error) + // Watches a specific variable and waits for a change in the variable's value. + // When there is a change, this method returns the new value or times out. 
+ // + // If a variable is deleted while being watched, the `variableState` state is + // set to `DELETED` and the method returns the last known variable `value`. + // + // If you set the deadline for watching to a larger value than internal timeout + // (60 seconds), the current variable value is returned and the `variableState` + // will be `VARIABLE_STATE_UNSPECIFIED`. + // + // To learn more about creating a watcher, read the + // [Watching a Variable for Changes](/deployment-manager/runtime-configurator/watching-a-variable) + // documentation. + WatchVariable(context.Context, *WatchVariableRequest) (*Variable, error) + // Creates a variable within the given configuration. You cannot create + // a variable with a name that is a prefix of an existing variable name, or a + // name that has an existing variable name as a prefix. + // + // To learn more about creating a variable, read the + // [Setting and Getting Data](/deployment-manager/runtime-configurator/set-and-get-variables) + // documentation. + CreateVariable(context.Context, *CreateVariableRequest) (*Variable, error) + // Updates an existing variable with a new value. + UpdateVariable(context.Context, *UpdateVariableRequest) (*Variable, error) + // Deletes a variable or multiple variables. + // + // If you specify a variable name, then that variable is deleted. If you + // specify a prefix and `recursive` is true, then all variables with that + // prefix are deleted. You must set a `recursive` to true if you delete + // variables by prefix. + DeleteVariable(context.Context, *DeleteVariableRequest) (*google_protobuf4.Empty, error) + // List waiters within the given configuration. + ListWaiters(context.Context, *ListWaitersRequest) (*ListWaitersResponse, error) + // Gets information about a single waiter. + GetWaiter(context.Context, *GetWaiterRequest) (*Waiter, error) + // Creates a Waiter resource. This operation returns a long-running Operation + // resource which can be polled for completion. 
However, a waiter with the + // given name will exist (and can be retrieved) prior to the operation + // completing. If the operation fails, the failed Waiter resource will + // still exist and must be deleted prior to subsequent creation attempts. + CreateWaiter(context.Context, *CreateWaiterRequest) (*google_longrunning.Operation, error) + // Deletes the waiter with the specified name. + DeleteWaiter(context.Context, *DeleteWaiterRequest) (*google_protobuf4.Empty, error) +} + +func RegisterRuntimeConfigManagerServer(s *grpc.Server, srv RuntimeConfigManagerServer) { + s.RegisterService(&_RuntimeConfigManager_serviceDesc, srv) +} + +func _RuntimeConfigManager_ListConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).ListConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/ListConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).ListConfigs(ctx, req.(*ListConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/GetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).GetConfig(ctx, 
req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_CreateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).CreateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/CreateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).CreateConfig(ctx, req.(*CreateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).UpdateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/UpdateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_DeleteConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).DeleteConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/DeleteConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).DeleteConfig(ctx, req.(*DeleteConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_ListVariables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVariablesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).ListVariables(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/ListVariables", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).ListVariables(ctx, req.(*ListVariablesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_GetVariable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVariableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).GetVariable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/GetVariable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).GetVariable(ctx, req.(*GetVariableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_WatchVariable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WatchVariableRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).WatchVariable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/WatchVariable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).WatchVariable(ctx, req.(*WatchVariableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_CreateVariable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVariableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).CreateVariable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/CreateVariable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).CreateVariable(ctx, req.(*CreateVariableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_UpdateVariable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateVariableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).UpdateVariable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/UpdateVariable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).UpdateVariable(ctx, req.(*UpdateVariableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_RuntimeConfigManager_DeleteVariable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVariableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).DeleteVariable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/DeleteVariable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).DeleteVariable(ctx, req.(*DeleteVariableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_ListWaiters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListWaitersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).ListWaiters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/ListWaiters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).ListWaiters(ctx, req.(*ListWaitersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_GetWaiter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWaiterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).GetWaiter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/GetWaiter", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(RuntimeConfigManagerServer).GetWaiter(ctx, req.(*GetWaiterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_CreateWaiter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateWaiterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).CreateWaiter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/CreateWaiter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).CreateWaiter(ctx, req.(*CreateWaiterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeConfigManager_DeleteWaiter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteWaiterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeConfigManagerServer).DeleteWaiter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager/DeleteWaiter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeConfigManagerServer).DeleteWaiter(ctx, req.(*DeleteWaiterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RuntimeConfigManager_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.runtimeconfig.v1beta1.RuntimeConfigManager", + HandlerType: (*RuntimeConfigManagerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListConfigs", + Handler: _RuntimeConfigManager_ListConfigs_Handler, + }, + { + MethodName: "GetConfig", + Handler: _RuntimeConfigManager_GetConfig_Handler, + }, 
+ { + MethodName: "CreateConfig", + Handler: _RuntimeConfigManager_CreateConfig_Handler, + }, + { + MethodName: "UpdateConfig", + Handler: _RuntimeConfigManager_UpdateConfig_Handler, + }, + { + MethodName: "DeleteConfig", + Handler: _RuntimeConfigManager_DeleteConfig_Handler, + }, + { + MethodName: "ListVariables", + Handler: _RuntimeConfigManager_ListVariables_Handler, + }, + { + MethodName: "GetVariable", + Handler: _RuntimeConfigManager_GetVariable_Handler, + }, + { + MethodName: "WatchVariable", + Handler: _RuntimeConfigManager_WatchVariable_Handler, + }, + { + MethodName: "CreateVariable", + Handler: _RuntimeConfigManager_CreateVariable_Handler, + }, + { + MethodName: "UpdateVariable", + Handler: _RuntimeConfigManager_UpdateVariable_Handler, + }, + { + MethodName: "DeleteVariable", + Handler: _RuntimeConfigManager_DeleteVariable_Handler, + }, + { + MethodName: "ListWaiters", + Handler: _RuntimeConfigManager_ListWaiters_Handler, + }, + { + MethodName: "GetWaiter", + Handler: _RuntimeConfigManager_GetWaiter_Handler, + }, + { + MethodName: "CreateWaiter", + Handler: _RuntimeConfigManager_CreateWaiter_Handler, + }, + { + MethodName: "DeleteWaiter", + Handler: _RuntimeConfigManager_DeleteWaiter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/runtimeconfig/v1beta1/runtimeconfig.proto", +} + +func init() { + proto.RegisterFile("google/cloud/runtimeconfig/v1beta1/runtimeconfig.proto", fileDescriptor1) +} + +var fileDescriptor1 = []byte{ + // 1144 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x98, 0x5b, 0x6f, 0xdc, 0x44, + 0x14, 0xc7, 0x35, 0x69, 0x9b, 0x66, 0x4f, 0x2e, 0xa0, 0xc9, 0x45, 0x91, 0xdb, 0x8a, 0xc8, 0x45, + 0x51, 0x58, 0x55, 0x76, 0x93, 0x56, 0x69, 0x12, 0x28, 0x0f, 0x49, 0x51, 0x08, 0x17, 0xb5, 0x32, + 0x21, 0x95, 0x78, 0x59, 0x4d, 0x36, 0x13, 0xc7, 0xb0, 0x3b, 0x36, 0xf6, 0x38, 0x81, 0xa2, 0xbc, + 0xc0, 0x1b, 0x08, 0x09, 0x89, 0x87, 0xf2, 0x84, 0x10, 0x12, 0x20, 
0x21, 0x84, 0x78, 0xe2, 0x05, + 0xd1, 0x2f, 0x81, 0xc4, 0x27, 0xe0, 0x83, 0x20, 0xcf, 0xc5, 0x6b, 0x6f, 0xf6, 0x32, 0x0e, 0xe1, + 0x2d, 0x39, 0x9e, 0x73, 0xce, 0x6f, 0xce, 0x9c, 0x99, 0xff, 0xd1, 0xc2, 0xaa, 0x1f, 0x86, 0x7e, + 0x8b, 0xba, 0xcd, 0x56, 0x98, 0x1e, 0xb8, 0x71, 0xca, 0x78, 0xd0, 0xa6, 0xcd, 0x90, 0x1d, 0x06, + 0xbe, 0x7b, 0xbc, 0xbc, 0x4f, 0x39, 0x59, 0x2e, 0x5b, 0x9d, 0x28, 0x0e, 0x79, 0x88, 0x6d, 0xe9, + 0xe7, 0x08, 0x3f, 0xa7, 0xbc, 0x42, 0xf9, 0x59, 0xd7, 0x55, 0x6c, 0x12, 0x05, 0x2e, 0x61, 0x2c, + 0xe4, 0x84, 0x07, 0x21, 0x4b, 0x64, 0x04, 0x6b, 0xc5, 0x24, 0x33, 0x4d, 0xc2, 0x34, 0x6e, 0x52, + 0xed, 0x73, 0x53, 0xf9, 0xb4, 0x42, 0xe6, 0xc7, 0x29, 0x63, 0x01, 0xf3, 0xdd, 0x30, 0xa2, 0x71, + 0x29, 0xf0, 0x35, 0xb5, 0x48, 0xfc, 0xb7, 0x9f, 0x1e, 0xba, 0xb4, 0x1d, 0xf1, 0x8f, 0xd5, 0xc7, + 0x17, 0xba, 0x3f, 0x66, 0x59, 0x13, 0x4e, 0xda, 0x91, 0x5c, 0x60, 0x1f, 0x01, 0x7e, 0x2b, 0x48, + 0xf8, 0x96, 0x00, 0x49, 0x3c, 0xfa, 0x61, 0x4a, 0x13, 0x8e, 0xe7, 0x60, 0x34, 0x22, 0x31, 0x65, + 0x7c, 0x1e, 0x2d, 0xa0, 0xa5, 0x9a, 0xa7, 0xfe, 0xc3, 0xd7, 0xa0, 0x16, 0x11, 0x9f, 0x36, 0x92, + 0xe0, 0x09, 0x9d, 0x1f, 0x59, 0x40, 0x4b, 0x57, 0xbc, 0xb1, 0xcc, 0xf0, 0x4e, 0xf0, 0x84, 0xe2, + 0x1b, 0x00, 0xe2, 0x23, 0x0f, 0x3f, 0xa0, 0x6c, 0xfe, 0x92, 0x70, 0x14, 0xcb, 0x77, 0x33, 0x83, + 0xfd, 0x39, 0x82, 0xe9, 0x52, 0xaa, 0x24, 0x0a, 0x59, 0x42, 0xf1, 0x9b, 0x70, 0x55, 0x96, 0x21, + 0x99, 0x47, 0x0b, 0x97, 0x96, 0xc6, 0x57, 0x96, 0x9d, 0xe1, 0xc5, 0x76, 0x3c, 0x69, 0x95, 0xc1, + 0x3c, 0x1d, 0x01, 0x2f, 0xc2, 0x73, 0x8c, 0x7e, 0xc4, 0x1b, 0x05, 0x90, 0x11, 0x01, 0x32, 0x99, + 0x99, 0x1f, 0xe5, 0x30, 0x8b, 0xf0, 0xfc, 0x36, 0x55, 0x28, 0x7a, 0xd3, 0x18, 0x2e, 0x33, 0xd2, + 0xa6, 0xca, 0x41, 0xfc, 0x6d, 0x3f, 0x45, 0x30, 0xbd, 0x15, 0x53, 0xc2, 0x69, 0x79, 0x6d, 0xbf, + 0x02, 0xed, 0xc0, 0xa8, 0x44, 0x11, 0x51, 0xce, 0xb5, 0x17, 0x15, 0x20, 0x2b, 0x67, 0x2c, 0xb3, + 0x35, 0x82, 0x03, 0x5d, 0x4e, 0x65, 0xd9, 0x39, 0xb0, 0x39, 0x4c, 0xbf, 0x1b, 0x1d, 0x9c, 0x01, + 0xd3, 
0x9b, 0x40, 0x9d, 0x4d, 0x5c, 0x20, 0x94, 0xfd, 0x12, 0x4c, 0x3f, 0xa0, 0x2d, 0x6a, 0x90, + 0xd5, 0xfe, 0x09, 0xc1, 0x4c, 0x76, 0xde, 0x7b, 0x24, 0x0e, 0xc8, 0x7e, 0x8b, 0x0e, 0x6d, 0xae, + 0x39, 0x18, 0x3d, 0x0c, 0x5a, 0x9c, 0xc6, 0xea, 0x04, 0xd4, 0x7f, 0xe5, 0xa6, 0xbb, 0x34, 0xb0, + 0xe9, 0x2e, 0x77, 0x35, 0x1d, 0xbe, 0x09, 0x93, 0x31, 0xe5, 0x69, 0xcc, 0x1a, 0xc7, 0xa4, 0x95, + 0xd2, 0x64, 0xfe, 0xca, 0x02, 0x5a, 0x1a, 0xf3, 0x26, 0xa4, 0x71, 0x4f, 0xd8, 0xec, 0x2f, 0x10, + 0xcc, 0x76, 0x91, 0xaa, 0xde, 0x7c, 0x03, 0x6a, 0xc7, 0xda, 0xa8, 0xba, 0xf3, 0x96, 0x49, 0xf1, + 0x74, 0x24, 0xaf, 0xe3, 0x6e, 0xdc, 0x9a, 0x14, 0x66, 0x1e, 0x13, 0xde, 0x3c, 0xca, 0x63, 0x0c, + 0x38, 0xd9, 0x75, 0x00, 0x46, 0x4f, 0x68, 0xdc, 0xe0, 0x47, 0x44, 0xee, 0x7e, 0x7c, 0xc5, 0xd2, + 0x80, 0xfa, 0xce, 0x3b, 0xbb, 0xfa, 0xce, 0x7b, 0x35, 0xb1, 0x7a, 0xf7, 0x88, 0x30, 0x7b, 0x09, + 0xf0, 0x36, 0xe5, 0x06, 0x49, 0xec, 0x6f, 0x10, 0xcc, 0xca, 0x3b, 0xd0, 0xbd, 0xba, 0xdf, 0x49, + 0xbe, 0x0e, 0x63, 0x7a, 0xdf, 0xaa, 0xe5, 0xaa, 0x55, 0x2d, 0xf7, 0x1e, 0x76, 0x09, 0x52, 0x98, + 0x95, 0x97, 0xc0, 0xa4, 0x58, 0x17, 0x46, 0x65, 0xef, 0xc0, 0xac, 0xbc, 0x05, 0x26, 0x69, 0xaf, + 0x43, 0x2d, 0xa6, 0xcd, 0x34, 0x4e, 0x82, 0x63, 0x99, 0x77, 0xcc, 0xeb, 0x18, 0xf4, 0xfb, 0xfb, + 0x98, 0x04, 0x9c, 0xc6, 0xff, 0xeb, 0xfb, 0xfb, 0x99, 0x7a, 0x7f, 0xf3, 0x54, 0xaa, 0xc7, 0x1f, + 0xc0, 0xd5, 0x13, 0x69, 0x52, 0x1d, 0x5e, 0x37, 0xa9, 0x8a, 0x8c, 0xe2, 0x69, 0xd7, 0x8a, 0x0f, + 0xaf, 0xf2, 0x1e, 0xd0, 0x74, 0x5f, 0xe5, 0x0f, 0x6f, 0x79, 0x6d, 0xbf, 0xca, 0x6c, 0xc2, 0xa8, + 0x44, 0x51, 0x47, 0x5b, 0x65, 0x13, 0xca, 0x73, 0x58, 0xb3, 0xe5, 0x6f, 0xdf, 0x50, 0xfa, 0x95, + 0xbf, 0x67, 0x60, 0xa6, 0xf4, 0x80, 0xbe, 0x4d, 0x18, 0xf1, 0x69, 0x8c, 0x7f, 0x41, 0x30, 0x5e, + 0x10, 0x41, 0xbc, 0x6a, 0x82, 0x79, 0x56, 0xa0, 0xad, 0x7b, 0x95, 0xfd, 0xe4, 0x69, 0xdb, 0xb7, + 0x3e, 0xfd, 0xeb, 0x9f, 0xaf, 0x47, 0x16, 0xf1, 0x8b, 0xf9, 0xd0, 0xf1, 0x89, 0xac, 0xe0, 0xfd, + 0x28, 0x0e, 0xdf, 0xa7, 0x4d, 0x9e, 0xb8, 
0xf5, 0x53, 0x57, 0xcb, 0xe9, 0xf7, 0x08, 0x6a, 0xb9, + 0x4e, 0xe2, 0xbb, 0x26, 0x49, 0xbb, 0x65, 0xd5, 0xaa, 0xae, 0x36, 0xbd, 0x20, 0xb3, 0xb2, 0x16, + 0x10, 0x35, 0xa1, 0x5b, 0x3f, 0xc5, 0xbf, 0x21, 0x98, 0x28, 0x6a, 0x34, 0x36, 0x2a, 0x4e, 0x0f, + 0x55, 0x3f, 0x0f, 0xea, 0x5d, 0x81, 0xea, 0xd8, 0x46, 0xf5, 0xdc, 0xd0, 0xda, 0x9e, 0x21, 0x17, + 0xd5, 0xdb, 0x0c, 0xb9, 0x87, 0xde, 0xff, 0x07, 0x64, 0xcb, 0xa8, 0xba, 0x39, 0xf2, 0x97, 0x08, + 0x26, 0x8a, 0xd2, 0x6f, 0x86, 0xdc, 0x63, 0x58, 0xb0, 0xe6, 0xce, 0x08, 0xd4, 0x6b, 0xd9, 0xc4, + 0xaa, 0x4f, 0xbd, 0x6e, 0x76, 0xea, 0xcf, 0x10, 0x4c, 0x96, 0x44, 0x1b, 0xaf, 0x99, 0xde, 0x89, + 0xee, 0x89, 0xc4, 0x5a, 0x3f, 0x87, 0xa7, 0xba, 0x4f, 0x6b, 0x02, 0x7a, 0x05, 0xdf, 0x1e, 0x70, + 0xfe, 0x05, 0x6c, 0xb7, 0x33, 0x0f, 0xfc, 0x8a, 0x60, 0xbc, 0xa0, 0xc0, 0x66, 0x4f, 0xc1, 0x59, + 0xc9, 0xb6, 0x2a, 0x89, 0x98, 0xbd, 0x2e, 0x78, 0xef, 0xe0, 0x65, 0x83, 0x22, 0x77, 0x60, 0xdd, + 0x7a, 0xfd, 0x14, 0xff, 0x81, 0x60, 0xb2, 0x34, 0x99, 0x98, 0x55, 0xbc, 0xd7, 0x30, 0x53, 0x11, + 0x7a, 0x53, 0x40, 0xbf, 0x62, 0xdf, 0xab, 0x0c, 0xbd, 0x71, 0x92, 0x65, 0xdf, 0x40, 0x75, 0xfc, + 0x27, 0x82, 0xa9, 0xf2, 0x14, 0x83, 0xd7, 0xcd, 0xdf, 0x89, 0x8b, 0xe1, 0xaf, 0xdc, 0x24, 0x1b, + 0x9d, 0x49, 0xe8, 0x19, 0x82, 0xa9, 0xf2, 0xac, 0x63, 0xc6, 0xdf, 0x73, 0x3e, 0xaa, 0xc8, 0xbf, + 0x25, 0xf8, 0xef, 0x5b, 0xd5, 0x9b, 0xa6, 0xb0, 0x81, 0x6f, 0x11, 0x4c, 0x95, 0xa7, 0x26, 0xb3, + 0x0d, 0xf4, 0x9c, 0xb4, 0xfa, 0x3e, 0x22, 0xaa, 0xbf, 0xeb, 0xe7, 0xe8, 0xef, 0xdf, 0x95, 0x36, + 0xab, 0x01, 0xc9, 0x5c, 0x9b, 0xcb, 0xc3, 0x9b, 0xb9, 0x36, 0x77, 0x4d, 0x62, 0xf6, 0xaa, 0x60, + 0xbf, 0x8d, 0x1d, 0xc3, 0x36, 0xd1, 0xb3, 0xd7, 0x0f, 0x52, 0xa5, 0x65, 0x38, 0x63, 0x95, 0x2e, + 0x4d, 0x31, 0x56, 0x85, 0x79, 0xa9, 0x17, 0x67, 0xff, 0x1a, 0x2b, 0xc8, 0xec, 0xc9, 0xfe, 0x31, + 0x17, 0x6a, 0x85, 0x5a, 0x41, 0xa8, 0xcb, 0xb4, 0x37, 0xb4, 0x63, 0xe1, 0x97, 0x11, 0xe7, 0xa1, + 0xfe, 0x65, 0xc4, 0x7e, 0x55, 0x00, 0xae, 0xd9, 0x15, 0x0b, 0xb9, 0xa1, 0x07, 
0xc1, 0xa7, 0xb9, + 0xd6, 0x55, 0x01, 0xed, 0x31, 0x1c, 0xf6, 0x6d, 0x53, 0x55, 0xc2, 0x7a, 0xc5, 0x12, 0x6e, 0x7e, + 0x87, 0x60, 0xb1, 0x19, 0xb6, 0x0d, 0x70, 0x1e, 0xa1, 0xf7, 0x1e, 0xaa, 0x55, 0x7e, 0xd8, 0x22, + 0xcc, 0x77, 0xc2, 0xd8, 0x77, 0x7d, 0xca, 0x04, 0x89, 0x2b, 0x3f, 0x91, 0x28, 0x48, 0x06, 0xfd, + 0x22, 0xf5, 0x72, 0xc9, 0xfa, 0xf3, 0x88, 0xbd, 0x2d, 0x23, 0x6e, 0x89, 0xbc, 0xa5, 0xb1, 0xc2, + 0xd9, 0x5b, 0xde, 0xcc, 0x5c, 0xf6, 0x47, 0x45, 0x82, 0x3b, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0xdb, 0xa7, 0xdc, 0xe8, 0x6b, 0x13, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1/cloud_speech.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1/cloud_speech.pb.go new file mode 100644 index 000000000..31c6c4e56 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1/cloud_speech.pb.go @@ -0,0 +1,1254 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/speech/v1/cloud_speech.proto + +/* +Package speech is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/speech/v1/cloud_speech.proto + +It has these top-level messages: + RecognizeRequest + LongRunningRecognizeRequest + StreamingRecognizeRequest + StreamingRecognitionConfig + RecognitionConfig + SpeechContext + RecognitionAudio + RecognizeResponse + LongRunningRecognizeResponse + LongRunningRecognizeMetadata + StreamingRecognizeResponse + StreamingRecognitionResult + SpeechRecognitionResult + SpeechRecognitionAlternative + WordInfo +*/ +package speech + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/any" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Audio encoding of the data sent in the audio message. All encodings support +// only 1 channel (mono) audio. Only `FLAC` and `WAV` include a header that +// describes the bytes of audio that follow the header. The other encodings +// are raw audio bytes with no header. +// +// For best results, the audio source should be captured and transmitted using +// a lossless encoding (`FLAC` or `LINEAR16`). 
Recognition accuracy may be +// reduced if lossy codecs, which include the other codecs listed in +// this section, are used to capture or transmit the audio, particularly if +// background noise is present. +type RecognitionConfig_AudioEncoding int32 + +const ( + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0 + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1 + // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio + // Codec) is the recommended encoding because it is + // lossless--therefore recognition is not compromised--and + // requires only about half the bandwidth of `LINEAR16`. `FLAC` stream + // encoding supports 16-bit and 24-bit samples, however, not all fields in + // `STREAMINFO` are supported. + RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2 + // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. + RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3 + // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000. + RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4 + // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000. + RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5 + // Opus encoded audio frames in Ogg container + // ([OggOpus](https://wiki.xiph.org/OggOpus)). + // `sample_rate_hertz` must be 16000. + RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6 + // Although the use of lossy encodings is not recommended, if a very low + // bitrate encoding is required, `OGG_OPUS` is highly preferred over + // Speex encoding. The [Speex](https://speex.org/) encoding supported by + // Cloud Speech API has a header byte in each block, as in MIME type + // `audio/x-speex-with-header-byte`. 
+ // It is a variant of the RTP Speex encoding defined in + // [RFC 5574](https://tools.ietf.org/html/rfc5574). + // The stream is a sequence of blocks, one block per RTP packet. Each block + // starts with a byte containing the length of the block, in bytes, followed + // by one or more frames of Speex data, padded to an integral number of + // bytes (octets) as specified in RFC 5574. In other words, each RTP header + // is replaced with a single byte containing the block length. Only Speex + // wideband is supported. `sample_rate_hertz` must be 16000. + RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7 +) + +var RecognitionConfig_AudioEncoding_name = map[int32]string{ + 0: "ENCODING_UNSPECIFIED", + 1: "LINEAR16", + 2: "FLAC", + 3: "MULAW", + 4: "AMR", + 5: "AMR_WB", + 6: "OGG_OPUS", + 7: "SPEEX_WITH_HEADER_BYTE", +} +var RecognitionConfig_AudioEncoding_value = map[string]int32{ + "ENCODING_UNSPECIFIED": 0, + "LINEAR16": 1, + "FLAC": 2, + "MULAW": 3, + "AMR": 4, + "AMR_WB": 5, + "OGG_OPUS": 6, + "SPEEX_WITH_HEADER_BYTE": 7, +} + +func (x RecognitionConfig_AudioEncoding) String() string { + return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x)) +} +func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 0} +} + +// Indicates the type of speech event. +type StreamingRecognizeResponse_SpeechEventType int32 + +const ( + // No speech event specified. + StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0 + // This event indicates that the server has detected the end of the user's + // speech utterance and expects no additional speech. Therefore, the server + // will not process additional audio (although it may subsequently return + // additional results). The client should stop sending additional audio + // data, half-close the gRPC connection, and wait for any additional results + // until the server closes the gRPC connection. 
This event is only sent if + // `single_utterance` was set to `true`, and is not used otherwise. + StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1 +) + +var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{ + 0: "SPEECH_EVENT_UNSPECIFIED", + 1: "END_OF_SINGLE_UTTERANCE", +} +var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{ + "SPEECH_EVENT_UNSPECIFIED": 0, + "END_OF_SINGLE_UTTERANCE": 1, +} + +func (x StreamingRecognizeResponse_SpeechEventType) String() string { + return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x)) +} +func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{10, 0} +} + +// The top-level message sent by the client for the `Recognize` method. +type RecognizeRequest struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. + Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Required* The audio data to be recognized. + Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio" json:"audio,omitempty"` +} + +func (m *RecognizeRequest) Reset() { *m = RecognizeRequest{} } +func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*RecognizeRequest) ProtoMessage() {} +func (*RecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *RecognizeRequest) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *RecognizeRequest) GetAudio() *RecognitionAudio { + if m != nil { + return m.Audio + } + return nil +} + +// The top-level message sent by the client for the `LongRunningRecognize` +// method. +type LongRunningRecognizeRequest struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. 
+ Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Required* The audio data to be recognized. + Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio" json:"audio,omitempty"` +} + +func (m *LongRunningRecognizeRequest) Reset() { *m = LongRunningRecognizeRequest{} } +func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*LongRunningRecognizeRequest) ProtoMessage() {} +func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio { + if m != nil { + return m.Audio + } + return nil +} + +// The top-level message sent by the client for the `StreamingRecognize` method. +// Multiple `StreamingRecognizeRequest` messages are sent. The first message +// must contain a `streaming_config` message and must not contain `audio` data. +// All subsequent messages must contain `audio` data and must not contain a +// `streaming_config` message. +type StreamingRecognizeRequest struct { + // The streaming request, which is either a streaming config or audio content. 
+ // + // Types that are valid to be assigned to StreamingRequest: + // *StreamingRecognizeRequest_StreamingConfig + // *StreamingRecognizeRequest_AudioContent + StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"` +} + +func (m *StreamingRecognizeRequest) Reset() { *m = StreamingRecognizeRequest{} } +func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognizeRequest) ProtoMessage() {} +func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isStreamingRecognizeRequest_StreamingRequest interface { + isStreamingRecognizeRequest_StreamingRequest() +} + +type StreamingRecognizeRequest_StreamingConfig struct { + StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,oneof"` +} +type StreamingRecognizeRequest_AudioContent struct { + AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"` +} + +func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {} +func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {} + +func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest { + if m != nil { + return m.StreamingRequest + } + return nil +} + +func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig { + if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok { + return x.StreamingConfig + } + return nil +} + +func (m *StreamingRecognizeRequest) GetAudioContent() []byte { + if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok { + return x.AudioContent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StreamingRecognizeRequest_OneofMarshaler, _StreamingRecognizeRequest_OneofUnmarshaler, _StreamingRecognizeRequest_OneofSizer, []interface{}{ + (*StreamingRecognizeRequest_StreamingConfig)(nil), + (*StreamingRecognizeRequest_AudioContent)(nil), + } +} + +func _StreamingRecognizeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StreamingRecognizeRequest) + // streaming_request + switch x := m.StreamingRequest.(type) { + case *StreamingRecognizeRequest_StreamingConfig: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StreamingConfig); err != nil { + return err + } + case *StreamingRecognizeRequest_AudioContent: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.AudioContent) + case nil: + default: + return fmt.Errorf("StreamingRecognizeRequest.StreamingRequest has unexpected type %T", x) + } + return nil +} + +func _StreamingRecognizeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StreamingRecognizeRequest) + switch tag { + case 1: // streaming_request.streaming_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StreamingRecognitionConfig) + err := b.DecodeMessage(msg) + m.StreamingRequest = &StreamingRecognizeRequest_StreamingConfig{msg} + return true, err + case 2: // streaming_request.audio_content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StreamingRequest = &StreamingRecognizeRequest_AudioContent{x} + return true, err + default: + return false, nil + } +} + +func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StreamingRecognizeRequest) + // streaming_request + 
switch x := m.StreamingRequest.(type) { + case *StreamingRecognizeRequest_StreamingConfig: + s := proto.Size(x.StreamingConfig) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StreamingRecognizeRequest_AudioContent: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AudioContent))) + n += len(x.AudioContent) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Provides information to the recognizer that specifies how to process the +// request. +type StreamingRecognitionConfig struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. + Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Optional* If `false` or omitted, the recognizer will perform continuous + // recognition (continuing to wait for and process audio even if the user + // pauses speaking) until the client closes the input stream (gRPC API) or + // until the maximum time limit has been reached. May return multiple + // `StreamingRecognitionResult`s with the `is_final` flag set to `true`. + // + // If `true`, the recognizer will detect a single spoken utterance. When it + // detects that the user has paused or stopped speaking, it will return an + // `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no + // more than one `StreamingRecognitionResult` with the `is_final` flag set to + // `true`. + SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance" json:"single_utterance,omitempty"` + // *Optional* If `true`, interim results (tentative hypotheses) may be + // returned as they become available (these interim results are indicated with + // the `is_final=false` flag). + // If `false` or omitted, only `is_final=true` result(s) are returned. 
+ InterimResults bool `protobuf:"varint,3,opt,name=interim_results,json=interimResults" json:"interim_results,omitempty"` +} + +func (m *StreamingRecognitionConfig) Reset() { *m = StreamingRecognitionConfig{} } +func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognitionConfig) ProtoMessage() {} +func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *StreamingRecognitionConfig) GetSingleUtterance() bool { + if m != nil { + return m.SingleUtterance + } + return false +} + +func (m *StreamingRecognitionConfig) GetInterimResults() bool { + if m != nil { + return m.InterimResults + } + return false +} + +// Provides information to the recognizer that specifies how to process the +// request. +type RecognitionConfig struct { + // *Required* Encoding of audio data sent in all `RecognitionAudio` messages. + Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,enum=google.cloud.speech.v1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"` + // *Required* Sample rate in Hertz of the audio data sent in all + // `RecognitionAudio` messages. Valid values are: 8000-48000. + // 16000 is optimal. For best results, set the sampling rate of the audio + // source to 16000 Hz. If that's not possible, use the native sample rate of + // the audio source (instead of re-sampling). + SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"` + // *Required* The language of the supplied audio as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // Example: "en-US". + // See [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. 
+ LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // *Optional* Maximum number of recognition hypotheses to be returned. + // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechRecognitionResult`. + // The server may return fewer than `max_alternatives`. + // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of + // one. If omitted, will return a maximum of one. + MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives" json:"max_alternatives,omitempty"` + // *Optional* If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter" json:"profanity_filter,omitempty"` + // *Optional* A means to provide context to assist the speech recognition. + SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts" json:"speech_contexts,omitempty"` + // *Optional* If `true`, the top result includes a list of words and + // the start and end time offsets (timestamps) for those words. If + // `false`, no word-level time offset information is returned. The default is + // `false`. 
+ EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets" json:"enable_word_time_offsets,omitempty"` +} + +func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} } +func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) } +func (*RecognitionConfig) ProtoMessage() {} +func (*RecognitionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding { + if m != nil { + return m.Encoding + } + return RecognitionConfig_ENCODING_UNSPECIFIED +} + +func (m *RecognitionConfig) GetSampleRateHertz() int32 { + if m != nil { + return m.SampleRateHertz + } + return 0 +} + +func (m *RecognitionConfig) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *RecognitionConfig) GetMaxAlternatives() int32 { + if m != nil { + return m.MaxAlternatives + } + return 0 +} + +func (m *RecognitionConfig) GetProfanityFilter() bool { + if m != nil { + return m.ProfanityFilter + } + return false +} + +func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext { + if m != nil { + return m.SpeechContexts + } + return nil +} + +func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool { + if m != nil { + return m.EnableWordTimeOffsets + } + return false +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +type SpeechContext struct { + // *Optional* A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). 
+ Phrases []string `protobuf:"bytes,1,rep,name=phrases" json:"phrases,omitempty"` +} + +func (m *SpeechContext) Reset() { *m = SpeechContext{} } +func (m *SpeechContext) String() string { return proto.CompactTextString(m) } +func (*SpeechContext) ProtoMessage() {} +func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *SpeechContext) GetPhrases() []string { + if m != nil { + return m.Phrases + } + return nil +} + +// Contains audio data in the encoding specified in the `RecognitionConfig`. +// Either `content` or `uri` must be supplied. Supplying both or neither +// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See +// [audio limits](https://cloud.google.com/speech/limits#content). +type RecognitionAudio struct { + // The audio source, which is either inline content or a Google Cloud + // Storage uri. + // + // Types that are valid to be assigned to AudioSource: + // *RecognitionAudio_Content + // *RecognitionAudio_Uri + AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"` +} + +func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} } +func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) } +func (*RecognitionAudio) ProtoMessage() {} +func (*RecognitionAudio) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type isRecognitionAudio_AudioSource interface { + isRecognitionAudio_AudioSource() +} + +type RecognitionAudio_Content struct { + Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"` +} +type RecognitionAudio_Uri struct { + Uri string `protobuf:"bytes,2,opt,name=uri,oneof"` +} + +func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {} +func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {} + +func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource { + if m != nil { + return m.AudioSource + } + return nil +} + +func (m *RecognitionAudio) GetContent() []byte { + 
if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok { + return x.Content + } + return nil +} + +func (m *RecognitionAudio) GetUri() string { + if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok { + return x.Uri + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RecognitionAudio_OneofMarshaler, _RecognitionAudio_OneofUnmarshaler, _RecognitionAudio_OneofSizer, []interface{}{ + (*RecognitionAudio_Content)(nil), + (*RecognitionAudio_Uri)(nil), + } +} + +func _RecognitionAudio_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RecognitionAudio) + // audio_source + switch x := m.AudioSource.(type) { + case *RecognitionAudio_Content: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Content) + case *RecognitionAudio_Uri: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Uri) + case nil: + default: + return fmt.Errorf("RecognitionAudio.AudioSource has unexpected type %T", x) + } + return nil +} + +func _RecognitionAudio_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RecognitionAudio) + switch tag { + case 1: // audio_source.content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.AudioSource = &RecognitionAudio_Content{x} + return true, err + case 2: // audio_source.uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.AudioSource = &RecognitionAudio_Uri{x} + return true, err + default: + return false, nil + } +} + +func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RecognitionAudio) + // audio_source + switch x := m.AudioSource.(type) { + case 
*RecognitionAudio_Content: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Content))) + n += len(x.Content) + case *RecognitionAudio_Uri: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Uri))) + n += len(x.Uri) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The only message returned to the client by the `Recognize` method. It +// contains the result as zero or more sequential `SpeechRecognitionResult` +// messages. +type RecognizeResponse struct { + // *Output-only* Sequential list of transcription results corresponding to + // sequential portions of audio. + Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` +} + +func (m *RecognizeResponse) Reset() { *m = RecognizeResponse{} } +func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*RecognizeResponse) ProtoMessage() {} +func (*RecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +// The only message returned to the client by the `LongRunningRecognize` method. +// It contains the result as zero or more sequential `SpeechRecognitionResult` +// messages. It is included in the `result.response` field of the `Operation` +// returned by the `GetOperation` call of the `google::longrunning::Operations` +// service. +type LongRunningRecognizeResponse struct { + // *Output-only* Sequential list of transcription results corresponding to + // sequential portions of audio. 
+ Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` +} + +func (m *LongRunningRecognizeResponse) Reset() { *m = LongRunningRecognizeResponse{} } +func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*LongRunningRecognizeResponse) ProtoMessage() {} +func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +// Describes the progress of a long-running `LongRunningRecognize` call. It is +// included in the `metadata` field of the `Operation` returned by the +// `GetOperation` call of the `google::longrunning::Operations` service. +type LongRunningRecognizeMetadata struct { + // Approximate percentage of audio processed thus far. Guaranteed to be 100 + // when the audio is fully processed and the results are available. + ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"` + // Time when the request was received. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time of the most recent processing update. 
+ LastUpdateTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"` +} + +func (m *LongRunningRecognizeMetadata) Reset() { *m = LongRunningRecognizeMetadata{} } +func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) } +func (*LongRunningRecognizeMetadata) ProtoMessage() {} +func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 { + if m != nil { + return m.ProgressPercent + } + return 0 +} + +func (m *LongRunningRecognizeMetadata) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *google_protobuf4.Timestamp { + if m != nil { + return m.LastUpdateTime + } + return nil +} + +// `StreamingRecognizeResponse` is the only message returned to the client by +// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse` +// messages are streamed back to the client. If there is no recognizable +// audio, and `single_utterance` is set to false, then no messages are streamed +// back to the client. +// +// Here's an example of a series of ten `StreamingRecognizeResponse`s that might +// be returned while processing audio: +// +// 1. results { alternatives { transcript: "tube" } stability: 0.01 } +// +// 2. results { alternatives { transcript: "to be a" } stability: 0.01 } +// +// 3. results { alternatives { transcript: "to be" } stability: 0.9 } +// results { alternatives { transcript: " or not to be" } stability: 0.01 } +// +// 4. results { alternatives { transcript: "to be or not to be" +// confidence: 0.92 } +// alternatives { transcript: "to bee or not to bee" } +// is_final: true } +// +// 5. results { alternatives { transcript: " that's" } stability: 0.01 } +// +// 6. 
results { alternatives { transcript: " that is" } stability: 0.9 } +// results { alternatives { transcript: " the question" } stability: 0.01 } +// +// 7. results { alternatives { transcript: " that is the question" +// confidence: 0.98 } +// alternatives { transcript: " that was the question" } +// is_final: true } +// +// Notes: +// +// - Only two of the above responses #4 and #7 contain final results; they are +// indicated by `is_final: true`. Concatenating these together generates the +// full transcript: "to be or not to be that is the question". +// +// - The others contain interim `results`. #3 and #6 contain two interim +// `results`: the first portion has a high stability and is less likely to +// change; the second portion has a low stability and is very likely to +// change. A UI designer might choose to show only high stability `results`. +// +// - The specific `stability` and `confidence` values shown above are only for +// illustrative purposes. Actual values may vary. +// +// - In each response, only one of these fields will be set: +// `error`, +// `speech_event_type`, or +// one or more (repeated) `results`. +type StreamingRecognizeResponse struct { + // *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status] message that + // specifies the error for the operation. + Error *google_rpc.Status `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // *Output-only* This repeated list contains zero or more results that + // correspond to consecutive portions of the audio currently being processed. + // It contains zero or more `is_final=false` results followed by zero or one + // `is_final=true` result (the newly settled portion). + Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` + // *Output-only* Indicates the type of speech event. 
+ SpeechEventType StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,enum=google.cloud.speech.v1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"` +} + +func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} } +func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognizeResponse) ProtoMessage() {} +func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *StreamingRecognizeResponse) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType { + if m != nil { + return m.SpeechEventType + } + return StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED +} + +// A streaming speech recognition result corresponding to a portion of the audio +// that is currently being processed. +type StreamingRecognitionResult struct { + // *Output-only* May contain one or more recognition hypotheses (up to the + // maximum specified in `max_alternatives`). + Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"` + // *Output-only* If `false`, this `StreamingRecognitionResult` represents an + // interim result that may change. If `true`, this is the final time the + // speech service will return this particular `StreamingRecognitionResult`, + // the recognizer will not return any further hypotheses for this portion of + // the transcript and corresponding audio. 
+ IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal" json:"is_final,omitempty"` + // *Output-only* An estimate of the likelihood that the recognizer will not + // change its guess about this interim result. Values range from 0.0 + // (completely unstable) to 1.0 (completely stable). + // This field is only provided for interim results (`is_final=false`). + // The default of 0.0 is a sentinel value indicating `stability` was not set. + Stability float32 `protobuf:"fixed32,3,opt,name=stability" json:"stability,omitempty"` +} + +func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} } +func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognitionResult) ProtoMessage() {} +func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { + if m != nil { + return m.Alternatives + } + return nil +} + +func (m *StreamingRecognitionResult) GetIsFinal() bool { + if m != nil { + return m.IsFinal + } + return false +} + +func (m *StreamingRecognitionResult) GetStability() float32 { + if m != nil { + return m.Stability + } + return 0 +} + +// A speech recognition result corresponding to a portion of the audio. +type SpeechRecognitionResult struct { + // *Output-only* May contain one or more recognition hypotheses (up to the + // maximum specified in `max_alternatives`). + // These alternatives are ordered in terms of accuracy, with the top (first) + // alternative being the most probable, as ranked by the recognizer. 
+ Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"` +} + +func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} } +func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) } +func (*SpeechRecognitionResult) ProtoMessage() {} +func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { + if m != nil { + return m.Alternatives + } + return nil +} + +// Alternative hypotheses (a.k.a. n-best list). +type SpeechRecognitionAlternative struct { + // *Output-only* Transcript text representing the words that the user spoke. + Transcript string `protobuf:"bytes,1,opt,name=transcript" json:"transcript,omitempty"` + // *Output-only* The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is typically provided only for the top hypothesis, and + // only for `is_final=true` results. Clients should not rely on the + // `confidence` field as it is not guaranteed to be accurate or consistent. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` + // *Output-only* A list of word-specific information for each recognized word. 
+ Words []*WordInfo `protobuf:"bytes,3,rep,name=words" json:"words,omitempty"` +} + +func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} } +func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) } +func (*SpeechRecognitionAlternative) ProtoMessage() {} +func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *SpeechRecognitionAlternative) GetTranscript() string { + if m != nil { + return m.Transcript + } + return "" +} + +func (m *SpeechRecognitionAlternative) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo { + if m != nil { + return m.Words + } + return nil +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. +type WordInfo struct { + // *Output-only* Time offset relative to the beginning of the audio, + // and corresponding to the start of the spoken word. + // This field is only set if `enable_word_time_offsets=true` and only + // in the top hypothesis. + // This is an experimental feature and the accuracy of the time offset can + // vary. + StartTime *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // *Output-only* Time offset relative to the beginning of the audio, + // and corresponding to the end of the spoken word. + // This field is only set if `enable_word_time_offsets=true` and only + // in the top hypothesis. + // This is an experimental feature and the accuracy of the time offset can + // vary. + EndTime *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // *Output-only* The word corresponding to this set of information. 
+ Word string `protobuf:"bytes,3,opt,name=word" json:"word,omitempty"` +} + +func (m *WordInfo) Reset() { *m = WordInfo{} } +func (m *WordInfo) String() string { return proto.CompactTextString(m) } +func (*WordInfo) ProtoMessage() {} +func (*WordInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *WordInfo) GetStartTime() *google_protobuf3.Duration { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *WordInfo) GetEndTime() *google_protobuf3.Duration { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *WordInfo) GetWord() string { + if m != nil { + return m.Word + } + return "" +} + +func init() { + proto.RegisterType((*RecognizeRequest)(nil), "google.cloud.speech.v1.RecognizeRequest") + proto.RegisterType((*LongRunningRecognizeRequest)(nil), "google.cloud.speech.v1.LongRunningRecognizeRequest") + proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1.StreamingRecognizeRequest") + proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1.StreamingRecognitionConfig") + proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1.RecognitionConfig") + proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1.SpeechContext") + proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1.RecognitionAudio") + proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1.RecognizeResponse") + proto.RegisterType((*LongRunningRecognizeResponse)(nil), "google.cloud.speech.v1.LongRunningRecognizeResponse") + proto.RegisterType((*LongRunningRecognizeMetadata)(nil), "google.cloud.speech.v1.LongRunningRecognizeMetadata") + proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1.StreamingRecognizeResponse") + proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1.StreamingRecognitionResult") + proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1.SpeechRecognitionResult") 
+ proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1.SpeechRecognitionAlternative") + proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1.WordInfo") + proto.RegisterEnum("google.cloud.speech.v1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value) + proto.RegisterEnum("google.cloud.speech.v1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Speech service + +type SpeechClient interface { + // Performs synchronous speech recognition: receive results after all audio + // has been sent and processed. + Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) + // Performs asynchronous speech recognition: receive results via the + // google.longrunning.Operations interface. Returns either an + // `Operation.error` or an `Operation.response` which contains + // a `LongRunningRecognizeResponse` message. + LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Performs bidirectional streaming speech recognition: receive results while + // sending audio. This method is only available via the gRPC API (not REST). 
+ StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) +} + +type speechClient struct { + cc *grpc.ClientConn +} + +func NewSpeechClient(cc *grpc.ClientConn) SpeechClient { + return &speechClient{cc} +} + +func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) { + out := new(RecognizeResponse) + err := grpc.Invoke(ctx, "/google.cloud.speech.v1.Speech/Recognize", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.speech.v1.Speech/LongRunningRecognize", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Speech_serviceDesc.Streams[0], c.cc, "/google.cloud.speech.v1.Speech/StreamingRecognize", opts...) 
+ if err != nil { + return nil, err + } + x := &speechStreamingRecognizeClient{stream} + return x, nil +} + +type Speech_StreamingRecognizeClient interface { + Send(*StreamingRecognizeRequest) error + Recv() (*StreamingRecognizeResponse, error) + grpc.ClientStream +} + +type speechStreamingRecognizeClient struct { + grpc.ClientStream +} + +func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) { + m := new(StreamingRecognizeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Speech service + +type SpeechServer interface { + // Performs synchronous speech recognition: receive results after all audio + // has been sent and processed. + Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error) + // Performs asynchronous speech recognition: receive results via the + // google.longrunning.Operations interface. Returns either an + // `Operation.error` or an `Operation.response` which contains + // a `LongRunningRecognizeResponse` message. + LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*google_longrunning.Operation, error) + // Performs bidirectional streaming speech recognition: receive results while + // sending audio. This method is only available via the gRPC API (not REST). 
+ StreamingRecognize(Speech_StreamingRecognizeServer) error +} + +func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) { + s.RegisterService(&_Speech_serviceDesc, srv) +} + +func _Speech_Recognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RecognizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpeechServer).Recognize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.speech.v1.Speech/Recognize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpeechServer).Recognize(ctx, req.(*RecognizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Speech_LongRunningRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LongRunningRecognizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpeechServer).LongRunningRecognize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.speech.v1.Speech/LongRunningRecognize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpeechServer).LongRunningRecognize(ctx, req.(*LongRunningRecognizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream}) +} + +type Speech_StreamingRecognizeServer interface { + Send(*StreamingRecognizeResponse) error + Recv() (*StreamingRecognizeRequest, error) + grpc.ServerStream +} + +type speechStreamingRecognizeServer struct { + grpc.ServerStream +} + +func (x *speechStreamingRecognizeServer) Send(m 
*StreamingRecognizeResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) { + m := new(StreamingRecognizeRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Speech_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.speech.v1.Speech", + HandlerType: (*SpeechServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Recognize", + Handler: _Speech_Recognize_Handler, + }, + { + MethodName: "LongRunningRecognize", + Handler: _Speech_LongRunningRecognize_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingRecognize", + Handler: _Speech_StreamingRecognize_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/cloud/speech/v1/cloud_speech.proto", +} + +func init() { proto.RegisterFile("google/cloud/speech/v1/cloud_speech.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1318 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xce, 0xc6, 0x71, 0x3e, 0xde, 0xe6, 0xc3, 0x19, 0x4a, 0xeb, 0xb8, 0xa1, 0x8d, 0xb6, 0x54, + 0x24, 0x3d, 0xd8, 0x24, 0xad, 0x5a, 0x28, 0x08, 0xc9, 0x71, 0x36, 0x89, 0x25, 0xc7, 0x89, 0xc6, + 0x0e, 0x29, 0x1c, 0x18, 0x4d, 0xec, 0xf1, 0x76, 0x25, 0x7b, 0x76, 0x99, 0x19, 0x87, 0xa6, 0x17, + 0x04, 0x57, 0x24, 0x2e, 0x88, 0x9e, 0x39, 0xf5, 0xca, 0x81, 0x0b, 0xbf, 0x81, 0x23, 0xfc, 0x05, + 0x7e, 0x04, 0x47, 0x34, 0x33, 0xbb, 0x8e, 0xed, 0xc4, 0x69, 0x2a, 0x15, 0x89, 0xdb, 0xce, 0xf3, + 0x7e, 0xcc, 0x33, 0xef, 0xbc, 0x1f, 0xb3, 0xb0, 0xe6, 0x87, 0xa1, 0xdf, 0x66, 0x85, 0x46, 0x3b, + 0xec, 0x36, 0x0b, 0x32, 0x62, 0xac, 0xf1, 0xac, 0x70, 0xb2, 0x6e, 0xd7, 0xc4, 0xae, 0xf3, 0x91, + 0x08, 0x55, 0x88, 0x6e, 0x58, 0xd5, 0xbc, 0x11, 0xe5, 0x63, 0xd1, 0xc9, 0x7a, 0x6e, 0x39, 0x76, + 0x41, 0xa3, 0xa0, 0x40, 0x39, 0x0f, 0x15, 0x55, 0x41, 0xc8, 0xa5, 
0xb5, 0xca, 0xdd, 0x8d, 0xa5, + 0xed, 0x90, 0xfb, 0xa2, 0xcb, 0x79, 0xc0, 0xfd, 0x42, 0x18, 0x31, 0x31, 0xa0, 0xb4, 0x14, 0x2b, + 0x99, 0xd5, 0x71, 0xb7, 0x55, 0xa0, 0xfc, 0x34, 0x16, 0xdd, 0x1e, 0x16, 0x35, 0xbb, 0xd6, 0x36, + 0x96, 0xdf, 0x19, 0x96, 0xab, 0xa0, 0xc3, 0xa4, 0xa2, 0x9d, 0x28, 0x56, 0xb8, 0x19, 0x2b, 0x88, + 0xa8, 0x51, 0x90, 0x8a, 0xaa, 0x6e, 0xbc, 0xa9, 0xfb, 0xb3, 0x03, 0x19, 0xcc, 0x1a, 0xa1, 0xcf, + 0x83, 0x17, 0x0c, 0xb3, 0xaf, 0xbb, 0x4c, 0x2a, 0x54, 0x84, 0xc9, 0x46, 0xc8, 0x5b, 0x81, 0x9f, + 0x75, 0x56, 0x9c, 0xd5, 0x6b, 0x1b, 0x6b, 0xf9, 0x8b, 0x4f, 0x9d, 0x8f, 0x2d, 0x35, 0x93, 0x92, + 0x31, 0xc0, 0xb1, 0x21, 0xfa, 0x0c, 0xd2, 0xb4, 0xdb, 0x0c, 0xc2, 0xec, 0xb8, 0xf1, 0xb0, 0x7a, + 0x05, 0x0f, 0x45, 0xad, 0x8f, 0xad, 0x99, 0xfb, 0x8b, 0x03, 0xb7, 0x2a, 0x21, 0xf7, 0xb1, 0x8d, + 0xd6, 0xff, 0x91, 0xe2, 0xef, 0x0e, 0x2c, 0xd5, 0x94, 0x60, 0xb4, 0x73, 0x11, 0x41, 0x02, 0x19, + 0x99, 0x08, 0xc9, 0x00, 0xd5, 0x8d, 0x51, 0x1b, 0x0d, 0x3b, 0x3b, 0xe3, 0xbc, 0x3b, 0x86, 0x17, + 0x7a, 0xde, 0x2c, 0x84, 0xee, 0xc1, 0x9c, 0xe1, 0xa1, 0x9d, 0x2b, 0xc6, 0x95, 0x39, 0xc6, 0xec, + 0xee, 0x18, 0x9e, 0x35, 0x70, 0xc9, 0xa2, 0x9b, 0xef, 0xc0, 0xe2, 0x19, 0x0f, 0x61, 0xc9, 0xb9, + 0xbf, 0x39, 0x90, 0x1b, 0xbd, 0xdb, 0xdb, 0x08, 0xee, 0x1a, 0x64, 0x64, 0xc0, 0xfd, 0x36, 0x23, + 0x5d, 0xa5, 0x98, 0xa0, 0xbc, 0xc1, 0x0c, 0xc1, 0x69, 0xbc, 0x60, 0xf1, 0xc3, 0x04, 0x46, 0x1f, + 0xc0, 0x42, 0xc0, 0x15, 0x13, 0x41, 0x87, 0x08, 0x26, 0xbb, 0x6d, 0x25, 0xb3, 0x29, 0xa3, 0x39, + 0x1f, 0xc3, 0xd8, 0xa2, 0xee, 0xab, 0x09, 0x58, 0x3c, 0x4f, 0xb6, 0x06, 0xd3, 0x8c, 0x37, 0xc2, + 0x66, 0xc0, 0x2d, 0xdd, 0xf9, 0x8d, 0xc7, 0x57, 0xa6, 0x9b, 0x37, 0x17, 0xea, 0xc5, 0xe6, 0xb8, + 0xe7, 0x08, 0xdd, 0x87, 0x45, 0x49, 0x3b, 0x51, 0x9b, 0x11, 0x41, 0x15, 0x23, 0xcf, 0x98, 0x50, + 0x2f, 0x0c, 0xff, 0x34, 0x5e, 0xb0, 0x02, 0x4c, 0x15, 0xdb, 0xd5, 0x30, 0xba, 0x0b, 0x73, 0x6d, + 0xca, 0xfd, 0x2e, 0xf5, 0x19, 0x69, 0x84, 0x4d, 0x66, 0xd8, 0xcf, 0xe0, 0xd9, 0x04, 0x2c, 0x85, + 0x4d, 
0xa6, 0xe3, 0xd1, 0xa1, 0xcf, 0x09, 0x6d, 0x2b, 0x26, 0x38, 0x55, 0xc1, 0x09, 0x93, 0xd9, + 0x09, 0xeb, 0xaf, 0x43, 0x9f, 0x17, 0xfb, 0x60, 0xad, 0x1a, 0x89, 0xb0, 0x45, 0x79, 0xa0, 0x4e, + 0x49, 0x2b, 0xd0, 0xa2, 0x6c, 0xda, 0x86, 0xae, 0x87, 0x6f, 0x1b, 0x18, 0x55, 0x61, 0xc1, 0x9e, + 0xce, 0x26, 0xc1, 0x73, 0x25, 0xb3, 0x93, 0x2b, 0xa9, 0xd5, 0x6b, 0x1b, 0xf7, 0x46, 0xe6, 0x98, + 0xf9, 0x2a, 0x59, 0x6d, 0x3c, 0x2f, 0xfb, 0x97, 0x12, 0x3d, 0x86, 0x2c, 0xe3, 0xf4, 0xb8, 0xcd, + 0xc8, 0x37, 0xa1, 0x68, 0x12, 0xdd, 0x45, 0x48, 0xd8, 0x6a, 0x49, 0xa6, 0x64, 0x76, 0xda, 0x50, + 0x78, 0xd7, 0xca, 0x8f, 0x42, 0xd1, 0xac, 0x07, 0x1d, 0xb6, 0x6f, 0x85, 0xee, 0x0f, 0x0e, 0xcc, + 0x0d, 0xc4, 0x12, 0x65, 0xe1, 0xba, 0x57, 0x2d, 0xed, 0x6f, 0x95, 0xab, 0x3b, 0xe4, 0xb0, 0x5a, + 0x3b, 0xf0, 0x4a, 0xe5, 0xed, 0xb2, 0xb7, 0x95, 0x19, 0x43, 0xb3, 0x30, 0x5d, 0x29, 0x57, 0xbd, + 0x22, 0x5e, 0x7f, 0x94, 0x71, 0xd0, 0x34, 0x4c, 0x6c, 0x57, 0x8a, 0xa5, 0xcc, 0x38, 0x9a, 0x81, + 0xf4, 0xde, 0x61, 0xa5, 0x78, 0x94, 0x49, 0xa1, 0x29, 0x48, 0x15, 0xf7, 0x70, 0x66, 0x02, 0x01, + 0x4c, 0x16, 0xf7, 0x30, 0x39, 0xda, 0xcc, 0xa4, 0xb5, 0xdd, 0xfe, 0xce, 0x0e, 0xd9, 0x3f, 0x38, + 0xac, 0x65, 0x26, 0x51, 0x0e, 0x6e, 0xd4, 0x0e, 0x3c, 0xef, 0x29, 0x39, 0x2a, 0xd7, 0x77, 0xc9, + 0xae, 0x57, 0xdc, 0xf2, 0x30, 0xd9, 0xfc, 0xa2, 0xee, 0x65, 0xa6, 0xdc, 0x35, 0x98, 0x1b, 0x38, + 0x27, 0xca, 0xc2, 0x54, 0xf4, 0x4c, 0x50, 0xc9, 0x64, 0xd6, 0x59, 0x49, 0xad, 0xce, 0xe0, 0x64, + 0xe9, 0xe2, 0x5e, 0xfb, 0xeb, 0xd5, 0x37, 0xca, 0xc1, 0x54, 0x52, 0x53, 0x4e, 0x5c, 0x53, 0x09, + 0x80, 0x10, 0xa4, 0xba, 0x22, 0x30, 0xa9, 0x30, 0xb3, 0x3b, 0x86, 0xf5, 0x62, 0x73, 0x1e, 0x6c, + 0xc9, 0x11, 0x19, 0x76, 0x45, 0x83, 0xb9, 0x5f, 0xf5, 0xd2, 0x54, 0xb7, 0x03, 0x19, 0x85, 0x5c, + 0x32, 0x54, 0x86, 0xa9, 0x24, 0xbb, 0xc7, 0xcd, 0x15, 0x15, 0x2e, 0xbf, 0xa2, 0x3e, 0x56, 0x36, + 0xff, 0x71, 0x62, 0xef, 0x06, 0xb0, 0x7c, 0x71, 0x6b, 0x7c, 0xfb, 0x5b, 0xfd, 0xe1, 0x5c, 0xbc, + 0xd7, 0x1e, 0x53, 0xb4, 0x49, 0x15, 0x8d, 
0x93, 0xd5, 0x17, 0x4c, 0x4a, 0x12, 0x31, 0xd1, 0x48, + 0x82, 0x96, 0x36, 0xc9, 0x6a, 0xf0, 0x03, 0x0b, 0xa3, 0x8f, 0x01, 0xa4, 0xa2, 0x42, 0x99, 0xb4, + 0x8a, 0x9b, 0x6e, 0x2e, 0x61, 0x96, 0x4c, 0xae, 0x7c, 0x3d, 0x99, 0x5c, 0x78, 0xc6, 0x68, 0xeb, + 0x35, 0xda, 0x82, 0x4c, 0x9b, 0x4a, 0x45, 0xba, 0x51, 0x53, 0x97, 0xa3, 0x71, 0x90, 0x7a, 0xad, + 0x83, 0x79, 0x6d, 0x73, 0x68, 0x4c, 0x34, 0xe8, 0xfe, 0x39, 0x7e, 0xbe, 0xeb, 0xf5, 0x85, 0x6d, + 0x15, 0xd2, 0x4c, 0x88, 0x50, 0xc4, 0x4d, 0x0f, 0x25, 0x9e, 0x45, 0xd4, 0xc8, 0xd7, 0xcc, 0xcc, + 0xc4, 0x56, 0x01, 0x55, 0x86, 0x03, 0xfc, 0x46, 0x2d, 0x7d, 0x28, 0xc6, 0x88, 0xc3, 0x62, 0x5c, + 0xc4, 0xec, 0x84, 0x71, 0x45, 0xd4, 0x69, 0xc4, 0x4c, 0x6f, 0x98, 0xdf, 0xd8, 0xbc, 0xaa, 0xdf, + 0xb3, 0x63, 0xc4, 0x77, 0xea, 0x69, 0x57, 0xf5, 0xd3, 0x88, 0xe1, 0xb8, 0x43, 0xf4, 0x00, 0xb7, + 0x02, 0x0b, 0x43, 0x3a, 0x68, 0x19, 0xb2, 0xba, 0x98, 0x4a, 0xbb, 0xc4, 0xfb, 0xdc, 0xab, 0xd6, + 0x87, 0x0a, 0xf6, 0x16, 0xdc, 0xf4, 0xaa, 0x5b, 0x64, 0x7f, 0x9b, 0xd4, 0xca, 0xd5, 0x9d, 0x8a, + 0x47, 0x0e, 0xeb, 0x75, 0x0f, 0x17, 0xab, 0x25, 0x2f, 0xe3, 0xb8, 0xbf, 0x8e, 0x18, 0x25, 0xf6, + 0x94, 0xe8, 0x29, 0xcc, 0x0e, 0xf4, 0x3c, 0xc7, 0xc4, 0xeb, 0xe1, 0x95, 0x13, 0xb2, 0xaf, 0x33, + 0xe2, 0x01, 0x4f, 0x68, 0x09, 0xa6, 0x03, 0x49, 0x5a, 0x01, 0xa7, 0xed, 0x78, 0xb2, 0x4c, 0x05, + 0x72, 0x5b, 0x2f, 0xd1, 0x32, 0xe8, 0xdc, 0x39, 0x0e, 0xda, 0x81, 0x3a, 0x35, 0x79, 0x32, 0x8e, + 0xcf, 0x00, 0x57, 0xc2, 0xcd, 0x11, 0x79, 0xff, 0xdf, 0xb1, 0x75, 0x5f, 0x3a, 0xb0, 0x7c, 0x99, + 0x3a, 0xba, 0x0d, 0xa0, 0x04, 0xe5, 0xb2, 0x21, 0x82, 0xc8, 0x96, 0xd0, 0x0c, 0xee, 0x43, 0xb4, + 0xdc, 0x8c, 0xd6, 0x26, 0x4b, 0x46, 0xe9, 0x38, 0xee, 0x43, 0xd0, 0x23, 0x48, 0xeb, 0x9e, 0xad, + 0x67, 0xa7, 0xe6, 0xbc, 0x32, 0x8a, 0xb3, 0xee, 0xdc, 0x65, 0xde, 0x0a, 0xb1, 0x55, 0x77, 0x7f, + 0x74, 0x60, 0x3a, 0xc1, 0xd0, 0x47, 0x03, 0x25, 0x6a, 0xeb, 0x60, 0xe9, 0x5c, 0x85, 0x6d, 0xc5, + 0x8f, 0xcf, 0xfe, 0x0a, 0x7d, 0xa8, 0xa7, 0x70, 0xb3, 0xbf, 0xb4, 0x2f, 0xb1, 
0x9b, 0x62, 0xdc, + 0x4c, 0x0f, 0x84, 0x60, 0x42, 0xb3, 0x88, 0x27, 0xa6, 0xf9, 0xde, 0x78, 0x95, 0x82, 0x49, 0x1b, + 0x29, 0xf4, 0x9d, 0x03, 0x33, 0xbd, 0x04, 0x47, 0xaf, 0x7b, 0xa0, 0xf5, 0xde, 0x5e, 0xb9, 0xb5, + 0x2b, 0x68, 0xda, 0x6a, 0x71, 0xef, 0x7c, 0xff, 0xd7, 0xdf, 0x3f, 0x8d, 0x2f, 0xb9, 0xd7, 0xf5, + 0x7b, 0xdf, 0x2a, 0x3e, 0x11, 0x89, 0xd6, 0x13, 0xe7, 0x3e, 0x7a, 0xe9, 0xc0, 0xf5, 0x8b, 0x3a, + 0x20, 0x7a, 0x30, 0x6a, 0x93, 0x4b, 0x9e, 0xad, 0xb9, 0xf7, 0x12, 0xa3, 0xbe, 0x3f, 0x81, 0xfc, + 0x7e, 0xf2, 0x27, 0xe0, 0xde, 0x37, 0x6c, 0xde, 0x77, 0xef, 0xf4, 0xb1, 0xe9, 0xd3, 0x1c, 0x20, + 0xf6, 0x2d, 0xa0, 0xf3, 0x5d, 0x00, 0xad, 0xbf, 0x49, 0xc7, 0xb0, 0x9c, 0x36, 0xde, 0xbc, 0xc9, + 0xac, 0x3a, 0x1f, 0x3a, 0x9b, 0x6d, 0xc8, 0x35, 0xc2, 0xce, 0x08, 0xe3, 0xcd, 0x6b, 0xf6, 0x0e, + 0x0f, 0xf4, 0xe5, 0x1f, 0x38, 0x5f, 0x7e, 0x1a, 0xab, 0xf9, 0xa1, 0x7e, 0x16, 0xe5, 0x43, 0xe1, + 0x17, 0x7c, 0xc6, 0x4d, 0x6a, 0x14, 0xac, 0x88, 0x46, 0x81, 0x1c, 0xfe, 0x03, 0xfb, 0xc4, 0x7e, + 0xfd, 0xe3, 0x38, 0xc7, 0x93, 0x46, 0xf7, 0xc1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0x28, + 0xae, 0xd1, 0xac, 0x0d, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1beta1/cloud_speech.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1beta1/cloud_speech.pb.go new file mode 100644 index 000000000..178a93075 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1beta1/cloud_speech.pb.go @@ -0,0 +1,1195 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/speech/v1beta1/cloud_speech.proto + +/* +Package speech is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/speech/v1beta1/cloud_speech.proto + +It has these top-level messages: + SyncRecognizeRequest + AsyncRecognizeRequest + StreamingRecognizeRequest + StreamingRecognitionConfig + RecognitionConfig + SpeechContext + RecognitionAudio + SyncRecognizeResponse + AsyncRecognizeResponse + AsyncRecognizeMetadata + StreamingRecognizeResponse + StreamingRecognitionResult + SpeechRecognitionResult + SpeechRecognitionAlternative +*/ +package speech + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Audio encoding of the data sent in the audio message. All encodings support +// only 1 channel (mono) audio. Only `FLAC` includes a header that describes +// the bytes of audio that follow the header. The other encodings are raw +// audio bytes with no header. +// +// For best results, the audio source should be captured and transmitted using +// a lossless encoding (`FLAC` or `LINEAR16`). 
Recognition accuracy may be +// reduced if lossy codecs (such as AMR, AMR_WB and MULAW) are used to capture +// or transmit the audio, particularly if background noise is present. +type RecognitionConfig_AudioEncoding int32 + +const ( + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0 + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + // This is the only encoding that may be used by `AsyncRecognize`. + RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1 + // This is the recommended encoding for `SyncRecognize` and + // `StreamingRecognize` because it uses lossless compression; therefore + // recognition accuracy is not compromised by a lossy codec. + // + // The stream FLAC (Free Lossless Audio Codec) encoding is specified at: + // http://flac.sourceforge.net/documentation.html. + // 16-bit and 24-bit samples are supported. + // Not all fields in STREAMINFO are supported. + RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2 + // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. + RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3 + // Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 Hz. + RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4 + // Adaptive Multi-Rate Wideband codec. `sample_rate` must be 16000 Hz. 
+ RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5 +) + +var RecognitionConfig_AudioEncoding_name = map[int32]string{ + 0: "ENCODING_UNSPECIFIED", + 1: "LINEAR16", + 2: "FLAC", + 3: "MULAW", + 4: "AMR", + 5: "AMR_WB", +} +var RecognitionConfig_AudioEncoding_value = map[string]int32{ + "ENCODING_UNSPECIFIED": 0, + "LINEAR16": 1, + "FLAC": 2, + "MULAW": 3, + "AMR": 4, + "AMR_WB": 5, +} + +func (x RecognitionConfig_AudioEncoding) String() string { + return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x)) +} +func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 0} +} + +// Indicates the type of endpointer event. +type StreamingRecognizeResponse_EndpointerType int32 + +const ( + // No endpointer event specified. + StreamingRecognizeResponse_ENDPOINTER_EVENT_UNSPECIFIED StreamingRecognizeResponse_EndpointerType = 0 + // Speech has been detected in the audio stream, and the service is + // beginning to process it. + StreamingRecognizeResponse_START_OF_SPEECH StreamingRecognizeResponse_EndpointerType = 1 + // Speech has ceased to be detected in the audio stream. (For example, the + // user may have paused after speaking.) If `single_utterance` is `false`, + // the service will continue to process audio, and if subsequent speech is + // detected, will send another START_OF_SPEECH event. + StreamingRecognizeResponse_END_OF_SPEECH StreamingRecognizeResponse_EndpointerType = 2 + // This event is sent after the client has half-closed the input stream gRPC + // connection and the server has received all of the audio. (The server may + // still be processing the audio and may subsequently return additional + // results.) + StreamingRecognizeResponse_END_OF_AUDIO StreamingRecognizeResponse_EndpointerType = 3 + // This event is only sent when `single_utterance` is `true`. It indicates + // that the server has detected the end of the user's speech utterance and + // expects no additional speech. 
Therefore, the server will not process + // additional audio (although it may subsequently return additional + // results). The client should stop sending additional audio data, + // half-close the gRPC connection, and wait for any additional results + // until the server closes the gRPC connection. + StreamingRecognizeResponse_END_OF_UTTERANCE StreamingRecognizeResponse_EndpointerType = 4 +) + +var StreamingRecognizeResponse_EndpointerType_name = map[int32]string{ + 0: "ENDPOINTER_EVENT_UNSPECIFIED", + 1: "START_OF_SPEECH", + 2: "END_OF_SPEECH", + 3: "END_OF_AUDIO", + 4: "END_OF_UTTERANCE", +} +var StreamingRecognizeResponse_EndpointerType_value = map[string]int32{ + "ENDPOINTER_EVENT_UNSPECIFIED": 0, + "START_OF_SPEECH": 1, + "END_OF_SPEECH": 2, + "END_OF_AUDIO": 3, + "END_OF_UTTERANCE": 4, +} + +func (x StreamingRecognizeResponse_EndpointerType) String() string { + return proto.EnumName(StreamingRecognizeResponse_EndpointerType_name, int32(x)) +} +func (StreamingRecognizeResponse_EndpointerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{10, 0} +} + +// The top-level message sent by the client for the `SyncRecognize` method. +type SyncRecognizeRequest struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. + Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Required* The audio data to be recognized. 
+ Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio" json:"audio,omitempty"` +} + +func (m *SyncRecognizeRequest) Reset() { *m = SyncRecognizeRequest{} } +func (m *SyncRecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*SyncRecognizeRequest) ProtoMessage() {} +func (*SyncRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *SyncRecognizeRequest) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *SyncRecognizeRequest) GetAudio() *RecognitionAudio { + if m != nil { + return m.Audio + } + return nil +} + +// The top-level message sent by the client for the `AsyncRecognize` method. +type AsyncRecognizeRequest struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. + Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Required* The audio data to be recognized. + Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio" json:"audio,omitempty"` +} + +func (m *AsyncRecognizeRequest) Reset() { *m = AsyncRecognizeRequest{} } +func (m *AsyncRecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*AsyncRecognizeRequest) ProtoMessage() {} +func (*AsyncRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *AsyncRecognizeRequest) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *AsyncRecognizeRequest) GetAudio() *RecognitionAudio { + if m != nil { + return m.Audio + } + return nil +} + +// The top-level message sent by the client for the `StreamingRecognize` method. +// Multiple `StreamingRecognizeRequest` messages are sent. The first message +// must contain a `streaming_config` message and must not contain `audio` data. +// All subsequent messages must contain `audio` data and must not contain a +// `streaming_config` message. 
+type StreamingRecognizeRequest struct { + // The streaming request, which is either a streaming config or audio content. + // + // Types that are valid to be assigned to StreamingRequest: + // *StreamingRecognizeRequest_StreamingConfig + // *StreamingRecognizeRequest_AudioContent + StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"` +} + +func (m *StreamingRecognizeRequest) Reset() { *m = StreamingRecognizeRequest{} } +func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognizeRequest) ProtoMessage() {} +func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isStreamingRecognizeRequest_StreamingRequest interface { + isStreamingRecognizeRequest_StreamingRequest() +} + +type StreamingRecognizeRequest_StreamingConfig struct { + StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,oneof"` +} +type StreamingRecognizeRequest_AudioContent struct { + AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"` +} + +func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {} +func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {} + +func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest { + if m != nil { + return m.StreamingRequest + } + return nil +} + +func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig { + if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok { + return x.StreamingConfig + } + return nil +} + +func (m *StreamingRecognizeRequest) GetAudioContent() []byte { + if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok { + return x.AudioContent + } + return nil +} + +// XXX_OneofFuncs is for the 
internal use of the proto package. +func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StreamingRecognizeRequest_OneofMarshaler, _StreamingRecognizeRequest_OneofUnmarshaler, _StreamingRecognizeRequest_OneofSizer, []interface{}{ + (*StreamingRecognizeRequest_StreamingConfig)(nil), + (*StreamingRecognizeRequest_AudioContent)(nil), + } +} + +func _StreamingRecognizeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StreamingRecognizeRequest) + // streaming_request + switch x := m.StreamingRequest.(type) { + case *StreamingRecognizeRequest_StreamingConfig: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StreamingConfig); err != nil { + return err + } + case *StreamingRecognizeRequest_AudioContent: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.AudioContent) + case nil: + default: + return fmt.Errorf("StreamingRecognizeRequest.StreamingRequest has unexpected type %T", x) + } + return nil +} + +func _StreamingRecognizeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StreamingRecognizeRequest) + switch tag { + case 1: // streaming_request.streaming_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StreamingRecognitionConfig) + err := b.DecodeMessage(msg) + m.StreamingRequest = &StreamingRecognizeRequest_StreamingConfig{msg} + return true, err + case 2: // streaming_request.audio_content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StreamingRequest = &StreamingRecognizeRequest_AudioContent{x} + return true, err + default: + return false, nil + } +} + +func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) { + m := 
msg.(*StreamingRecognizeRequest) + // streaming_request + switch x := m.StreamingRequest.(type) { + case *StreamingRecognizeRequest_StreamingConfig: + s := proto.Size(x.StreamingConfig) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StreamingRecognizeRequest_AudioContent: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AudioContent))) + n += len(x.AudioContent) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Provides information to the recognizer that specifies how to process the +// request. +type StreamingRecognitionConfig struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. + Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Optional* If `false` or omitted, the recognizer will perform continuous + // recognition (continuing to wait for and process audio even if the user + // pauses speaking) until the client closes the input stream (gRPC API) or + // until the maximum time limit has been reached. May return multiple + // `StreamingRecognitionResult`s with the `is_final` flag set to `true`. + // + // If `true`, the recognizer will detect a single spoken utterance. When it + // detects that the user has paused or stopped speaking, it will return an + // `END_OF_UTTERANCE` event and cease recognition. It will return no more than + // one `StreamingRecognitionResult` with the `is_final` flag set to `true`. + SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance" json:"single_utterance,omitempty"` + // *Optional* If `true`, interim results (tentative hypotheses) may be + // returned as they become available (these interim results are indicated with + // the `is_final=false` flag). + // If `false` or omitted, only `is_final=true` result(s) are returned. 
+ InterimResults bool `protobuf:"varint,3,opt,name=interim_results,json=interimResults" json:"interim_results,omitempty"` +} + +func (m *StreamingRecognitionConfig) Reset() { *m = StreamingRecognitionConfig{} } +func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognitionConfig) ProtoMessage() {} +func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *StreamingRecognitionConfig) GetSingleUtterance() bool { + if m != nil { + return m.SingleUtterance + } + return false +} + +func (m *StreamingRecognitionConfig) GetInterimResults() bool { + if m != nil { + return m.InterimResults + } + return false +} + +// Provides information to the recognizer that specifies how to process the +// request. +type RecognitionConfig struct { + // *Required* Encoding of audio data sent in all `RecognitionAudio` messages. + Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,enum=google.cloud.speech.v1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"` + // *Required* Sample rate in Hertz of the audio data sent in all + // `RecognitionAudio` messages. Valid values are: 8000-48000. + // 16000 is optimal. For best results, set the sampling rate of the audio + // source to 16000 Hz. If that's not possible, use the native sample rate of + // the audio source (instead of re-sampling). + SampleRate int32 `protobuf:"varint,2,opt,name=sample_rate,json=sampleRate" json:"sample_rate,omitempty"` + // *Optional* The language of the supplied audio as a BCP-47 language tag. + // Example: "en-GB" https://www.rfc-editor.org/rfc/bcp/bcp47.txt + // If omitted, defaults to "en-US". See + // [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. 
+ LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // *Optional* Maximum number of recognition hypotheses to be returned. + // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechRecognitionResult`. + // The server may return fewer than `max_alternatives`. + // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of + // one. If omitted, will return a maximum of one. + MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives" json:"max_alternatives,omitempty"` + // *Optional* If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter" json:"profanity_filter,omitempty"` + // *Optional* A means to provide context to assist the speech recognition. 
+ SpeechContext *SpeechContext `protobuf:"bytes,6,opt,name=speech_context,json=speechContext" json:"speech_context,omitempty"` +} + +func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} } +func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) } +func (*RecognitionConfig) ProtoMessage() {} +func (*RecognitionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding { + if m != nil { + return m.Encoding + } + return RecognitionConfig_ENCODING_UNSPECIFIED +} + +func (m *RecognitionConfig) GetSampleRate() int32 { + if m != nil { + return m.SampleRate + } + return 0 +} + +func (m *RecognitionConfig) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *RecognitionConfig) GetMaxAlternatives() int32 { + if m != nil { + return m.MaxAlternatives + } + return 0 +} + +func (m *RecognitionConfig) GetProfanityFilter() bool { + if m != nil { + return m.ProfanityFilter + } + return false +} + +func (m *RecognitionConfig) GetSpeechContext() *SpeechContext { + if m != nil { + return m.SpeechContext + } + return nil +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +type SpeechContext struct { + // *Optional* A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). 
+ Phrases []string `protobuf:"bytes,1,rep,name=phrases" json:"phrases,omitempty"` +} + +func (m *SpeechContext) Reset() { *m = SpeechContext{} } +func (m *SpeechContext) String() string { return proto.CompactTextString(m) } +func (*SpeechContext) ProtoMessage() {} +func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *SpeechContext) GetPhrases() []string { + if m != nil { + return m.Phrases + } + return nil +} + +// Contains audio data in the encoding specified in the `RecognitionConfig`. +// Either `content` or `uri` must be supplied. Supplying both or neither +// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See +// [audio limits](https://cloud.google.com/speech/limits#content). +type RecognitionAudio struct { + // The audio source, which is either inline content or a GCS uri. + // + // Types that are valid to be assigned to AudioSource: + // *RecognitionAudio_Content + // *RecognitionAudio_Uri + AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"` +} + +func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} } +func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) } +func (*RecognitionAudio) ProtoMessage() {} +func (*RecognitionAudio) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type isRecognitionAudio_AudioSource interface { + isRecognitionAudio_AudioSource() +} + +type RecognitionAudio_Content struct { + Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"` +} +type RecognitionAudio_Uri struct { + Uri string `protobuf:"bytes,2,opt,name=uri,oneof"` +} + +func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {} +func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {} + +func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource { + if m != nil { + return m.AudioSource + } + return nil +} + +func (m *RecognitionAudio) GetContent() []byte { + if x, ok := 
m.GetAudioSource().(*RecognitionAudio_Content); ok { + return x.Content + } + return nil +} + +func (m *RecognitionAudio) GetUri() string { + if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok { + return x.Uri + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RecognitionAudio_OneofMarshaler, _RecognitionAudio_OneofUnmarshaler, _RecognitionAudio_OneofSizer, []interface{}{ + (*RecognitionAudio_Content)(nil), + (*RecognitionAudio_Uri)(nil), + } +} + +func _RecognitionAudio_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RecognitionAudio) + // audio_source + switch x := m.AudioSource.(type) { + case *RecognitionAudio_Content: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Content) + case *RecognitionAudio_Uri: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Uri) + case nil: + default: + return fmt.Errorf("RecognitionAudio.AudioSource has unexpected type %T", x) + } + return nil +} + +func _RecognitionAudio_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RecognitionAudio) + switch tag { + case 1: // audio_source.content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.AudioSource = &RecognitionAudio_Content{x} + return true, err + case 2: // audio_source.uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.AudioSource = &RecognitionAudio_Uri{x} + return true, err + default: + return false, nil + } +} + +func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RecognitionAudio) + // audio_source + switch x := m.AudioSource.(type) { + case 
*RecognitionAudio_Content: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Content))) + n += len(x.Content) + case *RecognitionAudio_Uri: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Uri))) + n += len(x.Uri) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The only message returned to the client by `SyncRecognize`. method. It +// contains the result as zero or more sequential `SpeechRecognitionResult` +// messages. +type SyncRecognizeResponse struct { + // *Output-only* Sequential list of transcription results corresponding to + // sequential portions of audio. + Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` +} + +func (m *SyncRecognizeResponse) Reset() { *m = SyncRecognizeResponse{} } +func (m *SyncRecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*SyncRecognizeResponse) ProtoMessage() {} +func (*SyncRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *SyncRecognizeResponse) GetResults() []*SpeechRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +// The only message returned to the client by `AsyncRecognize`. It contains the +// result as zero or more sequential `SpeechRecognitionResult` messages. It is +// included in the `result.response` field of the `Operation` returned by the +// `GetOperation` call of the `google::longrunning::Operations` service. +type AsyncRecognizeResponse struct { + // *Output-only* Sequential list of transcription results corresponding to + // sequential portions of audio. 
+ Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` +} + +func (m *AsyncRecognizeResponse) Reset() { *m = AsyncRecognizeResponse{} } +func (m *AsyncRecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*AsyncRecognizeResponse) ProtoMessage() {} +func (*AsyncRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *AsyncRecognizeResponse) GetResults() []*SpeechRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +// Describes the progress of a long-running `AsyncRecognize` call. It is +// included in the `metadata` field of the `Operation` returned by the +// `GetOperation` call of the `google::longrunning::Operations` service. +type AsyncRecognizeMetadata struct { + // Approximate percentage of audio processed thus far. Guaranteed to be 100 + // when the audio is fully processed and the results are available. + ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"` + // Time when the request was received. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time of the most recent processing update. 
+ LastUpdateTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"` +} + +func (m *AsyncRecognizeMetadata) Reset() { *m = AsyncRecognizeMetadata{} } +func (m *AsyncRecognizeMetadata) String() string { return proto.CompactTextString(m) } +func (*AsyncRecognizeMetadata) ProtoMessage() {} +func (*AsyncRecognizeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *AsyncRecognizeMetadata) GetProgressPercent() int32 { + if m != nil { + return m.ProgressPercent + } + return 0 +} + +func (m *AsyncRecognizeMetadata) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *AsyncRecognizeMetadata) GetLastUpdateTime() *google_protobuf4.Timestamp { + if m != nil { + return m.LastUpdateTime + } + return nil +} + +// `StreamingRecognizeResponse` is the only message returned to the client by +// `StreamingRecognize`. A series of one or more `StreamingRecognizeResponse` +// messages are streamed back to the client. +// +// Here's an example of a series of ten `StreamingRecognizeResponse`s that might +// be returned while processing audio: +// +// 1. endpointer_type: START_OF_SPEECH +// +// 2. results { alternatives { transcript: "tube" } stability: 0.01 } +// result_index: 0 +// +// 3. results { alternatives { transcript: "to be a" } stability: 0.01 } +// result_index: 0 +// +// 4. results { alternatives { transcript: "to be" } stability: 0.9 } +// results { alternatives { transcript: " or not to be" } stability: 0.01 } +// result_index: 0 +// +// 5. results { alternatives { transcript: "to be or not to be" +// confidence: 0.92 } +// alternatives { transcript: "to bee or not to bee" } +// is_final: true } +// result_index: 0 +// +// 6. results { alternatives { transcript: " that's" } stability: 0.01 } +// result_index: 1 +// +// 7. 
results { alternatives { transcript: " that is" } stability: 0.9 } +// results { alternatives { transcript: " the question" } stability: 0.01 } +// result_index: 1 +// +// 8. endpointer_type: END_OF_SPEECH +// +// 9. results { alternatives { transcript: " that is the question" +// confidence: 0.98 } +// alternatives { transcript: " that was the question" } +// is_final: true } +// result_index: 1 +// +// 10. endpointer_type: END_OF_AUDIO +// +// Notes: +// +// - Only two of the above responses #5 and #9 contain final results, they are +// indicated by `is_final: true`. Concatenating these together generates the +// full transcript: "to be or not to be that is the question". +// +// - The others contain interim `results`. #4 and #7 contain two interim +// `results`, the first portion has a high stability and is less likely to +// change, the second portion has a low stability and is very likely to +// change. A UI designer might choose to show only high stability `results`. +// +// - The specific `stability` and `confidence` values shown above are only for +// illustrative purposes. Actual values may vary. +// +// - The `result_index` indicates the portion of audio that has had final +// results returned, and is no longer being processed. For example, the +// `results` in #6 and later correspond to the portion of audio after +// "to be or not to be". +type StreamingRecognizeResponse struct { + // *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status] message that + // specifies the error for the operation. + Error *google_rpc.Status `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // *Output-only* This repeated list contains zero or more results that + // correspond to consecutive portions of the audio currently being processed. + // It contains zero or one `is_final=true` result (the newly settled portion), + // followed by zero or more `is_final=false` results. 
+ Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` + // *Output-only* Indicates the lowest index in the `results` array that has + // changed. The repeated `StreamingRecognitionResult` results overwrite past + // results at this index and higher. + ResultIndex int32 `protobuf:"varint,3,opt,name=result_index,json=resultIndex" json:"result_index,omitempty"` + // *Output-only* Indicates the type of endpointer event. + EndpointerType StreamingRecognizeResponse_EndpointerType `protobuf:"varint,4,opt,name=endpointer_type,json=endpointerType,enum=google.cloud.speech.v1beta1.StreamingRecognizeResponse_EndpointerType" json:"endpointer_type,omitempty"` +} + +func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} } +func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognizeResponse) ProtoMessage() {} +func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *StreamingRecognizeResponse) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +func (m *StreamingRecognizeResponse) GetResultIndex() int32 { + if m != nil { + return m.ResultIndex + } + return 0 +} + +func (m *StreamingRecognizeResponse) GetEndpointerType() StreamingRecognizeResponse_EndpointerType { + if m != nil { + return m.EndpointerType + } + return StreamingRecognizeResponse_ENDPOINTER_EVENT_UNSPECIFIED +} + +// A streaming speech recognition result corresponding to a portion of the audio +// that is currently being processed. +type StreamingRecognitionResult struct { + // *Output-only* May contain one or more recognition hypotheses (up to the + // maximum specified in `max_alternatives`). 
+ Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"` + // *Output-only* If `false`, this `StreamingRecognitionResult` represents an + // interim result that may change. If `true`, this is the final time the + // speech service will return this particular `StreamingRecognitionResult`, + // the recognizer will not return any further hypotheses for this portion of + // the transcript and corresponding audio. + IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal" json:"is_final,omitempty"` + // *Output-only* An estimate of the likelihood that the recognizer will not + // change its guess about this interim result. Values range from 0.0 + // (completely unstable) to 1.0 (completely stable). + // This field is only provided for interim results (`is_final=false`). + // The default of 0.0 is a sentinel value indicating `stability` was not set. + Stability float32 `protobuf:"fixed32,3,opt,name=stability" json:"stability,omitempty"` +} + +func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} } +func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognitionResult) ProtoMessage() {} +func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { + if m != nil { + return m.Alternatives + } + return nil +} + +func (m *StreamingRecognitionResult) GetIsFinal() bool { + if m != nil { + return m.IsFinal + } + return false +} + +func (m *StreamingRecognitionResult) GetStability() float32 { + if m != nil { + return m.Stability + } + return 0 +} + +// A speech recognition result corresponding to a portion of the audio. +type SpeechRecognitionResult struct { + // *Output-only* May contain one or more recognition hypotheses (up to the + // maximum specified in `max_alternatives`). 
+ Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"` +} + +func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} } +func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) } +func (*SpeechRecognitionResult) ProtoMessage() {} +func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { + if m != nil { + return m.Alternatives + } + return nil +} + +// Alternative hypotheses (a.k.a. n-best list). +type SpeechRecognitionAlternative struct { + // *Output-only* Transcript text representing the words that the user spoke. + Transcript string `protobuf:"bytes,1,opt,name=transcript" json:"transcript,omitempty"` + // *Output-only* The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is typically provided only for the top hypothesis, and + // only for `is_final=true` results. Clients should not rely on the + // `confidence` field as it is not guaranteed to be accurate, or even set, in + // any of the results. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. 
+ Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} } +func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) } +func (*SpeechRecognitionAlternative) ProtoMessage() {} +func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *SpeechRecognitionAlternative) GetTranscript() string { + if m != nil { + return m.Transcript + } + return "" +} + +func (m *SpeechRecognitionAlternative) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func init() { + proto.RegisterType((*SyncRecognizeRequest)(nil), "google.cloud.speech.v1beta1.SyncRecognizeRequest") + proto.RegisterType((*AsyncRecognizeRequest)(nil), "google.cloud.speech.v1beta1.AsyncRecognizeRequest") + proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1beta1.StreamingRecognizeRequest") + proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1beta1.StreamingRecognitionConfig") + proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1beta1.RecognitionConfig") + proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1beta1.SpeechContext") + proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1beta1.RecognitionAudio") + proto.RegisterType((*SyncRecognizeResponse)(nil), "google.cloud.speech.v1beta1.SyncRecognizeResponse") + proto.RegisterType((*AsyncRecognizeResponse)(nil), "google.cloud.speech.v1beta1.AsyncRecognizeResponse") + proto.RegisterType((*AsyncRecognizeMetadata)(nil), "google.cloud.speech.v1beta1.AsyncRecognizeMetadata") + proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1beta1.StreamingRecognizeResponse") + proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1beta1.StreamingRecognitionResult") + 
proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1beta1.SpeechRecognitionResult") + proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1beta1.SpeechRecognitionAlternative") + proto.RegisterEnum("google.cloud.speech.v1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value) + proto.RegisterEnum("google.cloud.speech.v1beta1.StreamingRecognizeResponse_EndpointerType", StreamingRecognizeResponse_EndpointerType_name, StreamingRecognizeResponse_EndpointerType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Speech service + +type SpeechClient interface { + // Performs synchronous speech recognition: receive results after all audio + // has been sent and processed. + SyncRecognize(ctx context.Context, in *SyncRecognizeRequest, opts ...grpc.CallOption) (*SyncRecognizeResponse, error) + // Performs asynchronous speech recognition: receive results via the + // [google.longrunning.Operations] + // (/speech/reference/rest/v1beta1/operations#Operation) + // interface. Returns either an + // `Operation.error` or an `Operation.response` which contains + // an `AsyncRecognizeResponse` message. + AsyncRecognize(ctx context.Context, in *AsyncRecognizeRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Performs bidirectional streaming speech recognition: receive results while + // sending audio. This method is only available via the gRPC API (not REST). 
+ StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) +} + +type speechClient struct { + cc *grpc.ClientConn +} + +func NewSpeechClient(cc *grpc.ClientConn) SpeechClient { + return &speechClient{cc} +} + +func (c *speechClient) SyncRecognize(ctx context.Context, in *SyncRecognizeRequest, opts ...grpc.CallOption) (*SyncRecognizeResponse, error) { + out := new(SyncRecognizeResponse) + err := grpc.Invoke(ctx, "/google.cloud.speech.v1beta1.Speech/SyncRecognize", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *speechClient) AsyncRecognize(ctx context.Context, in *AsyncRecognizeRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.speech.v1beta1.Speech/AsyncRecognize", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Speech_serviceDesc.Streams[0], c.cc, "/google.cloud.speech.v1beta1.Speech/StreamingRecognize", opts...) 
+ if err != nil { + return nil, err + } + x := &speechStreamingRecognizeClient{stream} + return x, nil +} + +type Speech_StreamingRecognizeClient interface { + Send(*StreamingRecognizeRequest) error + Recv() (*StreamingRecognizeResponse, error) + grpc.ClientStream +} + +type speechStreamingRecognizeClient struct { + grpc.ClientStream +} + +func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) { + m := new(StreamingRecognizeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Speech service + +type SpeechServer interface { + // Performs synchronous speech recognition: receive results after all audio + // has been sent and processed. + SyncRecognize(context.Context, *SyncRecognizeRequest) (*SyncRecognizeResponse, error) + // Performs asynchronous speech recognition: receive results via the + // [google.longrunning.Operations] + // (/speech/reference/rest/v1beta1/operations#Operation) + // interface. Returns either an + // `Operation.error` or an `Operation.response` which contains + // an `AsyncRecognizeResponse` message. + AsyncRecognize(context.Context, *AsyncRecognizeRequest) (*google_longrunning.Operation, error) + // Performs bidirectional streaming speech recognition: receive results while + // sending audio. This method is only available via the gRPC API (not REST). 
+ StreamingRecognize(Speech_StreamingRecognizeServer) error +} + +func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) { + s.RegisterService(&_Speech_serviceDesc, srv) +} + +func _Speech_SyncRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SyncRecognizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpeechServer).SyncRecognize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.speech.v1beta1.Speech/SyncRecognize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpeechServer).SyncRecognize(ctx, req.(*SyncRecognizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Speech_AsyncRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AsyncRecognizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpeechServer).AsyncRecognize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.speech.v1beta1.Speech/AsyncRecognize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpeechServer).AsyncRecognize(ctx, req.(*AsyncRecognizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream}) +} + +type Speech_StreamingRecognizeServer interface { + Send(*StreamingRecognizeResponse) error + Recv() (*StreamingRecognizeRequest, error) + grpc.ServerStream +} + +type speechStreamingRecognizeServer struct { + grpc.ServerStream +} + +func (x *speechStreamingRecognizeServer) Send(m 
*StreamingRecognizeResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) { + m := new(StreamingRecognizeRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Speech_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.speech.v1beta1.Speech", + HandlerType: (*SpeechServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SyncRecognize", + Handler: _Speech_SyncRecognize_Handler, + }, + { + MethodName: "AsyncRecognize", + Handler: _Speech_AsyncRecognize_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingRecognize", + Handler: _Speech_StreamingRecognize_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/cloud/speech/v1beta1/cloud_speech.proto", +} + +func init() { proto.RegisterFile("google/cloud/speech/v1beta1/cloud_speech.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1214 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xcf, 0xda, 0x71, 0x5e, 0x9e, 0xd8, 0xce, 0x76, 0xda, 0xfe, 0xeb, 0xfa, 0x9f, 0xb6, 0x61, + 0x2b, 0x44, 0x5a, 0x89, 0x35, 0x09, 0xa8, 0x55, 0x0b, 0x17, 0xc7, 0x59, 0x13, 0x4b, 0x8d, 0x93, + 0x4e, 0x1c, 0x8a, 0x90, 0x60, 0x35, 0x59, 0x4f, 0xb6, 0x2b, 0xd9, 0x33, 0xcb, 0xcc, 0x6c, 0x95, + 0x70, 0xec, 0x8d, 0x0b, 0x17, 0xbe, 0x00, 0x12, 0x7c, 0x02, 0xc4, 0x81, 0x0b, 0xe2, 0xc2, 0x81, + 0x3b, 0x5f, 0x81, 0x0f, 0x82, 0x76, 0x66, 0x37, 0xb1, 0x9d, 0xc6, 0x34, 0x88, 0x4a, 0xdc, 0xfc, + 0xfc, 0x9e, 0x97, 0xf9, 0xed, 0x33, 0xcf, 0xcb, 0x18, 0xdc, 0x90, 0xf3, 0x70, 0x40, 0x1b, 0xc1, + 0x80, 0x27, 0xfd, 0x86, 0x8c, 0x29, 0x0d, 0x9e, 0x37, 0x5e, 0xac, 0x1f, 0x52, 0x45, 0xd6, 0x0d, + 0xe8, 0x1b, 0xd0, 0x8d, 0x05, 0x57, 0x1c, 0xfd, 0xdf, 0xd8, 0xbb, 0x5a, 0xe5, 0x66, 0xaa, 0xcc, + 0xbe, 0xbe, 0x92, 0x05, 0x23, 0x71, 0xd4, 0x20, 0x8c, 
0x71, 0x45, 0x54, 0xc4, 0x99, 0x34, 0xae, + 0xf5, 0xbb, 0x99, 0x76, 0xc0, 0x59, 0x28, 0x12, 0xc6, 0x22, 0x16, 0x36, 0x78, 0x4c, 0xc5, 0x98, + 0xd1, 0xed, 0xcc, 0x48, 0x4b, 0x87, 0xc9, 0x51, 0xa3, 0x9f, 0x18, 0x83, 0x4c, 0x7f, 0x67, 0x52, + 0xaf, 0xa2, 0x21, 0x95, 0x8a, 0x0c, 0xe3, 0xcc, 0xe0, 0x46, 0x66, 0x20, 0xe2, 0xa0, 0x21, 0x15, + 0x51, 0x49, 0x16, 0xd9, 0xf9, 0xde, 0x82, 0x6b, 0xfb, 0x27, 0x2c, 0xc0, 0x34, 0xe0, 0x21, 0x8b, + 0xbe, 0xa2, 0x98, 0x7e, 0x99, 0x50, 0xa9, 0x50, 0x1b, 0xe6, 0x02, 0xce, 0x8e, 0xa2, 0xb0, 0x66, + 0xad, 0x5a, 0x6b, 0x4b, 0x1b, 0xae, 0x3b, 0xe5, 0x1b, 0xdd, 0xcc, 0x3d, 0xa5, 0xd4, 0xd2, 0x5e, + 0x38, 0xf3, 0x46, 0x2d, 0x28, 0x91, 0xa4, 0x1f, 0xf1, 0x5a, 0x41, 0x87, 0x79, 0xf7, 0x75, 0xc3, + 0x34, 0x53, 0x27, 0x6c, 0x7c, 0x9d, 0x1f, 0x2c, 0xb8, 0xde, 0x94, 0xff, 0x79, 0x9a, 0xbf, 0x58, + 0x70, 0x73, 0x5f, 0x09, 0x4a, 0x86, 0x11, 0x0b, 0xcf, 0x51, 0xed, 0x83, 0x2d, 0x73, 0xa5, 0x3f, + 0x46, 0xfa, 0xe1, 0xd4, 0xd3, 0x26, 0x23, 0x9e, 0xb1, 0xdf, 0x9e, 0xc1, 0xcb, 0xa7, 0x21, 0x0d, + 0x84, 0xde, 0x86, 0x8a, 0x26, 0x93, 0x9e, 0xa0, 0x28, 0x53, 0xfa, 0x83, 0xca, 0xdb, 0x33, 0xb8, + 0xac, 0xe1, 0x96, 0x41, 0x37, 0xaf, 0xc2, 0x95, 0x33, 0x32, 0xc2, 0x30, 0x74, 0x7e, 0xb6, 0xa0, + 0x7e, 0xf1, 0x69, 0xff, 0x5a, 0xae, 0xef, 0x81, 0x2d, 0x23, 0x16, 0x0e, 0xa8, 0x9f, 0x28, 0x45, + 0x05, 0x61, 0x01, 0xd5, 0x2c, 0x17, 0xf0, 0xb2, 0xc1, 0x0f, 0x72, 0x18, 0xbd, 0x03, 0xcb, 0x11, + 0x53, 0x54, 0x44, 0x43, 0x5f, 0x50, 0x99, 0x0c, 0x94, 0xac, 0x15, 0xb5, 0x65, 0x35, 0x83, 0xb1, + 0x41, 0x9d, 0x5f, 0x8b, 0x70, 0xe5, 0x3c, 0xe3, 0x4f, 0x61, 0x81, 0xb2, 0x80, 0xf7, 0x23, 0x66, + 0x38, 0x57, 0x37, 0x3e, 0xba, 0x1c, 0x67, 0x57, 0xdf, 0xaf, 0x97, 0xc5, 0xc0, 0xa7, 0xd1, 0xd0, + 0x1d, 0x58, 0x92, 0x64, 0x18, 0x0f, 0xa8, 0x2f, 0x88, 0x32, 0xf4, 0x4b, 0x18, 0x0c, 0x84, 0x89, + 0xa2, 0xe8, 0x2e, 0x54, 0x06, 0x84, 0x85, 0x09, 0x09, 0xa9, 0x1f, 0xf0, 0x3e, 0xd5, 0xbc, 0x17, + 0x71, 0x39, 0x07, 0x5b, 0xbc, 0x4f, 0xd3, 0x4c, 0x0c, 0xc9, 0xb1, 0x4f, 0x06, 0x8a, 0x0a, 
0x46, + 0x54, 0xf4, 0x82, 0xca, 0xda, 0xac, 0x0e, 0xb5, 0x3c, 0x24, 0xc7, 0xcd, 0x11, 0x38, 0x35, 0x8d, + 0x05, 0x3f, 0x22, 0x2c, 0x52, 0x27, 0xfe, 0x51, 0x94, 0xaa, 0x6a, 0x25, 0x93, 0xb4, 0x53, 0xbc, + 0xad, 0x61, 0xf4, 0x14, 0xaa, 0xe6, 0xbb, 0x4c, 0x0d, 0x1c, 0xab, 0xda, 0x9c, 0xbe, 0xaf, 0xfb, + 0xd3, 0xcb, 0x4c, 0x8b, 0x2d, 0xe3, 0x81, 0x2b, 0x72, 0x54, 0x74, 0x08, 0x54, 0xc6, 0x32, 0x81, + 0x6a, 0x70, 0xcd, 0xeb, 0xb6, 0x76, 0xb7, 0x3a, 0xdd, 0x8f, 0xfd, 0x83, 0xee, 0xfe, 0x9e, 0xd7, + 0xea, 0xb4, 0x3b, 0xde, 0x96, 0x3d, 0x83, 0xca, 0xb0, 0xf0, 0xa4, 0xd3, 0xf5, 0x9a, 0x78, 0xfd, + 0x81, 0x6d, 0xa1, 0x05, 0x98, 0x6d, 0x3f, 0x69, 0xb6, 0xec, 0x02, 0x5a, 0x84, 0xd2, 0xce, 0xc1, + 0x93, 0xe6, 0x33, 0xbb, 0x88, 0xe6, 0xa1, 0xd8, 0xdc, 0xc1, 0xf6, 0x2c, 0x02, 0x98, 0x6b, 0xee, + 0x60, 0xff, 0xd9, 0xa6, 0x5d, 0x72, 0xee, 0x41, 0x65, 0x8c, 0x02, 0xaa, 0xc1, 0x7c, 0xfc, 0x5c, + 0x10, 0x49, 0x65, 0xcd, 0x5a, 0x2d, 0xae, 0x2d, 0xe2, 0x5c, 0x74, 0x30, 0xd8, 0x93, 0x2d, 0x88, + 0xea, 0x30, 0x9f, 0x57, 0xbc, 0x95, 0x55, 0x7c, 0x0e, 0x20, 0x04, 0xc5, 0x44, 0x44, 0xfa, 0x92, + 0x16, 0xb7, 0x67, 0x70, 0x2a, 0x6c, 0x56, 0xc1, 0x34, 0x84, 0x2f, 0x79, 0x22, 0x02, 0xea, 0x84, + 0x70, 0x7d, 0x62, 0x0e, 0xca, 0x98, 0x33, 0x49, 0x51, 0x17, 0xe6, 0xf3, 0xd2, 0x2b, 0xac, 0x16, + 0xd7, 0x96, 0x36, 0x3e, 0x78, 0x8d, 0x34, 0x8e, 0xd0, 0x33, 0x15, 0x8a, 0xf3, 0x20, 0xce, 0x73, + 0xf8, 0xdf, 0xe4, 0x28, 0x7b, 0x43, 0x27, 0xfd, 0x66, 0x4d, 0x1e, 0xb5, 0x43, 0x15, 0xe9, 0x13, + 0x45, 0xb2, 0x6a, 0x0a, 0x05, 0x95, 0xd2, 0x8f, 0xa9, 0x08, 0xf2, 0xb4, 0x95, 0x74, 0x35, 0x69, + 0x7c, 0xcf, 0xc0, 0xe8, 0x11, 0x80, 0x54, 0x44, 0x28, 0x3f, 0xdd, 0x29, 0xd9, 0x78, 0xac, 0xe7, + 0xc4, 0xf2, 0x85, 0xe3, 0xf6, 0xf2, 0x85, 0x83, 0x17, 0xb5, 0x75, 0x2a, 0xa3, 0x2d, 0xb0, 0x07, + 0x44, 0x2a, 0x3f, 0x89, 0xfb, 0x44, 0x51, 0x13, 0xa0, 0xf8, 0xb7, 0x01, 0xaa, 0xa9, 0xcf, 0x81, + 0x76, 0x49, 0x41, 0xe7, 0xc7, 0xe2, 0xf9, 0xa9, 0x34, 0x92, 0xb5, 0x35, 0x28, 0x51, 0x21, 0xb8, + 0xc8, 0x86, 0x12, 0xca, 0x23, 
0x8b, 0x38, 0x70, 0xf7, 0xf5, 0xaa, 0xc3, 0xc6, 0x00, 0x3d, 0x9d, + 0xcc, 0xef, 0xe5, 0xe7, 0xee, 0x44, 0x8a, 0xd1, 0x5b, 0x50, 0x36, 0x3f, 0xfd, 0x88, 0xf5, 0xe9, + 0xb1, 0xfe, 0xba, 0x12, 0x5e, 0x32, 0x58, 0x27, 0x85, 0x10, 0x87, 0x65, 0xca, 0xfa, 0x31, 0xd7, + 0x03, 0xcb, 0x57, 0x27, 0x31, 0xd5, 0x2d, 0x5e, 0xdd, 0x68, 0x5f, 0xea, 0xf4, 0xb3, 0x2f, 0x76, + 0xbd, 0xd3, 0x70, 0xbd, 0x93, 0x98, 0xe2, 0x2a, 0x1d, 0x93, 0x9d, 0x97, 0x16, 0x54, 0xc7, 0x4d, + 0xd0, 0x2a, 0xac, 0x78, 0xdd, 0xad, 0xbd, 0xdd, 0x4e, 0xb7, 0xe7, 0x61, 0xdf, 0xfb, 0xc4, 0xeb, + 0xf6, 0x26, 0xba, 0xf6, 0x2a, 0x2c, 0xef, 0xf7, 0x9a, 0xb8, 0xe7, 0xef, 0xb6, 0xfd, 0xfd, 0x3d, + 0xcf, 0x6b, 0x6d, 0xdb, 0x16, 0xba, 0x02, 0x15, 0xaf, 0xbb, 0x35, 0x02, 0x15, 0x90, 0x0d, 0xe5, + 0x0c, 0x6a, 0x1e, 0x6c, 0x75, 0x76, 0xed, 0x22, 0xba, 0x06, 0x76, 0x86, 0x1c, 0xf4, 0x7a, 0x1e, + 0x6e, 0x76, 0x5b, 0x9e, 0x3d, 0xeb, 0xfc, 0x74, 0xc1, 0x2a, 0x31, 0x09, 0x44, 0x9f, 0x43, 0x79, + 0x6c, 0xe8, 0x59, 0xfa, 0x3e, 0x1e, 0x5d, 0xae, 0xde, 0x47, 0xe6, 0x23, 0x1e, 0x0b, 0x87, 0x6e, + 0xc2, 0x42, 0x24, 0xfd, 0xa3, 0x88, 0x91, 0x41, 0xb6, 0x59, 0xe6, 0x23, 0xd9, 0x4e, 0x45, 0xb4, + 0x02, 0x69, 0x81, 0x1e, 0x46, 0x83, 0x48, 0x9d, 0xe8, 0xeb, 0x2a, 0xe0, 0x33, 0xc0, 0x39, 0x86, + 0x1b, 0x17, 0xb4, 0xd5, 0x1b, 0xa6, 0xec, 0x7c, 0x01, 0x2b, 0xd3, 0xac, 0xd1, 0x6d, 0x00, 0x25, + 0x08, 0x93, 0x81, 0x88, 0x62, 0xd3, 0xab, 0x8b, 0x78, 0x04, 0x49, 0xf5, 0x7a, 0xbd, 0xf6, 0x69, + 0xbe, 0x4e, 0x0b, 0x78, 0x04, 0xd9, 0xf8, 0xbd, 0x08, 0x73, 0xe6, 0x00, 0xf4, 0x9d, 0x05, 0x95, + 0xb1, 0x59, 0x87, 0xd6, 0xa7, 0x7f, 0xc5, 0x2b, 0x1e, 0x5e, 0xf5, 0x8d, 0xcb, 0xb8, 0x98, 0xc2, + 0x75, 0xd6, 0x5e, 0xfe, 0xf1, 0xe7, 0xb7, 0x05, 0xc7, 0xb9, 0x75, 0xfa, 0x96, 0x36, 0x6e, 0x8f, + 0xd3, 0x21, 0x25, 0x72, 0xf3, 0xc7, 0xd6, 0x7d, 0xf4, 0x8d, 0x05, 0xd5, 0xf1, 0xd1, 0x85, 0xa6, + 0x1f, 0xf8, 0xca, 0xd7, 0x61, 0xfd, 0x56, 0xee, 0x33, 0xf2, 0xba, 0x76, 0x77, 0xf3, 0xd7, 0xb5, + 0x73, 0x4f, 0xf3, 0xb9, 0xeb, 0xdc, 0x9e, 0xe4, 0x43, 0xce, 0x11, 
0xfa, 0xda, 0x02, 0x74, 0xbe, + 0x25, 0xd1, 0x83, 0x4b, 0xf7, 0xb0, 0x21, 0xf6, 0xf0, 0x1f, 0xf6, 0xfe, 0x9a, 0xf5, 0x9e, 0xb5, + 0x29, 0xe1, 0x4e, 0xc0, 0x87, 0xd3, 0x22, 0x6c, 0x2e, 0x99, 0xab, 0xde, 0x4b, 0xa7, 0xeb, 0x9e, + 0xf5, 0x59, 0x33, 0xb3, 0x0d, 0x79, 0xfa, 0xfc, 0x70, 0xb9, 0x08, 0x1b, 0x21, 0x65, 0x7a, 0xf6, + 0x36, 0x8c, 0x8a, 0xc4, 0x91, 0x7c, 0xe5, 0xdf, 0x9d, 0x0f, 0x8d, 0x78, 0x38, 0xa7, 0xad, 0xdf, + 0xff, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x25, 0x0d, 0x06, 0x48, 0x1b, 0x0d, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go new file mode 100644 index 000000000..1a65d2eb4 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go @@ -0,0 +1,1646 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/speech/v1_1beta1/cloud_speech.proto + +/* +Package speech is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/speech/v1_1beta1/cloud_speech.proto + +It has these top-level messages: + RecognizeRequest + LongRunningRecognizeRequest + StreamingRecognizeRequest + StreamingRecognitionConfig + RecognitionConfig + RecognitionMetadata + SpeechContext + RecognitionAudio + RecognizeResponse + LongRunningRecognizeResponse + LongRunningRecognizeMetadata + StreamingRecognizeResponse + StreamingRecognitionResult + SpeechRecognitionResult + SpeechRecognitionAlternative + WordInfo +*/ +package speech + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Audio encoding of the data sent in the audio message. All encodings support +// only 1 channel (mono) audio. Only `FLAC` includes a header that describes +// the bytes of audio that follow the header. The other encodings are raw +// audio bytes with no header. +// +// For best results, the audio source should be captured and transmitted using +// a lossless encoding (`FLAC` or `LINEAR16`). 
Recognition accuracy may be +// reduced if lossy codecs, which include the other codecs listed in +// this section, are used to capture or transmit the audio, particularly if +// background noise is present. +type RecognitionConfig_AudioEncoding int32 + +const ( + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. + RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0 + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1 + // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio + // Codec) is the recommended encoding because it is + // lossless--therefore recognition is not compromised--and + // requires only about half the bandwidth of `LINEAR16`. `FLAC` stream + // encoding supports 16-bit and 24-bit samples, however, not all fields in + // `STREAMINFO` are supported. + RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2 + // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. + RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3 + // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000. + RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4 + // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000. + RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5 + // Opus encoded audio frames in Ogg container + // ([OggOpus](https://wiki.xiph.org/OggOpus)). + // `sample_rate_hertz` must be 16000. + RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6 + // Although the use of lossy encodings is not recommended, if a very low + // bitrate encoding is required, `OGG_OPUS` is highly preferred over + // Speex encoding. The [Speex](https://speex.org/) encoding supported by + // Cloud Speech API has a header byte in each block, as in MIME type + // `audio/x-speex-with-header-byte`. 
+ // It is a variant of the RTP Speex encoding defined in + // [RFC 5574](https://tools.ietf.org/html/rfc5574). + // The stream is a sequence of blocks, one block per RTP packet. Each block + // starts with a byte containing the length of the block, in bytes, followed + // by one or more frames of Speex data, padded to an integral number of + // bytes (octets) as specified in RFC 5574. In other words, each RTP header + // is replaced with a single byte containing the block length. Only Speex + // wideband is supported. `sample_rate_hertz` must be 16000. + RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7 +) + +var RecognitionConfig_AudioEncoding_name = map[int32]string{ + 0: "ENCODING_UNSPECIFIED", + 1: "LINEAR16", + 2: "FLAC", + 3: "MULAW", + 4: "AMR", + 5: "AMR_WB", + 6: "OGG_OPUS", + 7: "SPEEX_WITH_HEADER_BYTE", +} +var RecognitionConfig_AudioEncoding_value = map[string]int32{ + "ENCODING_UNSPECIFIED": 0, + "LINEAR16": 1, + "FLAC": 2, + "MULAW": 3, + "AMR": 4, + "AMR_WB": 5, + "OGG_OPUS": 6, + "SPEEX_WITH_HEADER_BYTE": 7, +} + +func (x RecognitionConfig_AudioEncoding) String() string { + return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x)) +} +func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 0} +} + +// Use case categories that the audio recognition request can be described +// by. +type RecognitionMetadata_InteractionType int32 + +const ( + // Use case is either unknown or is something other than one of the other + // values below. + RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED RecognitionMetadata_InteractionType = 0 + // Multiple people in a conversation or discussion. For example in a + // meeting with two or more people actively participating. 
Typically + // all the primary people speaking would be in the same room (if not, + // see PHONE_CALL) + RecognitionMetadata_DISCUSSION RecognitionMetadata_InteractionType = 1 + // One or more persons lecturing or presenting to others, mostly + // uninterrupted. + RecognitionMetadata_PRESENTATION RecognitionMetadata_InteractionType = 2 + // A phone-call or video-conference in which two or more people, who are + // not in the same room, are actively participating. + RecognitionMetadata_PHONE_CALL RecognitionMetadata_InteractionType = 3 + // A recorded message intended for another person to listen to. + RecognitionMetadata_VOICEMAIL RecognitionMetadata_InteractionType = 4 + // Professionally produced audio (eg. TV Show, Podcast). + RecognitionMetadata_PROFESSIONALLY_PRODUCED RecognitionMetadata_InteractionType = 5 + // Transcribe spoken questions and queries into text. + RecognitionMetadata_VOICE_SEARCH RecognitionMetadata_InteractionType = 6 + // Transcribe voice commands, such as for controlling a device. + RecognitionMetadata_VOICE_COMMAND RecognitionMetadata_InteractionType = 7 + // Transcribe speech to text to create a written document, such as a + // text-message, email or report. 
+ RecognitionMetadata_DICTATION RecognitionMetadata_InteractionType = 8 +) + +var RecognitionMetadata_InteractionType_name = map[int32]string{ + 0: "INTERACTION_TYPE_UNSPECIFIED", + 1: "DISCUSSION", + 2: "PRESENTATION", + 3: "PHONE_CALL", + 4: "VOICEMAIL", + 5: "PROFESSIONALLY_PRODUCED", + 6: "VOICE_SEARCH", + 7: "VOICE_COMMAND", + 8: "DICTATION", +} +var RecognitionMetadata_InteractionType_value = map[string]int32{ + "INTERACTION_TYPE_UNSPECIFIED": 0, + "DISCUSSION": 1, + "PRESENTATION": 2, + "PHONE_CALL": 3, + "VOICEMAIL": 4, + "PROFESSIONALLY_PRODUCED": 5, + "VOICE_SEARCH": 6, + "VOICE_COMMAND": 7, + "DICTATION": 8, +} + +func (x RecognitionMetadata_InteractionType) String() string { + return proto.EnumName(RecognitionMetadata_InteractionType_name, int32(x)) +} +func (RecognitionMetadata_InteractionType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 0} +} + +// Enumerates the types of capture settings describing an audio file. +type RecognitionMetadata_MicrophoneDistance int32 + +const ( + // Audio type is not known. + RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED RecognitionMetadata_MicrophoneDistance = 0 + // The audio was captured from a closely placed microphone. Eg. phone, + // dictaphone, or handheld microphone. Generally if there speaker is within + // 1 meter of the microphone. + RecognitionMetadata_NEARFIELD RecognitionMetadata_MicrophoneDistance = 1 + // The speaker if within 3 meters of the microphone. + RecognitionMetadata_MIDFIELD RecognitionMetadata_MicrophoneDistance = 2 + // The speaker is more than 3 meters away from the microphone. 
+ RecognitionMetadata_FARFIELD RecognitionMetadata_MicrophoneDistance = 3 +) + +var RecognitionMetadata_MicrophoneDistance_name = map[int32]string{ + 0: "MICROPHONE_DISTANCE_UNSPECIFIED", + 1: "NEARFIELD", + 2: "MIDFIELD", + 3: "FARFIELD", +} +var RecognitionMetadata_MicrophoneDistance_value = map[string]int32{ + "MICROPHONE_DISTANCE_UNSPECIFIED": 0, + "NEARFIELD": 1, + "MIDFIELD": 2, + "FARFIELD": 3, +} + +func (x RecognitionMetadata_MicrophoneDistance) String() string { + return proto.EnumName(RecognitionMetadata_MicrophoneDistance_name, int32(x)) +} +func (RecognitionMetadata_MicrophoneDistance) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 1} +} + +// The original media the speech was recorded on. +type RecognitionMetadata_OriginalMediaType int32 + +const ( + // Unknown original media type. + RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED RecognitionMetadata_OriginalMediaType = 0 + // The speech data is an audio recording. + RecognitionMetadata_AUDIO RecognitionMetadata_OriginalMediaType = 1 + // The speech data originally recorded on a video. + RecognitionMetadata_VIDEO RecognitionMetadata_OriginalMediaType = 2 +) + +var RecognitionMetadata_OriginalMediaType_name = map[int32]string{ + 0: "ORIGINAL_MEDIA_TYPE_UNSPECIFIED", + 1: "AUDIO", + 2: "VIDEO", +} +var RecognitionMetadata_OriginalMediaType_value = map[string]int32{ + "ORIGINAL_MEDIA_TYPE_UNSPECIFIED": 0, + "AUDIO": 1, + "VIDEO": 2, +} + +func (x RecognitionMetadata_OriginalMediaType) String() string { + return proto.EnumName(RecognitionMetadata_OriginalMediaType_name, int32(x)) +} +func (RecognitionMetadata_OriginalMediaType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 2} +} + +// How many speakers expected in the speech to be recognized. +type RecognitionMetadata_NumberOfSpeakers int32 + +const ( + // Unknown number of persons speaking. 
+ RecognitionMetadata_NUMBER_OF_SPEAKERS_UNSPECIFIED RecognitionMetadata_NumberOfSpeakers = 0 + // Only one person is the prominent speaker (ignore background voices). + RecognitionMetadata_ONE_SPEAKER RecognitionMetadata_NumberOfSpeakers = 1 + // Two people are the prominent speakers (transcript should focus + // on the two most prominent speakers). + RecognitionMetadata_TWO_SPEAKERS RecognitionMetadata_NumberOfSpeakers = 2 + // Transcribe all voices. + RecognitionMetadata_MULTIPLE_SPEAKERS RecognitionMetadata_NumberOfSpeakers = 3 +) + +var RecognitionMetadata_NumberOfSpeakers_name = map[int32]string{ + 0: "NUMBER_OF_SPEAKERS_UNSPECIFIED", + 1: "ONE_SPEAKER", + 2: "TWO_SPEAKERS", + 3: "MULTIPLE_SPEAKERS", +} +var RecognitionMetadata_NumberOfSpeakers_value = map[string]int32{ + "NUMBER_OF_SPEAKERS_UNSPECIFIED": 0, + "ONE_SPEAKER": 1, + "TWO_SPEAKERS": 2, + "MULTIPLE_SPEAKERS": 3, +} + +func (x RecognitionMetadata_NumberOfSpeakers) String() string { + return proto.EnumName(RecognitionMetadata_NumberOfSpeakers_name, int32(x)) +} +func (RecognitionMetadata_NumberOfSpeakers) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 3} +} + +// The type of device the speech was recorded with. +type RecognitionMetadata_RecordingDeviceType int32 + +const ( + // The recording device is unknown. + RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED RecognitionMetadata_RecordingDeviceType = 0 + // Speech was recorded on a smartphone. + RecognitionMetadata_SMARTPHONE RecognitionMetadata_RecordingDeviceType = 1 + // Speech was recorded using a personal computer or tablet. + RecognitionMetadata_PC RecognitionMetadata_RecordingDeviceType = 2 + // Speech was recorded over a phone line. + RecognitionMetadata_PHONE_LINE RecognitionMetadata_RecordingDeviceType = 3 + // Speech was recorded in a vehicle. + RecognitionMetadata_VEHICLE RecognitionMetadata_RecordingDeviceType = 4 + // Speech was recorded outdoors. 
+ RecognitionMetadata_OTHER_OUTDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 5 + // Speech was recorded indoors. + RecognitionMetadata_OTHER_INDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 6 +) + +var RecognitionMetadata_RecordingDeviceType_name = map[int32]string{ + 0: "RECORDING_DEVICE_TYPE_UNSPECIFIED", + 1: "SMARTPHONE", + 2: "PC", + 3: "PHONE_LINE", + 4: "VEHICLE", + 5: "OTHER_OUTDOOR_DEVICE", + 6: "OTHER_INDOOR_DEVICE", +} +var RecognitionMetadata_RecordingDeviceType_value = map[string]int32{ + "RECORDING_DEVICE_TYPE_UNSPECIFIED": 0, + "SMARTPHONE": 1, + "PC": 2, + "PHONE_LINE": 3, + "VEHICLE": 4, + "OTHER_OUTDOOR_DEVICE": 5, + "OTHER_INDOOR_DEVICE": 6, +} + +func (x RecognitionMetadata_RecordingDeviceType) String() string { + return proto.EnumName(RecognitionMetadata_RecordingDeviceType_name, int32(x)) +} +func (RecognitionMetadata_RecordingDeviceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 4} +} + +// Indicates the type of speech event. +type StreamingRecognizeResponse_SpeechEventType int32 + +const ( + // No speech event specified. + StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0 + // This event indicates that the server has detected the end of the user's + // speech utterance and expects no additional speech. Therefore, the server + // will not process additional audio (although it may subsequently return + // additional results). The client should stop sending additional audio + // data, half-close the gRPC connection, and wait for any additional results + // until the server closes the gRPC connection. This event is only sent if + // `single_utterance` was set to `true`, and is not used otherwise. 
+ StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1 +) + +var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{ + 0: "SPEECH_EVENT_UNSPECIFIED", + 1: "END_OF_SINGLE_UTTERANCE", +} +var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{ + "SPEECH_EVENT_UNSPECIFIED": 0, + "END_OF_SINGLE_UTTERANCE": 1, +} + +func (x StreamingRecognizeResponse_SpeechEventType) String() string { + return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x)) +} +func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{11, 0} +} + +// The top-level message sent by the client for the `Recognize` method. +type RecognizeRequest struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. + Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Required* The audio data to be recognized. + Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio" json:"audio,omitempty"` +} + +func (m *RecognizeRequest) Reset() { *m = RecognizeRequest{} } +func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*RecognizeRequest) ProtoMessage() {} +func (*RecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *RecognizeRequest) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *RecognizeRequest) GetAudio() *RecognitionAudio { + if m != nil { + return m.Audio + } + return nil +} + +// The top-level message sent by the client for the `LongRunningRecognize` +// method. +type LongRunningRecognizeRequest struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. + Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Required* The audio data to be recognized. 
+ Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio" json:"audio,omitempty"` +} + +func (m *LongRunningRecognizeRequest) Reset() { *m = LongRunningRecognizeRequest{} } +func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*LongRunningRecognizeRequest) ProtoMessage() {} +func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio { + if m != nil { + return m.Audio + } + return nil +} + +// The top-level message sent by the client for the `StreamingRecognize` method. +// Multiple `StreamingRecognizeRequest` messages are sent. The first message +// must contain a `streaming_config` message and must not contain `audio` data. +// All subsequent messages must contain `audio` data and must not contain a +// `streaming_config` message. +type StreamingRecognizeRequest struct { + // The streaming request, which is either a streaming config or audio content. 
+ // + // Types that are valid to be assigned to StreamingRequest: + // *StreamingRecognizeRequest_StreamingConfig + // *StreamingRecognizeRequest_AudioContent + StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"` +} + +func (m *StreamingRecognizeRequest) Reset() { *m = StreamingRecognizeRequest{} } +func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognizeRequest) ProtoMessage() {} +func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isStreamingRecognizeRequest_StreamingRequest interface { + isStreamingRecognizeRequest_StreamingRequest() +} + +type StreamingRecognizeRequest_StreamingConfig struct { + StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,oneof"` +} +type StreamingRecognizeRequest_AudioContent struct { + AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"` +} + +func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {} +func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {} + +func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest { + if m != nil { + return m.StreamingRequest + } + return nil +} + +func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig { + if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok { + return x.StreamingConfig + } + return nil +} + +func (m *StreamingRecognizeRequest) GetAudioContent() []byte { + if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok { + return x.AudioContent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StreamingRecognizeRequest_OneofMarshaler, _StreamingRecognizeRequest_OneofUnmarshaler, _StreamingRecognizeRequest_OneofSizer, []interface{}{ + (*StreamingRecognizeRequest_StreamingConfig)(nil), + (*StreamingRecognizeRequest_AudioContent)(nil), + } +} + +func _StreamingRecognizeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StreamingRecognizeRequest) + // streaming_request + switch x := m.StreamingRequest.(type) { + case *StreamingRecognizeRequest_StreamingConfig: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StreamingConfig); err != nil { + return err + } + case *StreamingRecognizeRequest_AudioContent: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.AudioContent) + case nil: + default: + return fmt.Errorf("StreamingRecognizeRequest.StreamingRequest has unexpected type %T", x) + } + return nil +} + +func _StreamingRecognizeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StreamingRecognizeRequest) + switch tag { + case 1: // streaming_request.streaming_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StreamingRecognitionConfig) + err := b.DecodeMessage(msg) + m.StreamingRequest = &StreamingRecognizeRequest_StreamingConfig{msg} + return true, err + case 2: // streaming_request.audio_content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.StreamingRequest = &StreamingRecognizeRequest_AudioContent{x} + return true, err + default: + return false, nil + } +} + +func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StreamingRecognizeRequest) + // streaming_request + 
switch x := m.StreamingRequest.(type) { + case *StreamingRecognizeRequest_StreamingConfig: + s := proto.Size(x.StreamingConfig) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StreamingRecognizeRequest_AudioContent: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AudioContent))) + n += len(x.AudioContent) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Provides information to the recognizer that specifies how to process the +// request. +type StreamingRecognitionConfig struct { + // *Required* Provides information to the recognizer that specifies how to + // process the request. + Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + // *Optional* If `false` or omitted, the recognizer will perform continuous + // recognition (continuing to wait for and process audio even if the user + // pauses speaking) until the client closes the input stream (gRPC API) or + // until the maximum time limit has been reached. May return multiple + // `StreamingRecognitionResult`s with the `is_final` flag set to `true`. + // + // If `true`, the recognizer will detect a single spoken utterance. When it + // detects that the user has paused or stopped speaking, it will return an + // `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no + // more than one `StreamingRecognitionResult` with the `is_final` flag set to + // `true`. + SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance" json:"single_utterance,omitempty"` + // *Optional* If `true`, interim results (tentative hypotheses) may be + // returned as they become available (these interim results are indicated with + // the `is_final=false` flag). + // If `false` or omitted, only `is_final=true` result(s) are returned. 
+ InterimResults bool `protobuf:"varint,3,opt,name=interim_results,json=interimResults" json:"interim_results,omitempty"` +} + +func (m *StreamingRecognitionConfig) Reset() { *m = StreamingRecognitionConfig{} } +func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognitionConfig) ProtoMessage() {} +func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *StreamingRecognitionConfig) GetSingleUtterance() bool { + if m != nil { + return m.SingleUtterance + } + return false +} + +func (m *StreamingRecognitionConfig) GetInterimResults() bool { + if m != nil { + return m.InterimResults + } + return false +} + +// Provides information to the recognizer that specifies how to process the +// request. +type RecognitionConfig struct { + // *Required* Encoding of audio data sent in all `RecognitionAudio` messages. + Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,enum=google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"` + // *Required* Sample rate in Hertz of the audio data sent in all + // `RecognitionAudio` messages. Valid values are: 8000-48000. + // 16000 is optimal. For best results, set the sampling rate of the audio + // source to 16000 Hz. If that's not possible, use the native sample rate of + // the audio source (instead of re-sampling). + SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"` + // *Required* The language of the supplied audio as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // Example: "en-US". + // See [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. 
+ LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // *Optional* Maximum number of recognition hypotheses to be returned. + // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechRecognitionResult`. + // The server may return fewer than `max_alternatives`. + // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of + // one. If omitted, will return a maximum of one. + MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives" json:"max_alternatives,omitempty"` + // *Optional* If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter" json:"profanity_filter,omitempty"` + // *Optional* A means to provide context to assist the speech recognition. + SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts" json:"speech_contexts,omitempty"` + // *Optional* If `true`, the top result includes a list of words and + // the start and end time offsets (timestamps) for those words. If + // `false`, no word-level time offset information is returned. The default is + // `false`. + EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets" json:"enable_word_time_offsets,omitempty"` + // *Optional* If 'true', adds punctuation to recognition result hypotheses. + // This feature is only available in select languages. Setting this for + // requests in other languages has no effect at all. + // The default 'false' value does not add punctuation to result hypotheses. + // NOTE: "This is currently offered as an experimental service, complimentary + // to all users. 
In the future this may be exclusively available as a + // premium feature." + EnableAutomaticPunctuation bool `protobuf:"varint,11,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation" json:"enable_automatic_punctuation,omitempty"` + // *Optional* Metadata regarding this request. + Metadata *RecognitionMetadata `protobuf:"bytes,9,opt,name=metadata" json:"metadata,omitempty"` +} + +func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} } +func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) } +func (*RecognitionConfig) ProtoMessage() {} +func (*RecognitionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding { + if m != nil { + return m.Encoding + } + return RecognitionConfig_ENCODING_UNSPECIFIED +} + +func (m *RecognitionConfig) GetSampleRateHertz() int32 { + if m != nil { + return m.SampleRateHertz + } + return 0 +} + +func (m *RecognitionConfig) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *RecognitionConfig) GetMaxAlternatives() int32 { + if m != nil { + return m.MaxAlternatives + } + return 0 +} + +func (m *RecognitionConfig) GetProfanityFilter() bool { + if m != nil { + return m.ProfanityFilter + } + return false +} + +func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext { + if m != nil { + return m.SpeechContexts + } + return nil +} + +func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool { + if m != nil { + return m.EnableWordTimeOffsets + } + return false +} + +func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool { + if m != nil { + return m.EnableAutomaticPunctuation + } + return false +} + +func (m *RecognitionConfig) GetMetadata() *RecognitionMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +// Description of audio data to be recognized. 
+type RecognitionMetadata struct { + // The use case most closely describing the audio content to be recognized. + InteractionType RecognitionMetadata_InteractionType `protobuf:"varint,1,opt,name=interaction_type,json=interactionType,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType" json:"interaction_type,omitempty"` + // The industry vertical to which this speech recognition request most + // closely applies. This is most indicative of the topics contained + // in the audio. Use the 6-digit NAICS code to identify the industry + // vertical - see https://www.naics.com/search/. + IndustryNaicsCodeOfAudio uint32 `protobuf:"varint,3,opt,name=industry_naics_code_of_audio,json=industryNaicsCodeOfAudio" json:"industry_naics_code_of_audio,omitempty"` + // The audio type that most closely describes the audio being recognized. + MicrophoneDistance RecognitionMetadata_MicrophoneDistance `protobuf:"varint,4,opt,name=microphone_distance,json=microphoneDistance,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance" json:"microphone_distance,omitempty"` + // The original media the speech was recorded on. + OriginalMediaType RecognitionMetadata_OriginalMediaType `protobuf:"varint,5,opt,name=original_media_type,json=originalMediaType,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType" json:"original_media_type,omitempty"` + // How many people are speaking prominently in the audio and expected to be + // recognized. + NumberOfSpeakers RecognitionMetadata_NumberOfSpeakers `protobuf:"varint,6,opt,name=number_of_speakers,json=numberOfSpeakers,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_NumberOfSpeakers" json:"number_of_speakers,omitempty"` + // The type of device the speech was recorded with. 
+ RecordingDeviceType RecognitionMetadata_RecordingDeviceType `protobuf:"varint,7,opt,name=recording_device_type,json=recordingDeviceType,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType" json:"recording_device_type,omitempty"` + // The device used to make the recording. Examples 'Nexus 5X' or + // 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or + // 'Cardioid Microphone'. + RecordingDeviceName string `protobuf:"bytes,8,opt,name=recording_device_name,json=recordingDeviceName" json:"recording_device_name,omitempty"` + // Mime type of the original audio file. For example `audio/m4a`, + // `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. + // A list of possible audio mime types is maintained at + // http://www.iana.org/assignments/media-types/media-types.xhtml#audio + OriginalMimeType string `protobuf:"bytes,9,opt,name=original_mime_type,json=originalMimeType" json:"original_mime_type,omitempty"` + // Obfuscated (privacy-protected) ID of the user, to identify number of + // unique users using the service. + ObfuscatedId int64 `protobuf:"varint,10,opt,name=obfuscated_id,json=obfuscatedId" json:"obfuscated_id,omitempty"` + // Description of the content. Eg. "Recordings of federal supreme court + // hearings from 2012". 
+ AudioTopic string `protobuf:"bytes,11,opt,name=audio_topic,json=audioTopic" json:"audio_topic,omitempty"` +} + +func (m *RecognitionMetadata) Reset() { *m = RecognitionMetadata{} } +func (m *RecognitionMetadata) String() string { return proto.CompactTextString(m) } +func (*RecognitionMetadata) ProtoMessage() {} +func (*RecognitionMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *RecognitionMetadata) GetInteractionType() RecognitionMetadata_InteractionType { + if m != nil { + return m.InteractionType + } + return RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED +} + +func (m *RecognitionMetadata) GetIndustryNaicsCodeOfAudio() uint32 { + if m != nil { + return m.IndustryNaicsCodeOfAudio + } + return 0 +} + +func (m *RecognitionMetadata) GetMicrophoneDistance() RecognitionMetadata_MicrophoneDistance { + if m != nil { + return m.MicrophoneDistance + } + return RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED +} + +func (m *RecognitionMetadata) GetOriginalMediaType() RecognitionMetadata_OriginalMediaType { + if m != nil { + return m.OriginalMediaType + } + return RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED +} + +func (m *RecognitionMetadata) GetNumberOfSpeakers() RecognitionMetadata_NumberOfSpeakers { + if m != nil { + return m.NumberOfSpeakers + } + return RecognitionMetadata_NUMBER_OF_SPEAKERS_UNSPECIFIED +} + +func (m *RecognitionMetadata) GetRecordingDeviceType() RecognitionMetadata_RecordingDeviceType { + if m != nil { + return m.RecordingDeviceType + } + return RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED +} + +func (m *RecognitionMetadata) GetRecordingDeviceName() string { + if m != nil { + return m.RecordingDeviceName + } + return "" +} + +func (m *RecognitionMetadata) GetOriginalMimeType() string { + if m != nil { + return m.OriginalMimeType + } + return "" +} + +func (m *RecognitionMetadata) GetObfuscatedId() int64 { + if m != nil { + return m.ObfuscatedId + } + return 0 +} + +func (m 
*RecognitionMetadata) GetAudioTopic() string { + if m != nil { + return m.AudioTopic + } + return "" +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +type SpeechContext struct { + // *Optional* A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). + Phrases []string `protobuf:"bytes,1,rep,name=phrases" json:"phrases,omitempty"` +} + +func (m *SpeechContext) Reset() { *m = SpeechContext{} } +func (m *SpeechContext) String() string { return proto.CompactTextString(m) } +func (*SpeechContext) ProtoMessage() {} +func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *SpeechContext) GetPhrases() []string { + if m != nil { + return m.Phrases + } + return nil +} + +// Contains audio data in the encoding specified in the `RecognitionConfig`. +// Either `content` or `uri` must be supplied. Supplying both or neither +// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See +// [audio limits](https://cloud.google.com/speech/limits#content). +type RecognitionAudio struct { + // The audio source, which is either inline content or a GCS uri. 
+ // + // Types that are valid to be assigned to AudioSource: + // *RecognitionAudio_Content + // *RecognitionAudio_Uri + AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"` +} + +func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} } +func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) } +func (*RecognitionAudio) ProtoMessage() {} +func (*RecognitionAudio) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isRecognitionAudio_AudioSource interface { + isRecognitionAudio_AudioSource() +} + +type RecognitionAudio_Content struct { + Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"` +} +type RecognitionAudio_Uri struct { + Uri string `protobuf:"bytes,2,opt,name=uri,oneof"` +} + +func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {} +func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {} + +func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource { + if m != nil { + return m.AudioSource + } + return nil +} + +func (m *RecognitionAudio) GetContent() []byte { + if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok { + return x.Content + } + return nil +} + +func (m *RecognitionAudio) GetUri() string { + if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok { + return x.Uri + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RecognitionAudio_OneofMarshaler, _RecognitionAudio_OneofUnmarshaler, _RecognitionAudio_OneofSizer, []interface{}{ + (*RecognitionAudio_Content)(nil), + (*RecognitionAudio_Uri)(nil), + } +} + +func _RecognitionAudio_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RecognitionAudio) + // audio_source + switch x := m.AudioSource.(type) { + case *RecognitionAudio_Content: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Content) + case *RecognitionAudio_Uri: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Uri) + case nil: + default: + return fmt.Errorf("RecognitionAudio.AudioSource has unexpected type %T", x) + } + return nil +} + +func _RecognitionAudio_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RecognitionAudio) + switch tag { + case 1: // audio_source.content + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.AudioSource = &RecognitionAudio_Content{x} + return true, err + case 2: // audio_source.uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.AudioSource = &RecognitionAudio_Uri{x} + return true, err + default: + return false, nil + } +} + +func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RecognitionAudio) + // audio_source + switch x := m.AudioSource.(type) { + case *RecognitionAudio_Content: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Content))) + n += len(x.Content) + case *RecognitionAudio_Uri: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Uri))) + n += len(x.Uri) + case nil: + default: + 
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The only message returned to the client by the `Recognize` method. It +// contains the result as zero or more sequential `SpeechRecognitionResult` +// messages. +type RecognizeResponse struct { + // *Output-only* Sequential list of transcription results corresponding to + // sequential portions of audio. + Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` +} + +func (m *RecognizeResponse) Reset() { *m = RecognizeResponse{} } +func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*RecognizeResponse) ProtoMessage() {} +func (*RecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +// The only message returned to the client by the `LongRunningRecognize` method. +// It contains the result as zero or more sequential `SpeechRecognitionResult` +// messages. It is included in the `result.response` field of the `Operation` +// returned by the `GetOperation` call of the `google::longrunning::Operations` +// service. +type LongRunningRecognizeResponse struct { + // *Output-only* Sequential list of transcription results corresponding to + // sequential portions of audio. 
+ Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` +} + +func (m *LongRunningRecognizeResponse) Reset() { *m = LongRunningRecognizeResponse{} } +func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*LongRunningRecognizeResponse) ProtoMessage() {} +func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +// Describes the progress of a long-running `LongRunningRecognize` call. It is +// included in the `metadata` field of the `Operation` returned by the +// `GetOperation` call of the `google::longrunning::Operations` service. +type LongRunningRecognizeMetadata struct { + // Approximate percentage of audio processed thus far. Guaranteed to be 100 + // when the audio is fully processed and the results are available. + ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"` + // Time when the request was received. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time of the most recent processing update. 
+ LastUpdateTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"` +} + +func (m *LongRunningRecognizeMetadata) Reset() { *m = LongRunningRecognizeMetadata{} } +func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) } +func (*LongRunningRecognizeMetadata) ProtoMessage() {} +func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 { + if m != nil { + return m.ProgressPercent + } + return 0 +} + +func (m *LongRunningRecognizeMetadata) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *google_protobuf4.Timestamp { + if m != nil { + return m.LastUpdateTime + } + return nil +} + +// `StreamingRecognizeResponse` is the only message returned to the client by +// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse` +// messages are streamed back to the client. If there is no recognizable +// audio, and `single_utterance` is set to false, then no messages are streamed +// back to the client. +// +// Here's an example of a series of ten `StreamingRecognizeResponse`s that might +// be returned while processing audio: +// +// 1. results { alternatives { transcript: "tube" } stability: 0.01 } +// +// 2. results { alternatives { transcript: "to be a" } stability: 0.01 } +// +// 3. results { alternatives { transcript: "to be" } stability: 0.9 } +// results { alternatives { transcript: " or not to be" } stability: 0.01 } +// +// 4. results { alternatives { transcript: "to be or not to be" +// confidence: 0.92 } +// alternatives { transcript: "to bee or not to bee" } +// is_final: true } +// +// 5. results { alternatives { transcript: " that's" } stability: 0.01 } +// +// 6. 
results { alternatives { transcript: " that is" } stability: 0.9 } +// results { alternatives { transcript: " the question" } stability: 0.01 } +// +// 7. results { alternatives { transcript: " that is the question" +// confidence: 0.98 } +// alternatives { transcript: " that was the question" } +// is_final: true } +// +// Notes: +// +// - Only two of the above responses #4 and #7 contain final results; they are +// indicated by `is_final: true`. Concatenating these together generates the +// full transcript: "to be or not to be that is the question". +// +// - The others contain interim `results`. #3 and #6 contain two interim +// `results`: the first portion has a high stability and is less likely to +// change; the second portion has a low stability and is very likely to +// change. A UI designer might choose to show only high stability `results`. +// +// - The specific `stability` and `confidence` values shown above are only for +// illustrative purposes. Actual values may vary. +// +// - In each response, only one of these fields will be set: +// `error`, +// `speech_event_type`, or +// one or more (repeated) `results`. +type StreamingRecognizeResponse struct { + // *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status] message that + // specifies the error for the operation. + Error *google_rpc.Status `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // *Output-only* This repeated list contains zero or more results that + // correspond to consecutive portions of the audio currently being processed. + // It contains zero or more `is_final=false` results followed by zero or one + // `is_final=true` result (the newly settled portion). + Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` + // *Output-only* Indicates the type of speech event. 
+ SpeechEventType StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,enum=google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"` +} + +func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} } +func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognizeResponse) ProtoMessage() {} +func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *StreamingRecognizeResponse) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult { + if m != nil { + return m.Results + } + return nil +} + +func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType { + if m != nil { + return m.SpeechEventType + } + return StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED +} + +// A streaming speech recognition result corresponding to a portion of the audio +// that is currently being processed. +type StreamingRecognitionResult struct { + // *Output-only* May contain one or more recognition hypotheses (up to the + // maximum specified in `max_alternatives`). + Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"` + // *Output-only* If `false`, this `StreamingRecognitionResult` represents an + // interim result that may change. If `true`, this is the final time the + // speech service will return this particular `StreamingRecognitionResult`, + // the recognizer will not return any further hypotheses for this portion of + // the transcript and corresponding audio. 
+ IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal" json:"is_final,omitempty"` + // *Output-only* An estimate of the likelihood that the recognizer will not + // change its guess about this interim result. Values range from 0.0 + // (completely unstable) to 1.0 (completely stable). + // This field is only provided for interim results (`is_final=false`). + // The default of 0.0 is a sentinel value indicating `stability` was not set. + Stability float32 `protobuf:"fixed32,3,opt,name=stability" json:"stability,omitempty"` +} + +func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} } +func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) } +func (*StreamingRecognitionResult) ProtoMessage() {} +func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { + if m != nil { + return m.Alternatives + } + return nil +} + +func (m *StreamingRecognitionResult) GetIsFinal() bool { + if m != nil { + return m.IsFinal + } + return false +} + +func (m *StreamingRecognitionResult) GetStability() float32 { + if m != nil { + return m.Stability + } + return 0 +} + +// A speech recognition result corresponding to a portion of the audio. +type SpeechRecognitionResult struct { + // *Output-only* May contain one or more recognition hypotheses (up to the + // maximum specified in `max_alternatives`). + // These alternatives are ordered in terms of accuracy, with the top (first) + // alternative being the most probable, as ranked by the recognizer. 
+ Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"` +} + +func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} } +func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) } +func (*SpeechRecognitionResult) ProtoMessage() {} +func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { + if m != nil { + return m.Alternatives + } + return nil +} + +// Alternative hypotheses (a.k.a. n-best list). +type SpeechRecognitionAlternative struct { + // *Output-only* Transcript text representing the words that the user spoke. + Transcript string `protobuf:"bytes,1,opt,name=transcript" json:"transcript,omitempty"` + // *Output-only* The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is typically provided only for the top hypothesis, and + // only for `is_final=true` results. Clients should not rely on the + // `confidence` field as it is not guaranteed to be accurate, or even set, in + // any of the results. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` + // *Output-only* A list of word-specific information for each recognized word. 
+ Words []*WordInfo `protobuf:"bytes,3,rep,name=words" json:"words,omitempty"` +} + +func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} } +func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) } +func (*SpeechRecognitionAlternative) ProtoMessage() {} +func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *SpeechRecognitionAlternative) GetTranscript() string { + if m != nil { + return m.Transcript + } + return "" +} + +func (m *SpeechRecognitionAlternative) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo { + if m != nil { + return m.Words + } + return nil +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. +type WordInfo struct { + // *Output-only* Time offset relative to the beginning of the audio, + // and corresponding to the start of the spoken word. + // This field is only set if `enable_word_time_offsets=true` and only + // in the top hypothesis. + // This is an experimental feature and the accuracy of the time offset can + // vary. + StartTime *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // *Output-only* Time offset relative to the beginning of the audio, + // and corresponding to the end of the spoken word. + // This field is only set if `enable_word_time_offsets=true` and only + // in the top hypothesis. + // This is an experimental feature and the accuracy of the time offset can + // vary. + EndTime *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // *Output-only* The word corresponding to this set of information. 
+ Word string `protobuf:"bytes,3,opt,name=word" json:"word,omitempty"` +} + +func (m *WordInfo) Reset() { *m = WordInfo{} } +func (m *WordInfo) String() string { return proto.CompactTextString(m) } +func (*WordInfo) ProtoMessage() {} +func (*WordInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *WordInfo) GetStartTime() *google_protobuf3.Duration { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *WordInfo) GetEndTime() *google_protobuf3.Duration { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *WordInfo) GetWord() string { + if m != nil { + return m.Word + } + return "" +} + +func init() { + proto.RegisterType((*RecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.RecognizeRequest") + proto.RegisterType((*LongRunningRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest") + proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeRequest") + proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionConfig") + proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.RecognitionConfig") + proto.RegisterType((*RecognitionMetadata)(nil), "google.cloud.speech.v1p1beta1.RecognitionMetadata") + proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1p1beta1.SpeechContext") + proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1p1beta1.RecognitionAudio") + proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.RecognizeResponse") + proto.RegisterType((*LongRunningRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse") + proto.RegisterType((*LongRunningRecognizeMetadata)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata") + proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeResponse") + 
proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionResult") + proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionResult") + proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative") + proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1p1beta1.WordInfo") + proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value) + proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType", RecognitionMetadata_InteractionType_name, RecognitionMetadata_InteractionType_value) + proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance", RecognitionMetadata_MicrophoneDistance_name, RecognitionMetadata_MicrophoneDistance_value) + proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType", RecognitionMetadata_OriginalMediaType_name, RecognitionMetadata_OriginalMediaType_value) + proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_NumberOfSpeakers", RecognitionMetadata_NumberOfSpeakers_name, RecognitionMetadata_NumberOfSpeakers_value) + proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType", RecognitionMetadata_RecordingDeviceType_name, RecognitionMetadata_RecordingDeviceType_value) + proto.RegisterEnum("google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Speech service + +type SpeechClient interface { + // Performs synchronous speech recognition: receive results after all audio + // has been sent and processed. + Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) + // Performs asynchronous speech recognition: receive results via the + // google.longrunning.Operations interface. Returns either an + // `Operation.error` or an `Operation.response` which contains + // a `LongRunningRecognizeResponse` message. + LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Performs bidirectional streaming speech recognition: receive results while + // sending audio. This method is only available via the gRPC API (not REST). + StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) +} + +type speechClient struct { + cc *grpc.ClientConn +} + +func NewSpeechClient(cc *grpc.ClientConn) SpeechClient { + return &speechClient{cc} +} + +func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) { + out := new(RecognizeResponse) + err := grpc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/Recognize", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Speech_serviceDesc.Streams[0], c.cc, "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", opts...) + if err != nil { + return nil, err + } + x := &speechStreamingRecognizeClient{stream} + return x, nil +} + +type Speech_StreamingRecognizeClient interface { + Send(*StreamingRecognizeRequest) error + Recv() (*StreamingRecognizeResponse, error) + grpc.ClientStream +} + +type speechStreamingRecognizeClient struct { + grpc.ClientStream +} + +func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) { + m := new(StreamingRecognizeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Speech service + +type SpeechServer interface { + // Performs synchronous speech recognition: receive results after all audio + // has been sent and processed. + Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error) + // Performs asynchronous speech recognition: receive results via the + // google.longrunning.Operations interface. Returns either an + // `Operation.error` or an `Operation.response` which contains + // a `LongRunningRecognizeResponse` message. + LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*google_longrunning.Operation, error) + // Performs bidirectional streaming speech recognition: receive results while + // sending audio. This method is only available via the gRPC API (not REST). 
+ StreamingRecognize(Speech_StreamingRecognizeServer) error +} + +func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) { + s.RegisterService(&_Speech_serviceDesc, srv) +} + +func _Speech_Recognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RecognizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpeechServer).Recognize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.speech.v1p1beta1.Speech/Recognize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpeechServer).Recognize(ctx, req.(*RecognizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Speech_LongRunningRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LongRunningRecognizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpeechServer).LongRunningRecognize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpeechServer).LongRunningRecognize(ctx, req.(*LongRunningRecognizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream}) +} + +type Speech_StreamingRecognizeServer interface { + Send(*StreamingRecognizeResponse) error + Recv() (*StreamingRecognizeRequest, error) + grpc.ServerStream +} + +type speechStreamingRecognizeServer struct { + grpc.ServerStream +} + +func (x *speechStreamingRecognizeServer) 
Send(m *StreamingRecognizeResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) { + m := new(StreamingRecognizeRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Speech_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.speech.v1p1beta1.Speech", + HandlerType: (*SpeechServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Recognize", + Handler: _Speech_Recognize_Handler, + }, + { + MethodName: "LongRunningRecognize", + Handler: _Speech_LongRunningRecognize_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingRecognize", + Handler: _Speech_StreamingRecognize_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/cloud/speech/v1_1beta1/cloud_speech.proto", +} + +func init() { proto.RegisterFile("google/cloud/speech/v1_1beta1/cloud_speech.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2005 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xbf, 0x73, 0xdb, 0xc8, + 0xf5, 0x37, 0x48, 0x51, 0x12, 0x9f, 0x7e, 0x41, 0xab, 0xf3, 0xd7, 0xb4, 0xac, 0xb3, 0x75, 0xf0, + 0xdc, 0x9d, 0xee, 0xbe, 0x37, 0xa4, 0xa5, 0x64, 0x2e, 0x67, 0xdf, 0xe4, 0x26, 0x10, 0x00, 0x99, + 0x98, 0x90, 0x00, 0x67, 0x49, 0xda, 0xf1, 0x35, 0x3b, 0x10, 0xb8, 0xa4, 0x31, 0x21, 0x01, 0x1c, + 0xb0, 0x50, 0x6c, 0x97, 0x69, 0x33, 0x49, 0x93, 0x99, 0x74, 0xa9, 0x72, 0x75, 0xca, 0x14, 0x69, + 0x52, 0x25, 0x45, 0xda, 0x34, 0x29, 0xaf, 0xc8, 0x1f, 0x91, 0x32, 0xb3, 0xbb, 0x00, 0x45, 0x91, + 0xb2, 0x2d, 0x6b, 0x72, 0x33, 0xe9, 0x88, 0xcf, 0xbe, 0xf7, 0x79, 0x6f, 0xdf, 0xbe, 0x7d, 0x7c, + 0x6f, 0xe1, 0xc1, 0x28, 0x8a, 0x46, 0x63, 0xda, 0xf0, 0xc7, 0x51, 0x36, 0x68, 0xa4, 0x31, 0xa5, + 0xfe, 0xf3, 0xc6, 0xd9, 0x21, 0x39, 0x3c, 0xa5, 0xcc, 0x3b, 0x94, 0x30, 0x91, 0x70, 0x3d, 0x4e, + 0x22, 0x16, 0xa1, 0xf7, 0xa5, 0x46, 
0x5d, 0x2c, 0xd5, 0xf3, 0xa5, 0xb3, 0xc3, 0x58, 0x6a, 0xec, + 0xee, 0xe5, 0x84, 0x5e, 0x1c, 0x34, 0xbc, 0x30, 0x8c, 0x98, 0xc7, 0x82, 0x28, 0x4c, 0xa5, 0xf2, + 0xee, 0xfd, 0x7c, 0x75, 0x1c, 0x85, 0xa3, 0x24, 0x0b, 0xc3, 0x20, 0x1c, 0x35, 0xa2, 0x98, 0x26, + 0x17, 0x84, 0xee, 0xe6, 0x42, 0xe2, 0xeb, 0x34, 0x1b, 0x36, 0x06, 0x99, 0x14, 0xc8, 0xd7, 0xef, + 0xcd, 0xaf, 0xb3, 0x60, 0x42, 0x53, 0xe6, 0x4d, 0xe2, 0x5c, 0xe0, 0x56, 0x2e, 0x90, 0xc4, 0x7e, + 0x23, 0x65, 0x1e, 0xcb, 0x72, 0x66, 0xed, 0x0f, 0x0a, 0xa8, 0x98, 0xfa, 0xd1, 0x28, 0x0c, 0x5e, + 0x51, 0x4c, 0xbf, 0xc9, 0x68, 0xca, 0x50, 0x13, 0x96, 0xfd, 0x28, 0x1c, 0x06, 0xa3, 0x9a, 0xb2, + 0xaf, 0x1c, 0xac, 0x1d, 0x3d, 0xa8, 0xbf, 0x71, 0x87, 0xf5, 0x9c, 0x80, 0x3b, 0x64, 0x08, 0x3d, + 0x9c, 0xeb, 0x23, 0x0b, 0x2a, 0x5e, 0x36, 0x08, 0xa2, 0x5a, 0x49, 0x10, 0x35, 0xae, 0x4e, 0xa4, + 0x73, 0x35, 0x2c, 0xb5, 0xb5, 0x3f, 0x2a, 0x70, 0xa7, 0x15, 0x85, 0x23, 0x2c, 0x03, 0xf4, 0xbf, + 0xef, 0xf0, 0x5f, 0x14, 0xb8, 0xdd, 0x65, 0x09, 0xf5, 0x26, 0x97, 0xb9, 0x3b, 0x04, 0x35, 0x2d, + 0x16, 0xc9, 0x05, 0xc7, 0x1f, 0xbe, 0xc5, 0xde, 0x3c, 0xe7, 0xf9, 0x0e, 0x9a, 0x37, 0xf0, 0xd6, + 0x94, 0x54, 0x42, 0xe8, 0x43, 0xd8, 0x10, 0xee, 0x70, 0x1b, 0x8c, 0x86, 0x4c, 0x6c, 0x6a, 0xbd, + 0x79, 0x03, 0xaf, 0x0b, 0xd8, 0x90, 0xe8, 0xf1, 0x0e, 0x6c, 0x9f, 0xbb, 0x93, 0x48, 0x1f, 0xb5, + 0x3f, 0x2b, 0xb0, 0xfb, 0x7a, 0x6b, 0xff, 0xc5, 0x88, 0x7f, 0x02, 0x6a, 0x1a, 0x84, 0xa3, 0x31, + 0x25, 0x19, 0x63, 0x34, 0xf1, 0x42, 0x9f, 0x0a, 0x3f, 0x57, 0xf1, 0x96, 0xc4, 0xfb, 0x05, 0x8c, + 0x3e, 0x86, 0xad, 0x20, 0x64, 0x34, 0x09, 0x26, 0x24, 0xa1, 0x69, 0x36, 0x66, 0x69, 0xad, 0x2c, + 0x24, 0x37, 0x73, 0x18, 0x4b, 0x54, 0xfb, 0x5b, 0x05, 0xb6, 0x17, 0x7d, 0xfe, 0x1a, 0x56, 0x69, + 0xe8, 0x47, 0x83, 0x20, 0x94, 0x5e, 0x6f, 0x1e, 0x7d, 0xf5, 0xae, 0x5e, 0xd7, 0xc5, 0x29, 0x5b, + 0x39, 0x0b, 0x9e, 0xf2, 0xa1, 0x4f, 0x61, 0x3b, 0xf5, 0x26, 0xf1, 0x98, 0x92, 0xc4, 0x63, 0x94, + 0x3c, 0xa7, 0x09, 0x7b, 0x25, 0xb6, 0x51, 0xc1, 0x5b, 0x72, 0x01, 0x7b, 
0x8c, 0x36, 0x39, 0x8c, + 0xee, 0xc3, 0xc6, 0xd8, 0x0b, 0x47, 0x99, 0x37, 0xa2, 0xc4, 0x8f, 0x06, 0x54, 0x6c, 0xa2, 0x8a, + 0xd7, 0x0b, 0xd0, 0x88, 0x06, 0x94, 0x87, 0x65, 0xe2, 0xbd, 0x20, 0xde, 0x98, 0xd1, 0x24, 0xf4, + 0x58, 0x70, 0x46, 0xd3, 0xda, 0x92, 0xe4, 0x9b, 0x78, 0x2f, 0xf4, 0x19, 0x98, 0x8b, 0xc6, 0x49, + 0x34, 0xf4, 0xc2, 0x80, 0xbd, 0x24, 0xc3, 0x80, 0x2f, 0xd5, 0x2a, 0x32, 0x82, 0x53, 0xfc, 0x44, + 0xc0, 0xa8, 0x0f, 0x5b, 0x72, 0x93, 0x32, 0x25, 0x5e, 0xb0, 0xb4, 0xb6, 0xbc, 0x5f, 0x3e, 0x58, + 0x3b, 0xfa, 0xec, 0x6d, 0x89, 0x27, 0x00, 0x43, 0x2a, 0xe1, 0xcd, 0x74, 0xf6, 0x33, 0x45, 0x3f, + 0x82, 0x1a, 0x0d, 0xbd, 0xd3, 0x31, 0x25, 0xbf, 0x88, 0x92, 0x01, 0xe1, 0xd5, 0x87, 0x44, 0xc3, + 0x61, 0x4a, 0x59, 0x5a, 0x5b, 0x15, 0x9e, 0xdc, 0x94, 0xeb, 0x4f, 0xa3, 0x64, 0xd0, 0x0b, 0x26, + 0xd4, 0x95, 0x8b, 0xe8, 0x27, 0xb0, 0x97, 0x2b, 0x7a, 0x19, 0x8b, 0x26, 0x1e, 0x0b, 0x7c, 0x12, + 0x67, 0xa1, 0xcf, 0x32, 0x51, 0xde, 0x6a, 0x6b, 0x42, 0x79, 0x57, 0xca, 0xe8, 0x85, 0x48, 0xe7, + 0x5c, 0x02, 0x39, 0xb0, 0x3a, 0xa1, 0xcc, 0x1b, 0x78, 0xcc, 0xab, 0x55, 0x45, 0x2a, 0x1e, 0x5d, + 0xfd, 0x50, 0xdb, 0xb9, 0x26, 0x9e, 0x72, 0x68, 0xbf, 0x52, 0x60, 0xe3, 0xc2, 0x21, 0xa3, 0x1a, + 0xbc, 0x67, 0x39, 0x86, 0x6b, 0xda, 0xce, 0x63, 0xd2, 0x77, 0xba, 0x1d, 0xcb, 0xb0, 0x4f, 0x6c, + 0xcb, 0x54, 0x6f, 0xa0, 0x75, 0x58, 0x6d, 0xd9, 0x8e, 0xa5, 0xe3, 0xc3, 0xcf, 0x55, 0x05, 0xad, + 0xc2, 0xd2, 0x49, 0x4b, 0x37, 0xd4, 0x12, 0xaa, 0x42, 0xa5, 0xdd, 0x6f, 0xe9, 0x4f, 0xd5, 0x32, + 0x5a, 0x81, 0xb2, 0xde, 0xc6, 0xea, 0x12, 0x02, 0x58, 0xd6, 0xdb, 0x98, 0x3c, 0x3d, 0x56, 0x2b, + 0x5c, 0xcf, 0x7d, 0xfc, 0x98, 0xb8, 0x9d, 0x7e, 0x57, 0x5d, 0x46, 0xbb, 0xf0, 0x7f, 0xdd, 0x8e, + 0x65, 0xfd, 0x8c, 0x3c, 0xb5, 0x7b, 0x4d, 0xd2, 0xb4, 0x74, 0xd3, 0xc2, 0xe4, 0xf8, 0x59, 0xcf, + 0x52, 0x57, 0xb4, 0xef, 0xd6, 0x60, 0xe7, 0x12, 0x7f, 0xd1, 0x04, 0x54, 0x91, 0xf2, 0x9e, 0xcf, + 0x61, 0xc2, 0x5e, 0xc6, 0x34, 0x4f, 0xe9, 0xe3, 0x77, 0xdf, 0x7d, 0xdd, 0x3e, 0xa7, 0xea, 0xbd, + 0x8c, 0x29, 
0xde, 0x0a, 0x2e, 0x02, 0xe8, 0x2b, 0xd8, 0x0b, 0xc2, 0x41, 0x96, 0xb2, 0xe4, 0x25, + 0x09, 0xbd, 0xc0, 0x4f, 0x45, 0xde, 0x92, 0x68, 0x48, 0x64, 0xb1, 0xe4, 0x09, 0xbc, 0x81, 0x6b, + 0x85, 0x8c, 0xc3, 0x45, 0x78, 0x16, 0xbb, 0x43, 0x11, 0x4a, 0x74, 0x06, 0x3b, 0x93, 0xc0, 0x4f, + 0xa2, 0xf8, 0x79, 0x14, 0x52, 0x32, 0x08, 0x52, 0x26, 0xae, 0xf9, 0x92, 0xf0, 0xd8, 0xba, 0x86, + 0xc7, 0xed, 0x29, 0x9b, 0x99, 0x93, 0x61, 0x34, 0x59, 0xc0, 0x10, 0x83, 0x9d, 0x28, 0x09, 0x46, + 0x41, 0xe8, 0x8d, 0xc9, 0x84, 0x0e, 0x02, 0x4f, 0x46, 0xaa, 0x22, 0xec, 0x9a, 0xd7, 0xb0, 0xeb, + 0xe6, 0x6c, 0x6d, 0x4e, 0x26, 0x62, 0xb5, 0x1d, 0xcd, 0x43, 0xe8, 0x1b, 0x40, 0x61, 0x36, 0x39, + 0xa5, 0x09, 0x0f, 0x50, 0x1a, 0x53, 0xef, 0xe7, 0x34, 0xe1, 0xf7, 0x8c, 0x1b, 0x35, 0xae, 0x61, + 0xd4, 0x11, 0x64, 0xee, 0xb0, 0x9b, 0x53, 0x61, 0x35, 0x9c, 0x43, 0xd0, 0x2b, 0xb8, 0x99, 0x50, + 0x3f, 0x4a, 0x78, 0xc2, 0x92, 0x01, 0x3d, 0x0b, 0x7c, 0x2a, 0xb7, 0xba, 0x22, 0xac, 0x9e, 0x5c, + 0xc3, 0x2a, 0x2e, 0xf8, 0x4c, 0x41, 0x27, 0x36, 0xbb, 0x93, 0x2c, 0x82, 0xe8, 0xe8, 0x12, 0xdb, + 0xa1, 0x37, 0xa1, 0xe2, 0xe6, 0x57, 0x17, 0x74, 0x1c, 0x6f, 0x42, 0xd1, 0x67, 0x80, 0xce, 0x0f, + 0x86, 0x57, 0x0b, 0xe1, 0x6c, 0x55, 0x28, 0xa8, 0xd3, 0x88, 0x06, 0x13, 0x69, 0xe1, 0x3e, 0x6c, + 0x44, 0xa7, 0xc3, 0x2c, 0xf5, 0x3d, 0x46, 0x07, 0x24, 0x18, 0xd4, 0x60, 0x5f, 0x39, 0x28, 0xe3, + 0xf5, 0x73, 0xd0, 0x1e, 0xa0, 0x7b, 0xb0, 0x26, 0xff, 0xec, 0x58, 0x14, 0x07, 0xbe, 0xa8, 0x1c, + 0x55, 0x0c, 0x02, 0xea, 0x71, 0x44, 0xfb, 0xab, 0x02, 0x5b, 0x73, 0x99, 0x8e, 0xf6, 0x61, 0xcf, + 0x76, 0x7a, 0x16, 0xd6, 0x8d, 0x9e, 0xed, 0x3a, 0xa4, 0xf7, 0xac, 0x63, 0xcd, 0xdd, 0xf1, 0x4d, + 0x00, 0xd3, 0xee, 0x1a, 0xfd, 0x6e, 0xd7, 0x76, 0x1d, 0x55, 0x41, 0x2a, 0xac, 0x77, 0xb0, 0xd5, + 0xb5, 0x9c, 0x9e, 0xce, 0x55, 0xd4, 0x12, 0x97, 0xe8, 0x34, 0x5d, 0xc7, 0x22, 0x86, 0xde, 0x6a, + 0xa9, 0x65, 0xb4, 0x01, 0xd5, 0x27, 0xae, 0x6d, 0x58, 0x6d, 0xdd, 0x6e, 0xa9, 0x4b, 0xe8, 0x0e, + 0xdc, 0xea, 0x60, 0xf7, 0xc4, 0x12, 0x04, 0x7a, 
0xab, 0xf5, 0x8c, 0x74, 0xb0, 0x6b, 0xf6, 0x0d, + 0xcb, 0x54, 0x2b, 0x9c, 0x4d, 0xc8, 0x92, 0xae, 0xa5, 0x63, 0xa3, 0xa9, 0x2e, 0xa3, 0x6d, 0xd8, + 0x90, 0x88, 0xe1, 0xb6, 0xdb, 0xba, 0x63, 0xaa, 0x2b, 0x9c, 0xd0, 0xb4, 0x8d, 0xdc, 0xde, 0xaa, + 0x36, 0x00, 0xb4, 0x98, 0xfe, 0xe8, 0x3e, 0xdc, 0x6b, 0xdb, 0x06, 0x76, 0xa5, 0x2b, 0xa6, 0xdd, + 0xed, 0xe9, 0x8e, 0x31, 0xbf, 0x99, 0x0d, 0xa8, 0xf2, 0x72, 0x75, 0x62, 0x5b, 0x2d, 0x53, 0x55, + 0x78, 0x1d, 0x6a, 0xdb, 0xa6, 0xfc, 0x2a, 0xf1, 0xaf, 0x93, 0x62, 0xad, 0xac, 0x39, 0xb0, 0xbd, + 0x90, 0xec, 0xdc, 0x88, 0x8b, 0xed, 0xc7, 0xb6, 0xa3, 0xb7, 0x48, 0xdb, 0x32, 0x6d, 0xfd, 0xb2, + 0x88, 0x55, 0xa1, 0xa2, 0xf7, 0x4d, 0xdb, 0x55, 0x15, 0xfe, 0xf3, 0x89, 0x6d, 0x5a, 0xae, 0x5a, + 0xd2, 0x62, 0x50, 0xe7, 0xf3, 0x18, 0x69, 0x70, 0xd7, 0xe9, 0xb7, 0x8f, 0x2d, 0x4c, 0xdc, 0x13, + 0xd2, 0xed, 0x58, 0xfa, 0x4f, 0x2d, 0xdc, 0x9d, 0x63, 0xdb, 0x82, 0x35, 0xbe, 0xa1, 0x7c, 0x55, + 0x1e, 0x40, 0xef, 0xa9, 0x3b, 0x15, 0x57, 0x4b, 0xe8, 0x26, 0x6c, 0xb7, 0xfb, 0xad, 0x9e, 0xdd, + 0x69, 0x59, 0xe7, 0x70, 0x59, 0xfb, 0x56, 0x91, 0xb5, 0x73, 0x3e, 0x5f, 0x3f, 0x84, 0x0f, 0xb0, + 0x65, 0xb8, 0x58, 0x14, 0x74, 0xd3, 0x7a, 0xc2, 0x83, 0x7d, 0xf9, 0xc1, 0x77, 0xdb, 0x3a, 0xee, + 0x89, 0x80, 0xaa, 0x0a, 0x5a, 0x86, 0x52, 0xc7, 0x98, 0x3d, 0x6e, 0x5e, 0xfa, 0xd5, 0x32, 0x5a, + 0x83, 0x95, 0x27, 0x56, 0xd3, 0x36, 0x5a, 0x96, 0xba, 0xc4, 0xff, 0x2b, 0xdc, 0x5e, 0x93, 0x6f, + 0xa8, 0xdf, 0x33, 0x5d, 0x17, 0xe7, 0xfc, 0x6a, 0x05, 0xdd, 0x82, 0x1d, 0xb9, 0x62, 0x3b, 0xb3, + 0x0b, 0xcb, 0xda, 0x27, 0xb0, 0x71, 0xe1, 0xcf, 0x15, 0xd5, 0x60, 0x25, 0x7e, 0x9e, 0x78, 0x29, + 0x4d, 0x6b, 0xca, 0x7e, 0xf9, 0xa0, 0x8a, 0x8b, 0x4f, 0x0d, 0x4f, 0x7b, 0xf5, 0x69, 0xc3, 0x89, + 0x76, 0x61, 0xa5, 0xe8, 0xee, 0x94, 0xbc, 0xbb, 0x2b, 0x00, 0x84, 0xa0, 0x9c, 0x25, 0x81, 0x68, + 0x43, 0xaa, 0xcd, 0x1b, 0x98, 0x7f, 0x1c, 0x6f, 0x82, 0x6c, 0xfe, 0x48, 0x1a, 0x65, 0x89, 0x4f, + 0x35, 0x3a, 0xed, 0x94, 0x78, 0x7f, 0x9a, 0xc6, 0x51, 0x98, 0x52, 0xd4, 0x81, 0x95, 
0xa2, 0xc1, + 0x2a, 0x89, 0xf6, 0xe0, 0xf3, 0x2b, 0xb5, 0x07, 0x33, 0xce, 0xc9, 0x4e, 0x0c, 0x17, 0x34, 0x5a, + 0x0c, 0x7b, 0x97, 0x37, 0xf0, 0xdf, 0x9b, 0xc5, 0xbf, 0x2b, 0x97, 0x9b, 0x9c, 0xfe, 0x87, 0xca, + 0xb6, 0x69, 0x94, 0xd0, 0x34, 0x25, 0x31, 0x4d, 0xfc, 0x22, 0x84, 0x15, 0xd1, 0x36, 0x09, 0xbc, + 0x23, 0x61, 0xf4, 0x10, 0x20, 0x65, 0x5e, 0xc2, 0x44, 0x67, 0x93, 0x8f, 0x06, 0xbb, 0x85, 0x83, + 0xc5, 0xd0, 0x55, 0xef, 0x15, 0x43, 0x17, 0xae, 0x0a, 0x69, 0xfe, 0x8d, 0x4c, 0x50, 0xc7, 0x5e, + 0xca, 0x48, 0x16, 0x0f, 0x78, 0x63, 0x28, 0x08, 0xca, 0x6f, 0x25, 0xd8, 0xe4, 0x3a, 0x7d, 0xa1, + 0xc2, 0x41, 0xed, 0xbb, 0xd2, 0x62, 0x37, 0x3e, 0x13, 0xbd, 0x03, 0xa8, 0xd0, 0x24, 0x89, 0x92, + 0xbc, 0x19, 0x47, 0x05, 0x73, 0x12, 0xfb, 0xf5, 0xae, 0x18, 0xf7, 0xb0, 0x14, 0x40, 0xdd, 0xf9, + 0x38, 0x5f, 0x67, 0xe2, 0x98, 0x0b, 0x35, 0xca, 0x60, 0x3b, 0xef, 0x2a, 0xe9, 0x19, 0x0d, 0x99, + 0x2c, 0xe6, 0xf2, 0xcf, 0xdd, 0x7e, 0x47, 0xfa, 0xf3, 0x4d, 0xe5, 0x27, 0x6c, 0x71, 0x46, 0xd9, + 0x95, 0xa4, 0x17, 0x01, 0xad, 0x05, 0x5b, 0x73, 0x32, 0x68, 0x0f, 0x6a, 0xbc, 0x97, 0x32, 0x9a, + 0xc4, 0x7a, 0x62, 0x39, 0xbd, 0xb9, 0x2b, 0x7d, 0x07, 0x6e, 0x59, 0x8e, 0x29, 0x8a, 0x8d, 0xed, + 0x3c, 0x6e, 0x59, 0xa4, 0xdf, 0xe3, 0xb5, 0xdf, 0x31, 0x2c, 0x55, 0xd1, 0xfe, 0xf4, 0x9a, 0x81, + 0x47, 0x6e, 0x16, 0x11, 0x58, 0xbf, 0xd0, 0x8b, 0x2b, 0x22, 0x7a, 0x5f, 0xbe, 0x6b, 0x96, 0xce, + 0x34, 0xee, 0xf8, 0x02, 0x21, 0xba, 0x0d, 0xab, 0x41, 0x4a, 0x86, 0xbc, 0xe0, 0xe6, 0xf3, 0xcf, + 0x4a, 0x90, 0x9e, 0xf0, 0x4f, 0xb4, 0x07, 0x3c, 0xa1, 0x4e, 0x83, 0x71, 0xc0, 0x5e, 0x8a, 0xe4, + 0x29, 0xe1, 0x73, 0x40, 0x7b, 0x05, 0xb7, 0x5e, 0x73, 0x19, 0xbe, 0x77, 0xa7, 0xb5, 0xdf, 0x2b, + 0xb0, 0xf7, 0x26, 0x71, 0x74, 0x17, 0x80, 0x25, 0x5e, 0x98, 0xfa, 0x49, 0x10, 0xcb, 0xeb, 0x55, + 0xc5, 0x33, 0x08, 0x5f, 0x17, 0x73, 0xe0, 0x80, 0x16, 0x73, 0x5f, 0x09, 0xcf, 0x20, 0xe8, 0xc7, + 0x50, 0xe1, 0x23, 0x05, 0x1f, 0xf4, 0xb8, 0xeb, 0x1f, 0xbf, 0xc5, 0x75, 0x3e, 0x5f, 0xd8, 0xe1, + 0x30, 0xc2, 0x52, 0x4b, 
0xfb, 0x8d, 0x02, 0xab, 0x05, 0x86, 0xbe, 0xb8, 0x70, 0x8b, 0xe5, 0x55, + 0xb9, 0xbd, 0x70, 0x09, 0xcd, 0xfc, 0x69, 0x65, 0xf6, 0x12, 0xff, 0x90, 0x4f, 0x8e, 0x83, 0xd9, + 0xdb, 0xff, 0x06, 0xbd, 0x15, 0x1a, 0x8a, 0x19, 0x07, 0x21, 0x58, 0xe2, 0x5e, 0xe4, 0xe3, 0x9d, + 0xf8, 0x7d, 0xf4, 0xcf, 0x32, 0x2c, 0xcb, 0x80, 0xa1, 0xdf, 0x29, 0x50, 0x9d, 0x66, 0x3d, 0xba, + 0xe2, 0x4b, 0xc3, 0xf4, 0x11, 0x61, 0xf7, 0xc1, 0xd5, 0x15, 0xe4, 0x85, 0xd2, 0x3e, 0xfa, 0xe5, + 0x3f, 0xfe, 0xf5, 0xdb, 0xd2, 0xbe, 0x76, 0xa7, 0x31, 0x95, 0xca, 0xdf, 0xb7, 0x1e, 0x25, 0x85, + 0xf0, 0x23, 0xe5, 0x53, 0xf4, 0xad, 0x02, 0xef, 0x5d, 0x56, 0x39, 0xd1, 0xa3, 0xb7, 0x98, 0x7c, + 0xc3, 0x13, 0xcd, 0xee, 0xfb, 0x85, 0xee, 0xcc, 0x43, 0x57, 0xdd, 0x2d, 0x1e, 0xba, 0xb4, 0x43, + 0xe1, 0xdb, 0xff, 0x6b, 0x1f, 0x2d, 0xfa, 0x36, 0xa3, 0x70, 0xc1, 0xcd, 0x5f, 0x2b, 0x80, 0x16, + 0xcb, 0x07, 0xfa, 0xe2, 0x1a, 0x15, 0x47, 0xba, 0xf8, 0xf0, 0xda, 0xb5, 0xea, 0x40, 0x79, 0xa0, + 0x1c, 0xbf, 0x82, 0x0f, 0xfc, 0x68, 0xf2, 0x66, 0x8e, 0xe3, 0x35, 0x79, 0xf8, 0x1d, 0x9e, 0x35, + 0x1d, 0xe5, 0x6b, 0x23, 0x97, 0x1e, 0x45, 0x7c, 0xf8, 0xaf, 0x47, 0xc9, 0xa8, 0x31, 0xa2, 0xa1, + 0xc8, 0xa9, 0x86, 0x5c, 0xf2, 0xe2, 0x20, 0x9d, 0x7f, 0x8b, 0xcc, 0xc9, 0xbe, 0x94, 0xc0, 0xbf, + 0x15, 0xe5, 0x74, 0x59, 0xa8, 0xfc, 0xe0, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x32, 0x25, + 0x1e, 0xbd, 0x14, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/support/common/common.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/support/common/common.pb.go new file mode 100644 index 000000000..b9bbfb864 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/support/common/common.pb.go @@ -0,0 +1,864 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/support/common.proto + +/* +Package common is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/support/common.proto + +It has these top-level messages: + SupportAccount + Case + CustomerIssue + SupportRole + Comment + IssueTaxonomy +*/ +package common + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The current state of this SupportAccount. +type SupportAccount_State int32 + +const ( + // Account is in an unknown state. + SupportAccount_STATE_UNSPECIFIED SupportAccount_State = 0 + // Account is in an active state. + SupportAccount_ACTIVE SupportAccount_State = 1 + // Account has been created but is being provisioned in support systems. + SupportAccount_PENDING SupportAccount_State = 2 + // Account deletion has been requested by the user. + SupportAccount_PENDING_DELETION SupportAccount_State = 3 +) + +var SupportAccount_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "PENDING", + 3: "PENDING_DELETION", +} +var SupportAccount_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "PENDING": 2, + "PENDING_DELETION": 3, +} + +func (x SupportAccount_State) String() string { + return proto.EnumName(SupportAccount_State_name, int32(x)) +} +func (SupportAccount_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// Pricing model applicable to this support account. 
+type SupportAccount_PricingModel int32 + +const ( + // This account is subscribed to an unknown pricing model. + SupportAccount_PRICING_MODEL_UNKNOWN SupportAccount_PricingModel = 0 + // Package based pricing (Platinum, Gold, Silver, Bronze). + SupportAccount_PACKAGES SupportAccount_PricingModel = 1 + // Support charges are calculated based on user seats a.k.a, + // "Pick Your Team" model. + SupportAccount_USER_ROLES SupportAccount_PricingModel = 2 +) + +var SupportAccount_PricingModel_name = map[int32]string{ + 0: "PRICING_MODEL_UNKNOWN", + 1: "PACKAGES", + 2: "USER_ROLES", +} +var SupportAccount_PricingModel_value = map[string]int32{ + "PRICING_MODEL_UNKNOWN": 0, + "PACKAGES": 1, + "USER_ROLES": 2, +} + +func (x SupportAccount_PricingModel) String() string { + return proto.EnumName(SupportAccount_PricingModel_name, int32(x)) +} +func (SupportAccount_PricingModel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1} +} + +// The case priority with P0 being the most urgent and P4 the least. +type Case_Priority int32 + +const ( + // Priority is undefined or has not been set yet. + Case_PRIORITY_UNSPECIFIED Case_Priority = 0 + // Extreme impact on a production service - Service is hard down. + Case_P0 Case_Priority = 1 + // Critical impact on a production service - Service is currently unusable. + Case_P1 Case_Priority = 2 + // Severe impact on a production service - Service is usable but greatly + // impaired. + Case_P2 Case_Priority = 3 + // Medium impact on a production service - Service is available, but + // moderately impaired. + Case_P3 Case_Priority = 4 + // General questions or minor issues - Production service is fully + // available. 
+ Case_P4 Case_Priority = 5 +) + +var Case_Priority_name = map[int32]string{ + 0: "PRIORITY_UNSPECIFIED", + 1: "P0", + 2: "P1", + 3: "P2", + 4: "P3", + 5: "P4", +} +var Case_Priority_value = map[string]int32{ + "PRIORITY_UNSPECIFIED": 0, + "P0": 1, + "P1": 2, + "P2": 3, + "P3": 4, + "P4": 5, +} + +func (x Case_Priority) String() string { + return proto.EnumName(Case_Priority_name, int32(x)) +} +func (Case_Priority) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// The state of a case. +type Case_State int32 + +const ( + // Case is in an unknown state. + Case_STATE_UNSPECIFIED Case_State = 0 + // Case has been created but no one is assigned to work on it yet. + Case_NEW Case_State = 1 + // Case has been assigned to a support agent. + Case_ASSIGNED Case_State = 2 + // A support agent is currently investigating the case. + Case_IN_PROGRESS_GOOGLE_SUPPORT Case_State = 3 + // Case has been forwarded to product team for further investigation. + Case_IN_PROGRESS_GOOGLE_ENG Case_State = 4 + // Case is under investigation and relates to a known issue. + Case_IN_PROGRESS_KNOWN_ISSUE Case_State = 5 + // Case is waiting for a response from the customer. + Case_WAITING_FOR_CUSTOMER_RESPONSE Case_State = 6 + // A solution has been offered for the case but it isn't closed yet. + Case_SOLUTION_OFFERED Case_State = 7 + // Cases has been fully resolved and is in a closed state. 
+ Case_CLOSED Case_State = 8 +) + +var Case_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "NEW", + 2: "ASSIGNED", + 3: "IN_PROGRESS_GOOGLE_SUPPORT", + 4: "IN_PROGRESS_GOOGLE_ENG", + 5: "IN_PROGRESS_KNOWN_ISSUE", + 6: "WAITING_FOR_CUSTOMER_RESPONSE", + 7: "SOLUTION_OFFERED", + 8: "CLOSED", +} +var Case_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "NEW": 1, + "ASSIGNED": 2, + "IN_PROGRESS_GOOGLE_SUPPORT": 3, + "IN_PROGRESS_GOOGLE_ENG": 4, + "IN_PROGRESS_KNOWN_ISSUE": 5, + "WAITING_FOR_CUSTOMER_RESPONSE": 6, + "SOLUTION_OFFERED": 7, + "CLOSED": 8, +} + +func (x Case_State) String() string { + return proto.EnumName(Case_State_name, int32(x)) +} +func (Case_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 1} } + +// The status of a customer issue. +type CustomerIssue_IssueState int32 + +const ( + // Issue in an unknown state. + CustomerIssue_ISSUE_STATE_UNSPECIFIED CustomerIssue_IssueState = 0 + // Issue is currently open but the work on it has not been started. + CustomerIssue_OPEN CustomerIssue_IssueState = 1 + // Issue is currently being worked on. + CustomerIssue_IN_PROGRESS CustomerIssue_IssueState = 2 + // Issue is fixed. + CustomerIssue_FIXED CustomerIssue_IssueState = 3 + // Issue has been marked as invalid. + CustomerIssue_WONT_FIX CustomerIssue_IssueState = 4 + // Issue verified and in production. 
+ CustomerIssue_VERIFIED CustomerIssue_IssueState = 5 +) + +var CustomerIssue_IssueState_name = map[int32]string{ + 0: "ISSUE_STATE_UNSPECIFIED", + 1: "OPEN", + 2: "IN_PROGRESS", + 3: "FIXED", + 4: "WONT_FIX", + 5: "VERIFIED", +} +var CustomerIssue_IssueState_value = map[string]int32{ + "ISSUE_STATE_UNSPECIFIED": 0, + "OPEN": 1, + "IN_PROGRESS": 2, + "FIXED": 3, + "WONT_FIX": 4, + "VERIFIED": 5, +} + +func (x CustomerIssue_IssueState) String() string { + return proto.EnumName(CustomerIssue_IssueState_name, int32(x)) +} +func (CustomerIssue_IssueState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// A role which determines the support resources and features a user might +// get access to. +type SupportRole_Role int32 + +const ( + // An unknown role. + SupportRole_ROLE_UNSPECIFIED SupportRole_Role = 0 + // The basic support role. + SupportRole_BASIC SupportRole_Role = 1 + // The developer role. + SupportRole_DEVELOPER SupportRole_Role = 2 + // The operation role. + SupportRole_OPERATION SupportRole_Role = 3 + // The site reliability role. + SupportRole_SITE_RELIABILITY SupportRole_Role = 4 +) + +var SupportRole_Role_name = map[int32]string{ + 0: "ROLE_UNSPECIFIED", + 1: "BASIC", + 2: "DEVELOPER", + 3: "OPERATION", + 4: "SITE_RELIABILITY", +} +var SupportRole_Role_value = map[string]int32{ + "ROLE_UNSPECIFIED": 0, + "BASIC": 1, + "DEVELOPER": 2, + "OPERATION": 3, + "SITE_RELIABILITY": 4, +} + +func (x SupportRole_Role) String() string { + return proto.EnumName(SupportRole_Role_name, int32(x)) +} +func (SupportRole_Role) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } + +// A Google Cloud Platform account that identifies support eligibility for a +// Cloud resource. Currently the Cloud resource can only be an Organization +// but this might change in future. +type SupportAccount struct { + // The resource name for a support account in format + // `supportAccounts/{account_id}`. + // Output only. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Identifier for this entity that gets persisted in storage system. The + // resource name is populated using this field in format + // `supportAccounts/{account_id}`. + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId" json:"account_id,omitempty"` + // The Cloud resource with which this support account is associated. + CloudResource string `protobuf:"bytes,3,opt,name=cloud_resource,json=cloudResource" json:"cloud_resource,omitempty"` + // A user friendly display name assigned to this support account. + DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Indicates the current state of an account. + State SupportAccount_State `protobuf:"varint,5,opt,name=state,enum=google.cloud.support.common.SupportAccount_State" json:"state,omitempty"` + // Time when this account was created. + // Output only. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The resource name of a billing account associated with this support + // account. For example, `billingAccounts/ABCDEF-012345-567890`. + BillingAccountName string `protobuf:"bytes,7,opt,name=billing_account_name,json=billingAccountName" json:"billing_account_name,omitempty"` + UnifyAccountId string `protobuf:"bytes,8,opt,name=unify_account_id,json=unifyAccountId" json:"unify_account_id,omitempty"` + // The PricingModel applicable to this support account. 
+ PricingModel SupportAccount_PricingModel `protobuf:"varint,9,opt,name=pricing_model,json=pricingModel,enum=google.cloud.support.common.SupportAccount_PricingModel" json:"pricing_model,omitempty"` +} + +func (m *SupportAccount) Reset() { *m = SupportAccount{} } +func (m *SupportAccount) String() string { return proto.CompactTextString(m) } +func (*SupportAccount) ProtoMessage() {} +func (*SupportAccount) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *SupportAccount) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SupportAccount) GetAccountId() string { + if m != nil { + return m.AccountId + } + return "" +} + +func (m *SupportAccount) GetCloudResource() string { + if m != nil { + return m.CloudResource + } + return "" +} + +func (m *SupportAccount) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *SupportAccount) GetState() SupportAccount_State { + if m != nil { + return m.State + } + return SupportAccount_STATE_UNSPECIFIED +} + +func (m *SupportAccount) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *SupportAccount) GetBillingAccountName() string { + if m != nil { + return m.BillingAccountName + } + return "" +} + +func (m *SupportAccount) GetUnifyAccountId() string { + if m != nil { + return m.UnifyAccountId + } + return "" +} + +func (m *SupportAccount) GetPricingModel() SupportAccount_PricingModel { + if m != nil { + return m.PricingModel + } + return SupportAccount_PRICING_MODEL_UNKNOWN +} + +// A support case created by the user. +type Case struct { + // The resource name for the Case in format + // `supportAccounts/{account_id}/cases/{case_id}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The short summary of the issue reported in this case. 
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // The board description of issue provided with initial summary. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The product component for which this Case is reported. + Component string `protobuf:"bytes,4,opt,name=component" json:"component,omitempty"` + // The product subcomponent for which this Case is reported. + Subcomponent string `protobuf:"bytes,5,opt,name=subcomponent" json:"subcomponent,omitempty"` + // Timezone the client sending this request is in. + // It should be in a format IANA recognizes: https://www.iana.org/time-zone + // There is no additional validation done by the API. + ClientTimezone string `protobuf:"bytes,6,opt,name=client_timezone,json=clientTimezone" json:"client_timezone,omitempty"` + // The email addresses that can be copied to receive updates on this case. + // Users can specify a maximum of 10 email addresses. + CcAddresses []string `protobuf:"bytes,7,rep,name=cc_addresses,json=ccAddresses" json:"cc_addresses,omitempty"` + // The Google Cloud Platform project ID for which this case is created. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // List of customer issues associated with this case. + Issues []*CustomerIssue `protobuf:"bytes,10,rep,name=issues" json:"issues,omitempty"` + // The current priority of this case. + Priority Case_Priority `protobuf:"varint,11,opt,name=priority,enum=google.cloud.support.common.Case_Priority" json:"priority,omitempty"` + // The current state of this case. + State Case_State `protobuf:"varint,12,opt,name=state,enum=google.cloud.support.common.Case_State" json:"state,omitempty"` + // Time when this case was created. + // Output only. 
+ CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,13,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Time when this case was last updated. + // Output only. + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,14,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // Email address of user who created this case. + // Output only. It is inferred from credentials supplied during case creation. + CreatorEmail string `protobuf:"bytes,15,opt,name=creator_email,json=creatorEmail" json:"creator_email,omitempty"` + // The issue category applicable to this case. + Category string `protobuf:"bytes,16,opt,name=category" json:"category,omitempty"` +} + +func (m *Case) Reset() { *m = Case{} } +func (m *Case) String() string { return proto.CompactTextString(m) } +func (*Case) ProtoMessage() {} +func (*Case) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Case) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Case) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Case) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Case) GetComponent() string { + if m != nil { + return m.Component + } + return "" +} + +func (m *Case) GetSubcomponent() string { + if m != nil { + return m.Subcomponent + } + return "" +} + +func (m *Case) GetClientTimezone() string { + if m != nil { + return m.ClientTimezone + } + return "" +} + +func (m *Case) GetCcAddresses() []string { + if m != nil { + return m.CcAddresses + } + return nil +} + +func (m *Case) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *Case) GetIssues() []*CustomerIssue { + if m != nil { + return m.Issues + } + return nil +} + +func (m *Case) GetPriority() Case_Priority { + if m != nil { + return m.Priority + } + return Case_PRIORITY_UNSPECIFIED +} + +func (m *Case) GetState() Case_State 
{ + if m != nil { + return m.State + } + return Case_STATE_UNSPECIFIED +} + +func (m *Case) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Case) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *Case) GetCreatorEmail() string { + if m != nil { + return m.CreatorEmail + } + return "" +} + +func (m *Case) GetCategory() string { + if m != nil { + return m.Category + } + return "" +} + +// Reference to a Google internal ticket used for investigating a support case. +// Not every support case will have an internal ticket associated with it. +// A support case can have multiple tickets linked to it. +type CustomerIssue struct { + // Unique identifier for the internal issue. + // Output only. + IssueId string `protobuf:"bytes,1,opt,name=issue_id,json=issueId" json:"issue_id,omitempty"` + // Represents current status of the internal ticket. + // Output only. + State CustomerIssue_IssueState `protobuf:"varint,2,opt,name=state,enum=google.cloud.support.common.CustomerIssue_IssueState" json:"state,omitempty"` + // Time when the internal issue was created. + // Output only. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Time when the internal issue was marked as resolved. + // Output only. + ResolveTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=resolve_time,json=resolveTime" json:"resolve_time,omitempty"` + // Time when the internal issue was last updated. + // Output only. 
+ UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` +} + +func (m *CustomerIssue) Reset() { *m = CustomerIssue{} } +func (m *CustomerIssue) String() string { return proto.CompactTextString(m) } +func (*CustomerIssue) ProtoMessage() {} +func (*CustomerIssue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *CustomerIssue) GetIssueId() string { + if m != nil { + return m.IssueId + } + return "" +} + +func (m *CustomerIssue) GetState() CustomerIssue_IssueState { + if m != nil { + return m.State + } + return CustomerIssue_ISSUE_STATE_UNSPECIFIED +} + +func (m *CustomerIssue) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *CustomerIssue) GetResolveTime() *google_protobuf1.Timestamp { + if m != nil { + return m.ResolveTime + } + return nil +} + +func (m *CustomerIssue) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +// A message that contains mapping of a user and their role under a support +// account. +type SupportRole struct { + // Email address of user being added through this Role. + Email string `protobuf:"bytes,1,opt,name=email" json:"email,omitempty"` + // The type of role assigned to user. 
+ Role SupportRole_Role `protobuf:"varint,2,opt,name=role,enum=google.cloud.support.common.SupportRole_Role" json:"role,omitempty"` +} + +func (m *SupportRole) Reset() { *m = SupportRole{} } +func (m *SupportRole) String() string { return proto.CompactTextString(m) } +func (*SupportRole) ProtoMessage() {} +func (*SupportRole) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SupportRole) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *SupportRole) GetRole() SupportRole_Role { + if m != nil { + return m.Role + } + return SupportRole_ROLE_UNSPECIFIED +} + +// The comment text associated with a `Case`. +type Comment struct { + // Text containing a maximum of 3000 characters. + Text string `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"` + // Time when this update was created. + // Output only. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The email address/name of user who created this comment. + // Output only. + Author string `protobuf:"bytes,3,opt,name=author" json:"author,omitempty"` + // The resource name for this comment in format + // `supportAccounts/{account_id}/cases/{case_id}/{comment_id}`. + // Output only. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` +} + +func (m *Comment) Reset() { *m = Comment{} } +func (m *Comment) String() string { return proto.CompactTextString(m) } +func (*Comment) ProtoMessage() {} +func (*Comment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Comment) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +func (m *Comment) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Comment) GetAuthor() string { + if m != nil { + return m.Author + } + return "" +} + +func (m *Comment) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Represents the product component taxonomy that is to be used while creating +// or updating a `Case`. A client should obtain the list of issue categories, +// component/subcomponent from this object and specify it in `Case.category`, +// `Case.component` and `Case.subcomponent` fields respectively. +type IssueTaxonomy struct { + // Map of available categories. + Categories map[string]*IssueTaxonomy_Category `protobuf:"bytes,1,rep,name=categories" json:"categories,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *IssueTaxonomy) Reset() { *m = IssueTaxonomy{} } +func (m *IssueTaxonomy) String() string { return proto.CompactTextString(m) } +func (*IssueTaxonomy) ProtoMessage() {} +func (*IssueTaxonomy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *IssueTaxonomy) GetCategories() map[string]*IssueTaxonomy_Category { + if m != nil { + return m.Categories + } + return nil +} + +// The representation of a product component. It is composed of a canonical +// name for the product (e.g., Google App Engine), languages in which a +// support ticket can be created under this component, a template that +// provides hints on important details to be filled out before submitting a +// case. 
It also contains an embedded list of product subcomponents that have +// similar attributes as top-level components. +// (e.g., Google App Engine > Memcache). +type IssueTaxonomy_Component struct { + // User friendly name of this component. + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // List of languages in which a support case can be created under this + // component. Represented by language codes in ISO_639-1 standard. + Languages []string `protobuf:"bytes,2,rep,name=languages" json:"languages,omitempty"` + // Template to be used while filling the description of a support case. + Template string `protobuf:"bytes,3,opt,name=template" json:"template,omitempty"` + // List of subcomponents under this component. + Subcomponents []*IssueTaxonomy_Component `protobuf:"bytes,4,rep,name=subcomponents" json:"subcomponents,omitempty"` +} + +func (m *IssueTaxonomy_Component) Reset() { *m = IssueTaxonomy_Component{} } +func (m *IssueTaxonomy_Component) String() string { return proto.CompactTextString(m) } +func (*IssueTaxonomy_Component) ProtoMessage() {} +func (*IssueTaxonomy_Component) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 0} } + +func (m *IssueTaxonomy_Component) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *IssueTaxonomy_Component) GetLanguages() []string { + if m != nil { + return m.Languages + } + return nil +} + +func (m *IssueTaxonomy_Component) GetTemplate() string { + if m != nil { + return m.Template + } + return "" +} + +func (m *IssueTaxonomy_Component) GetSubcomponents() []*IssueTaxonomy_Component { + if m != nil { + return m.Subcomponents + } + return nil +} + +// Represents the category of issue (Technical or Non-Technical) +// reported through a support case. +type IssueTaxonomy_Category struct { + // User friendly name of this category. 
+ DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Map of product components under this category. + Components map[string]*IssueTaxonomy_Component `protobuf:"bytes,2,rep,name=components" json:"components,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *IssueTaxonomy_Category) Reset() { *m = IssueTaxonomy_Category{} } +func (m *IssueTaxonomy_Category) String() string { return proto.CompactTextString(m) } +func (*IssueTaxonomy_Category) ProtoMessage() {} +func (*IssueTaxonomy_Category) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 1} } + +func (m *IssueTaxonomy_Category) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *IssueTaxonomy_Category) GetComponents() map[string]*IssueTaxonomy_Component { + if m != nil { + return m.Components + } + return nil +} + +func init() { + proto.RegisterType((*SupportAccount)(nil), "google.cloud.support.common.SupportAccount") + proto.RegisterType((*Case)(nil), "google.cloud.support.common.Case") + proto.RegisterType((*CustomerIssue)(nil), "google.cloud.support.common.CustomerIssue") + proto.RegisterType((*SupportRole)(nil), "google.cloud.support.common.SupportRole") + proto.RegisterType((*Comment)(nil), "google.cloud.support.common.Comment") + proto.RegisterType((*IssueTaxonomy)(nil), "google.cloud.support.common.IssueTaxonomy") + proto.RegisterType((*IssueTaxonomy_Component)(nil), "google.cloud.support.common.IssueTaxonomy.Component") + proto.RegisterType((*IssueTaxonomy_Category)(nil), "google.cloud.support.common.IssueTaxonomy.Category") + proto.RegisterEnum("google.cloud.support.common.SupportAccount_State", SupportAccount_State_name, SupportAccount_State_value) + proto.RegisterEnum("google.cloud.support.common.SupportAccount_PricingModel", SupportAccount_PricingModel_name, SupportAccount_PricingModel_value) + 
proto.RegisterEnum("google.cloud.support.common.Case_Priority", Case_Priority_name, Case_Priority_value) + proto.RegisterEnum("google.cloud.support.common.Case_State", Case_State_name, Case_State_value) + proto.RegisterEnum("google.cloud.support.common.CustomerIssue_IssueState", CustomerIssue_IssueState_name, CustomerIssue_IssueState_value) + proto.RegisterEnum("google.cloud.support.common.SupportRole_Role", SupportRole_Role_name, SupportRole_Role_value) +} + +func init() { proto.RegisterFile("google/cloud/support/common.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1336 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x61, 0x6e, 0xdb, 0xc6, + 0x12, 0x0e, 0x29, 0xc9, 0x92, 0x46, 0x96, 0xbd, 0x59, 0x38, 0x79, 0x8c, 0x92, 0xbc, 0x38, 0x7a, + 0x78, 0x88, 0x51, 0xa0, 0x72, 0xe2, 0xa4, 0x40, 0x90, 0x20, 0x3f, 0x64, 0x6a, 0x2d, 0xb0, 0x91, + 0x49, 0x82, 0xa4, 0xe3, 0x24, 0x45, 0x41, 0xd0, 0xd4, 0x46, 0x65, 0x43, 0x72, 0x09, 0x92, 0x4a, + 0xa3, 0x1e, 0xa0, 0x3d, 0x45, 0xef, 0xd0, 0x5f, 0xbd, 0x40, 0x7b, 0x83, 0xde, 0xa1, 0xe7, 0x28, + 0x76, 0x49, 0xc9, 0xb2, 0x63, 0xd8, 0x51, 0xfe, 0x68, 0x39, 0xb3, 0x33, 0xb3, 0x33, 0xb3, 0xdf, + 0xb7, 0x63, 0xc3, 0xfd, 0x09, 0x63, 0x93, 0x90, 0xee, 0xfa, 0x21, 0x9b, 0x8e, 0x77, 0xb3, 0x69, + 0x92, 0xb0, 0x34, 0xdf, 0xf5, 0x59, 0x14, 0xb1, 0xb8, 0x97, 0xa4, 0x2c, 0x67, 0xf8, 0x76, 0x61, + 0xd2, 0x13, 0x26, 0xbd, 0xd2, 0xa4, 0x57, 0x98, 0x74, 0xee, 0x94, 0xfe, 0x5e, 0x12, 0xec, 0x7a, + 0x71, 0xcc, 0x72, 0x2f, 0x0f, 0x58, 0x9c, 0x15, 0xae, 0x9d, 0x7b, 0xe5, 0xae, 0x90, 0x4e, 0xa6, + 0xef, 0x76, 0xf3, 0x20, 0xa2, 0x59, 0xee, 0x45, 0x49, 0x61, 0xd0, 0xfd, 0xa7, 0x0a, 0x1b, 0x76, + 0x11, 0xb1, 0xef, 0xfb, 0x6c, 0x1a, 0xe7, 0x18, 0x43, 0x35, 0xf6, 0x22, 0xaa, 0x48, 0xdb, 0xd2, + 0x4e, 0xd3, 0x12, 0xdf, 0xf8, 0x2e, 0x80, 0x57, 0x6c, 0xbb, 0xc1, 0x58, 0x91, 0xc5, 0x4e, 0xb3, + 0xd4, 0x68, 0x63, 0xfc, 0x7f, 0xd8, 0x10, 0xc9, 0xb9, 0x29, 0xcd, 0xd8, 0x34, 0xf5, 
0xa9, 0x52, + 0x11, 0x26, 0x6d, 0xa1, 0xb5, 0x4a, 0x25, 0xbe, 0x0f, 0xeb, 0xe3, 0x20, 0x4b, 0x42, 0x6f, 0xe6, + 0x8a, 0x13, 0xaa, 0xc2, 0xa8, 0x55, 0xea, 0x74, 0x7e, 0xd0, 0x10, 0x6a, 0x59, 0xee, 0xe5, 0x54, + 0xa9, 0x6d, 0x4b, 0x3b, 0x1b, 0x7b, 0x8f, 0x7a, 0x97, 0xd4, 0xde, 0x3b, 0x9b, 0x78, 0xcf, 0xe6, + 0x8e, 0x56, 0xe1, 0x8f, 0x9f, 0x43, 0xcb, 0x4f, 0xa9, 0x97, 0x53, 0x97, 0x97, 0xac, 0xac, 0x6d, + 0x4b, 0x3b, 0xad, 0xbd, 0xce, 0x3c, 0xdc, 0xbc, 0x1f, 0x3d, 0x67, 0xde, 0x0f, 0x0b, 0x0a, 0x73, + 0xae, 0xc0, 0x0f, 0x61, 0xeb, 0x24, 0x08, 0xc3, 0x20, 0x9e, 0xb8, 0xf3, 0xb2, 0x45, 0xc2, 0x75, + 0x91, 0x30, 0x2e, 0xf7, 0xca, 0x73, 0x45, 0xde, 0x3b, 0x80, 0xa6, 0x71, 0xf0, 0x6e, 0xe6, 0x2e, + 0xb5, 0xa9, 0x21, 0xac, 0x37, 0x84, 0xbe, 0xbf, 0xe8, 0xd5, 0xf7, 0xd0, 0x4e, 0xd2, 0xc0, 0xe7, + 0xb1, 0x23, 0x36, 0xa6, 0xa1, 0xd2, 0x14, 0x95, 0x3e, 0x5d, 0xa5, 0x52, 0xb3, 0x08, 0x70, 0xc8, + 0xfd, 0xad, 0xf5, 0x64, 0x49, 0xea, 0x1e, 0x42, 0x4d, 0xf4, 0x01, 0xdf, 0x80, 0xeb, 0xb6, 0xd3, + 0x77, 0x88, 0x7b, 0xa4, 0xdb, 0x26, 0x51, 0xb5, 0x03, 0x8d, 0x0c, 0xd0, 0x35, 0x0c, 0xb0, 0xd6, + 0x57, 0x1d, 0xed, 0x15, 0x41, 0x12, 0x6e, 0x41, 0xdd, 0x24, 0xfa, 0x40, 0xd3, 0x87, 0x48, 0xc6, + 0x5b, 0x80, 0x4a, 0xc1, 0x1d, 0x90, 0x11, 0x71, 0x34, 0x43, 0x47, 0x95, 0xee, 0x10, 0xd6, 0x97, + 0x0f, 0xc3, 0xb7, 0xe0, 0x86, 0x69, 0x69, 0x2a, 0xb7, 0x3a, 0x34, 0x06, 0x64, 0xe4, 0x1e, 0xe9, + 0x2f, 0x75, 0xe3, 0x58, 0x47, 0xd7, 0xf0, 0x3a, 0x34, 0xcc, 0xbe, 0xfa, 0xb2, 0x3f, 0x24, 0x36, + 0x92, 0xf0, 0x06, 0xc0, 0x91, 0x4d, 0x2c, 0xd7, 0x32, 0x46, 0xc4, 0x46, 0x72, 0xf7, 0x8f, 0x3a, + 0x54, 0x55, 0x2f, 0xa3, 0x17, 0xc2, 0xeb, 0x3c, 0x30, 0xe4, 0x4f, 0x81, 0xb1, 0x0d, 0xad, 0x31, + 0xcd, 0xfc, 0x34, 0x48, 0x38, 0xbe, 0x4b, 0x7c, 0x2d, 0xab, 0xf0, 0x1d, 0x68, 0xfa, 0x2c, 0x4a, + 0x58, 0x4c, 0xe3, 0xbc, 0x84, 0xd6, 0xa9, 0x02, 0x77, 0x61, 0x3d, 0x9b, 0x9e, 0x9c, 0x1a, 0xd4, + 0x84, 0xc1, 0x19, 0x1d, 0x7e, 0x00, 0x9b, 0x7e, 0x18, 0xd0, 0x38, 0x17, 0x98, 0xf9, 0x99, 0xc5, + 0x05, 0x6e, 0x9a, 0xd6, 
0x46, 0xa1, 0x76, 0x4a, 0x2d, 0xcf, 0xd7, 0xf7, 0x5d, 0x6f, 0x3c, 0x4e, + 0x69, 0x96, 0xd1, 0x4c, 0xa9, 0x6f, 0x57, 0x78, 0x36, 0xbe, 0xdf, 0x9f, 0xab, 0x38, 0x63, 0x92, + 0x94, 0xfd, 0x48, 0xfd, 0x25, 0x28, 0x34, 0x4b, 0x8d, 0x36, 0xc6, 0xfb, 0xb0, 0x16, 0x64, 0xd9, + 0x94, 0x66, 0x0a, 0x6c, 0x57, 0x76, 0x5a, 0x7b, 0x5f, 0x5d, 0x7a, 0xfd, 0xea, 0x34, 0xcb, 0x59, + 0x44, 0x53, 0x8d, 0xbb, 0x58, 0xa5, 0x27, 0x3e, 0x80, 0x46, 0x92, 0x06, 0x2c, 0x0d, 0xf2, 0x99, + 0xd2, 0x12, 0x20, 0xba, 0x22, 0x8a, 0x97, 0x51, 0x0e, 0x1d, 0xe1, 0x61, 0x2d, 0x7c, 0xf1, 0x8b, + 0x39, 0xe7, 0xd6, 0x45, 0x90, 0x07, 0x57, 0x07, 0xb9, 0x8c, 0x69, 0xed, 0x95, 0x98, 0xf6, 0x1c, + 0x5a, 0xd3, 0x64, 0xbc, 0x70, 0xde, 0xb8, 0xda, 0xb9, 0x30, 0x17, 0xce, 0xff, 0x83, 0xb6, 0x08, + 0xc5, 0x52, 0x97, 0x46, 0x5e, 0x10, 0x2a, 0x9b, 0xc5, 0xa5, 0x96, 0x4a, 0xc2, 0x75, 0xb8, 0x03, + 0x0d, 0xdf, 0xcb, 0xe9, 0x84, 0xa5, 0x33, 0x05, 0x89, 0xfd, 0x85, 0xdc, 0x1d, 0x41, 0x63, 0xde, + 0x0f, 0xac, 0xc0, 0x96, 0x69, 0x69, 0x86, 0xa5, 0x39, 0x6f, 0xce, 0x51, 0x66, 0x0d, 0x64, 0xf3, + 0x21, 0x92, 0xc4, 0xfa, 0x08, 0xc9, 0x62, 0xdd, 0x43, 0x15, 0xb1, 0x3e, 0x46, 0x55, 0xb1, 0x3e, + 0x41, 0xb5, 0xee, 0xdf, 0xd2, 0x15, 0xdc, 0xab, 0x43, 0x45, 0x27, 0xc7, 0x48, 0xe2, 0x54, 0xe9, + 0xdb, 0xb6, 0x36, 0xd4, 0xc9, 0x00, 0xc9, 0xf8, 0xbf, 0xd0, 0xd1, 0x74, 0xd7, 0xb4, 0x8c, 0xa1, + 0x45, 0x6c, 0xdb, 0x1d, 0x1a, 0xc6, 0x70, 0x44, 0x5c, 0xfb, 0xc8, 0x34, 0x0d, 0xcb, 0x41, 0x15, + 0xdc, 0x81, 0x9b, 0x17, 0xec, 0x13, 0x7d, 0x88, 0xaa, 0xf8, 0x36, 0xfc, 0x67, 0x79, 0x4f, 0x70, + 0xd1, 0xd5, 0x6c, 0xfb, 0x88, 0xa0, 0x1a, 0xbe, 0x0f, 0x77, 0x8f, 0xfb, 0x9a, 0xc3, 0xc9, 0x7a, + 0x60, 0x58, 0xae, 0x7a, 0x64, 0x3b, 0xc6, 0x21, 0xe7, 0x24, 0xb1, 0x4d, 0x43, 0xb7, 0x09, 0x5a, + 0xe3, 0xac, 0xb7, 0x8d, 0xd1, 0x11, 0x67, 0xbb, 0x6b, 0x1c, 0x1c, 0x10, 0x8b, 0x0c, 0x50, 0x9d, + 0x3f, 0x12, 0xea, 0xc8, 0xb0, 0xc9, 0x00, 0x35, 0xba, 0xbf, 0x55, 0xa0, 0x7d, 0x06, 0x7f, 0xf8, + 0x16, 0x34, 0x04, 0x02, 0x39, 0xb0, 0x0b, 0x16, 0xd7, 0x85, 
0xac, 0x8d, 0xf1, 0xcb, 0x39, 0x94, + 0x64, 0x01, 0xa5, 0x6f, 0x3e, 0x1f, 0xd5, 0x3d, 0xf1, 0x7b, 0x19, 0xb0, 0x2a, 0x2b, 0x01, 0xeb, + 0x05, 0xac, 0xf3, 0x61, 0x14, 0x7e, 0x28, 0xbd, 0xab, 0x57, 0x7a, 0xb7, 0x4a, 0xfb, 0x8b, 0x70, + 0x59, 0x5b, 0x05, 0x97, 0xdd, 0xf7, 0x00, 0xa7, 0xd5, 0x88, 0x2b, 0xe2, 0x17, 0xe2, 0x5e, 0x04, + 0x89, 0x06, 0x54, 0x0d, 0x93, 0xe8, 0x48, 0xc2, 0x9b, 0xd0, 0x5a, 0xba, 0x49, 0x24, 0xe3, 0x26, + 0xd4, 0x0e, 0xb4, 0xd7, 0x64, 0x80, 0x2a, 0x1c, 0x2f, 0xc7, 0x86, 0xee, 0xb8, 0x07, 0xda, 0x6b, + 0x54, 0xe5, 0xd2, 0x2b, 0x62, 0x15, 0x11, 0x6a, 0xdd, 0x3f, 0x25, 0x68, 0x95, 0xe3, 0xc1, 0x62, + 0x21, 0xc5, 0x5b, 0x50, 0x2b, 0xc8, 0x50, 0x5c, 0x4d, 0x21, 0xe0, 0x3e, 0x54, 0x53, 0x16, 0xce, + 0xef, 0xe5, 0xeb, 0xcf, 0x19, 0x36, 0x3c, 0x5a, 0x8f, 0xff, 0x58, 0xc2, 0xb5, 0xfb, 0x1d, 0x54, + 0xcb, 0x03, 0x10, 0x7f, 0xd4, 0xcf, 0x15, 0xd2, 0x84, 0xda, 0x7e, 0xdf, 0xd6, 0x54, 0x24, 0xe1, + 0x36, 0x34, 0x07, 0xe4, 0x15, 0x19, 0x19, 0x26, 0xb1, 0x90, 0xcc, 0x45, 0xfe, 0xd5, 0x2f, 0x26, + 0x8a, 0x40, 0x9c, 0xe6, 0x10, 0xd7, 0x22, 0x23, 0xad, 0xbf, 0xaf, 0x8d, 0x34, 0xe7, 0x0d, 0xaa, + 0x76, 0x7f, 0x91, 0xa0, 0xae, 0xb2, 0x28, 0xa2, 0xc5, 0x1f, 0x20, 0x39, 0xfd, 0x98, 0xcf, 0x27, + 0x04, 0xff, 0x3e, 0x8f, 0x05, 0x79, 0x25, 0x2c, 0xdc, 0x84, 0x35, 0x6f, 0x9a, 0xff, 0xc0, 0xd2, + 0x72, 0x6c, 0x94, 0xd2, 0x62, 0x14, 0x55, 0x4f, 0x47, 0x51, 0xf7, 0xf7, 0x1a, 0xb4, 0xc5, 0xe5, + 0x39, 0xde, 0x47, 0x16, 0xb3, 0x68, 0x86, 0xdf, 0x02, 0x94, 0x0f, 0x46, 0x40, 0x33, 0x45, 0x12, + 0xcf, 0xf5, 0xb3, 0x4b, 0x1b, 0x78, 0xc6, 0xbf, 0xa7, 0x2e, 0x9c, 0x49, 0x9c, 0xa7, 0x33, 0x6b, + 0x29, 0x5a, 0xe7, 0x2f, 0x09, 0x9a, 0xea, 0x62, 0xfe, 0x9c, 0x1f, 0x83, 0xd2, 0xa7, 0x63, 0xf0, + 0x0e, 0x34, 0x43, 0x2f, 0x9e, 0x4c, 0xbd, 0x09, 0xcd, 0x14, 0x59, 0x8c, 0x9d, 0x53, 0x05, 0x7f, + 0xeb, 0x72, 0x1a, 0x25, 0x21, 0x67, 0x60, 0x51, 0xea, 0x42, 0xc6, 0x6f, 0xa1, 0xbd, 0x3c, 0xec, + 0x32, 0xa5, 0x2a, 0x2a, 0x79, 0xb2, 0x4a, 0x25, 0x73, 0x67, 0xeb, 0x6c, 0xa8, 0xce, 0xaf, 0x32, + 
0x34, 0xca, 0x32, 0x67, 0x9f, 0x53, 0x85, 0x0f, 0xb0, 0x94, 0x88, 0x2c, 0x12, 0x51, 0x57, 0x6f, + 0xe9, 0x52, 0x46, 0x8b, 0xde, 0x9e, 0x26, 0x95, 0xc1, 0xe6, 0xb9, 0x6d, 0x8c, 0xa0, 0xf2, 0x9e, + 0xce, 0xca, 0x8c, 0xf8, 0x27, 0xfe, 0x16, 0x6a, 0x1f, 0xbc, 0x70, 0x3a, 0x47, 0xd4, 0x97, 0x75, + 0xa3, 0x08, 0xf1, 0x4c, 0x7e, 0x2a, 0x75, 0x52, 0xd8, 0x3c, 0x77, 0xdf, 0x17, 0x1c, 0xaa, 0x9d, + 0x3d, 0xf4, 0xf1, 0x17, 0x54, 0xbe, 0x74, 0xe6, 0xfe, 0x4f, 0x70, 0xcf, 0x67, 0xd1, 0x65, 0x41, + 0xf6, 0xaf, 0xab, 0x5c, 0x5b, 0x12, 0xdb, 0xe4, 0x6c, 0x79, 0xdb, 0x2f, 0xed, 0x27, 0x8c, 0xe3, + 0xa7, 0xc7, 0xd2, 0xc9, 0xee, 0x84, 0xc6, 0x82, 0x49, 0xbb, 0xc5, 0x96, 0x97, 0x04, 0xd9, 0x85, + 0xff, 0x97, 0x3c, 0x2f, 0x96, 0x93, 0x35, 0x61, 0xfd, 0xf8, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x0c, 0xd0, 0x7b, 0x46, 0xc4, 0x0c, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/support/v1alpha1/cloud_support.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/support/v1alpha1/cloud_support.pb.go new file mode 100644 index 000000000..80ae418c0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/support/v1alpha1/cloud_support.pb.go @@ -0,0 +1,806 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/support/v1alpha1/cloud_support.proto + +/* +Package support is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/support/v1alpha1/cloud_support.proto + +It has these top-level messages: + GetSupportAccountRequest + ListSupportAccountsRequest + ListSupportAccountsResponse + GetCaseRequest + ListCasesRequest + ListCasesResponse + ListCommentsRequest + ListCommentsResponse + CreateCaseRequest + UpdateCaseRequest + CreateCommentRequest + GetIssueTaxonomyRequest +*/ +package support + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_cloud_support_common "google.golang.org/genproto/googleapis/cloud/support/common" +import _ "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The request message for `GetSupportAccount`. +type GetSupportAccountRequest struct { + // The resource name of the support accounts. For example: + // `supportAccounts/accountA`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetSupportAccountRequest) Reset() { *m = GetSupportAccountRequest{} } +func (m *GetSupportAccountRequest) String() string { return proto.CompactTextString(m) } +func (*GetSupportAccountRequest) ProtoMessage() {} +func (*GetSupportAccountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *GetSupportAccountRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request message for `ListSupportAccount`. +type ListSupportAccountsRequest struct { + // The filter applied to search results. It only supports filtering a support + // account list by a cloud_resource. For example, to filter results by support + // accounts associated with an Organization, its value should be: + // "cloud_resource:organizations/" + Filter string `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` + // Maximum number of accounts fetched with each request. + PageSize int64 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // A token identifying the page of results to return. If unspecified, the + // first page is retrieved. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListSupportAccountsRequest) Reset() { *m = ListSupportAccountsRequest{} } +func (m *ListSupportAccountsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSupportAccountsRequest) ProtoMessage() {} +func (*ListSupportAccountsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ListSupportAccountsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListSupportAccountsRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListSupportAccountsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response message for `ListSupportAccount`. +type ListSupportAccountsResponse struct { + // A list of support accounts. + Accounts []*google_cloud_support_common.SupportAccount `protobuf:"bytes,1,rep,name=accounts" json:"accounts,omitempty"` + // A token to retrieve the next page of results. This should be passed on in + // `page_token` field of `ListSupportAccountRequest` for next request. If + // unspecified, there are no more results to retrieve. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListSupportAccountsResponse) Reset() { *m = ListSupportAccountsResponse{} } +func (m *ListSupportAccountsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSupportAccountsResponse) ProtoMessage() {} +func (*ListSupportAccountsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListSupportAccountsResponse) GetAccounts() []*google_cloud_support_common.SupportAccount { + if m != nil { + return m.Accounts + } + return nil +} + +func (m *ListSupportAccountsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request message for `GetCase` method. +type GetCaseRequest struct { + // Name of case resource requested. + // For example: "supportAccounts/accountA/cases/123" + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetCaseRequest) Reset() { *m = GetCaseRequest{} } +func (m *GetCaseRequest) String() string { return proto.CompactTextString(m) } +func (*GetCaseRequest) ProtoMessage() {} +func (*GetCaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GetCaseRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request message for `ListCase` method. +type ListCasesRequest struct { + // Name of the account resource for which cases are requested. For example: + // "supportAccounts/accountA" + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The filter applied to the search results. Currently it only accepts "OPEN" + // or "CLOSED" strings, filtering out cases that are open or resolved. + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // Maximum number of cases fetched with each request. 
+ PageSize int64 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // A token identifying the page of results to return. If unspecified, the + // first page is retrieved. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListCasesRequest) Reset() { *m = ListCasesRequest{} } +func (m *ListCasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListCasesRequest) ProtoMessage() {} +func (*ListCasesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ListCasesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListCasesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListCasesRequest) GetPageSize() int64 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListCasesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response message for `ListCase` method. +type ListCasesResponse struct { + // A list of cases. + Cases []*google_cloud_support_common.Case `protobuf:"bytes,1,rep,name=cases" json:"cases,omitempty"` + // A token to retrieve the next page of results. This should be passed on in + // `page_token` field of `ListCaseRequest` for next request. If unspecified, + // there are no more results to retrieve. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListCasesResponse) Reset() { *m = ListCasesResponse{} } +func (m *ListCasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListCasesResponse) ProtoMessage() {} +func (*ListCasesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListCasesResponse) GetCases() []*google_cloud_support_common.Case { + if m != nil { + return m.Cases + } + return nil +} + +func (m *ListCasesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request message for `ListComments` method. +type ListCommentsRequest struct { + // The resource name of case for which comments should be listed. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ListCommentsRequest) Reset() { *m = ListCommentsRequest{} } +func (m *ListCommentsRequest) String() string { return proto.CompactTextString(m) } +func (*ListCommentsRequest) ProtoMessage() {} +func (*ListCommentsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ListCommentsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The response message for `ListComments` method. +type ListCommentsResponse struct { + // A list of comments. 
+ Comments []*google_cloud_support_common.Comment `protobuf:"bytes,1,rep,name=comments" json:"comments,omitempty"` +} + +func (m *ListCommentsResponse) Reset() { *m = ListCommentsResponse{} } +func (m *ListCommentsResponse) String() string { return proto.CompactTextString(m) } +func (*ListCommentsResponse) ProtoMessage() {} +func (*ListCommentsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListCommentsResponse) GetComments() []*google_cloud_support_common.Comment { + if m != nil { + return m.Comments + } + return nil +} + +// The request message for `CreateCase` method. +type CreateCaseRequest struct { + // The resource name for `SupportAccount` under which this case is created. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The case resource to create. + Case *google_cloud_support_common.Case `protobuf:"bytes,2,opt,name=case" json:"case,omitempty"` +} + +func (m *CreateCaseRequest) Reset() { *m = CreateCaseRequest{} } +func (m *CreateCaseRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCaseRequest) ProtoMessage() {} +func (*CreateCaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CreateCaseRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateCaseRequest) GetCase() *google_cloud_support_common.Case { + if m != nil { + return m.Case + } + return nil +} + +// The request message for `UpdateCase` method. +type UpdateCaseRequest struct { + // The case resource to update. + Case *google_cloud_support_common.Case `protobuf:"bytes,1,opt,name=case" json:"case,omitempty"` + // A field that represents attributes of a Case object that should be updated + // as part of this request. 
+ UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateCaseRequest) Reset() { *m = UpdateCaseRequest{} } +func (m *UpdateCaseRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateCaseRequest) ProtoMessage() {} +func (*UpdateCaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *UpdateCaseRequest) GetCase() *google_cloud_support_common.Case { + if m != nil { + return m.Case + } + return nil +} + +func (m *UpdateCaseRequest) GetUpdateMask() *google_protobuf3.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// The request message for `CreateComment` method. +type CreateCommentRequest struct { + // The resource name of case to which this comment should be added. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The `Comment` to be added to this case. + Comment *google_cloud_support_common.Comment `protobuf:"bytes,2,opt,name=comment" json:"comment,omitempty"` +} + +func (m *CreateCommentRequest) Reset() { *m = CreateCommentRequest{} } +func (m *CreateCommentRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCommentRequest) ProtoMessage() {} +func (*CreateCommentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *CreateCommentRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateCommentRequest) GetComment() *google_cloud_support_common.Comment { + if m != nil { + return m.Comment + } + return nil +} + +// The request message for `GetIssueTaxonomy` method. 
+type GetIssueTaxonomyRequest struct { +} + +func (m *GetIssueTaxonomyRequest) Reset() { *m = GetIssueTaxonomyRequest{} } +func (m *GetIssueTaxonomyRequest) String() string { return proto.CompactTextString(m) } +func (*GetIssueTaxonomyRequest) ProtoMessage() {} +func (*GetIssueTaxonomyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func init() { + proto.RegisterType((*GetSupportAccountRequest)(nil), "google.cloud.support.v1alpha1.GetSupportAccountRequest") + proto.RegisterType((*ListSupportAccountsRequest)(nil), "google.cloud.support.v1alpha1.ListSupportAccountsRequest") + proto.RegisterType((*ListSupportAccountsResponse)(nil), "google.cloud.support.v1alpha1.ListSupportAccountsResponse") + proto.RegisterType((*GetCaseRequest)(nil), "google.cloud.support.v1alpha1.GetCaseRequest") + proto.RegisterType((*ListCasesRequest)(nil), "google.cloud.support.v1alpha1.ListCasesRequest") + proto.RegisterType((*ListCasesResponse)(nil), "google.cloud.support.v1alpha1.ListCasesResponse") + proto.RegisterType((*ListCommentsRequest)(nil), "google.cloud.support.v1alpha1.ListCommentsRequest") + proto.RegisterType((*ListCommentsResponse)(nil), "google.cloud.support.v1alpha1.ListCommentsResponse") + proto.RegisterType((*CreateCaseRequest)(nil), "google.cloud.support.v1alpha1.CreateCaseRequest") + proto.RegisterType((*UpdateCaseRequest)(nil), "google.cloud.support.v1alpha1.UpdateCaseRequest") + proto.RegisterType((*CreateCommentRequest)(nil), "google.cloud.support.v1alpha1.CreateCommentRequest") + proto.RegisterType((*GetIssueTaxonomyRequest)(nil), "google.cloud.support.v1alpha1.GetIssueTaxonomyRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for CloudSupport service + +type CloudSupportClient interface { + // Retrieves the support account details given an account identifier. + // The authenticated user calling this method must be the account owner. + GetSupportAccount(ctx context.Context, in *GetSupportAccountRequest, opts ...grpc.CallOption) (*google_cloud_support_common.SupportAccount, error) + // Retrieves the list of accounts the current authenticated user has access + // to. + ListSupportAccounts(ctx context.Context, in *ListSupportAccountsRequest, opts ...grpc.CallOption) (*ListSupportAccountsResponse, error) + // Retrieves the details for a support case. The current authenticated user + // calling this method must have permissions to view this case. + GetCase(ctx context.Context, in *GetCaseRequest, opts ...grpc.CallOption) (*google_cloud_support_common.Case, error) + // Retrieves the list of support cases associated with an account. The current + // authenticated user must have the permission to list and view these cases. + ListCases(ctx context.Context, in *ListCasesRequest, opts ...grpc.CallOption) (*ListCasesResponse, error) + // Lists all comments from a case. + ListComments(ctx context.Context, in *ListCommentsRequest, opts ...grpc.CallOption) (*ListCommentsResponse, error) + // Creates a case and associates it with a + // [SupportAccount][google.cloud.support.v1alpha2.SupportAcccount]. The + // authenticated user attempting this action must have permissions to create a + // `Case` under that [SupportAccount]. + CreateCase(ctx context.Context, in *CreateCaseRequest, opts ...grpc.CallOption) (*google_cloud_support_common.Case, error) + // Updates a support case. Only a small set of details (priority, subject and + // cc_address) can be update after a case is created. + UpdateCase(ctx context.Context, in *UpdateCaseRequest, opts ...grpc.CallOption) (*google_cloud_support_common.Case, error) + // Adds a new comment to a case. 
+ CreateComment(ctx context.Context, in *CreateCommentRequest, opts ...grpc.CallOption) (*google_cloud_support_common.Comment, error) + // Retrieves the taxonomy of product categories and components to be used + // while creating a support case. + GetIssueTaxonomy(ctx context.Context, in *GetIssueTaxonomyRequest, opts ...grpc.CallOption) (*google_cloud_support_common.IssueTaxonomy, error) +} + +type cloudSupportClient struct { + cc *grpc.ClientConn +} + +func NewCloudSupportClient(cc *grpc.ClientConn) CloudSupportClient { + return &cloudSupportClient{cc} +} + +func (c *cloudSupportClient) GetSupportAccount(ctx context.Context, in *GetSupportAccountRequest, opts ...grpc.CallOption) (*google_cloud_support_common.SupportAccount, error) { + out := new(google_cloud_support_common.SupportAccount) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/GetSupportAccount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudSupportClient) ListSupportAccounts(ctx context.Context, in *ListSupportAccountsRequest, opts ...grpc.CallOption) (*ListSupportAccountsResponse, error) { + out := new(ListSupportAccountsResponse) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/ListSupportAccounts", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudSupportClient) GetCase(ctx context.Context, in *GetCaseRequest, opts ...grpc.CallOption) (*google_cloud_support_common.Case, error) { + out := new(google_cloud_support_common.Case) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/GetCase", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudSupportClient) ListCases(ctx context.Context, in *ListCasesRequest, opts ...grpc.CallOption) (*ListCasesResponse, error) { + out := new(ListCasesResponse) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/ListCases", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudSupportClient) ListComments(ctx context.Context, in *ListCommentsRequest, opts ...grpc.CallOption) (*ListCommentsResponse, error) { + out := new(ListCommentsResponse) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/ListComments", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudSupportClient) CreateCase(ctx context.Context, in *CreateCaseRequest, opts ...grpc.CallOption) (*google_cloud_support_common.Case, error) { + out := new(google_cloud_support_common.Case) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/CreateCase", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudSupportClient) UpdateCase(ctx context.Context, in *UpdateCaseRequest, opts ...grpc.CallOption) (*google_cloud_support_common.Case, error) { + out := new(google_cloud_support_common.Case) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/UpdateCase", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudSupportClient) CreateComment(ctx context.Context, in *CreateCommentRequest, opts ...grpc.CallOption) (*google_cloud_support_common.Comment, error) { + out := new(google_cloud_support_common.Comment) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/CreateComment", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudSupportClient) GetIssueTaxonomy(ctx context.Context, in *GetIssueTaxonomyRequest, opts ...grpc.CallOption) (*google_cloud_support_common.IssueTaxonomy, error) { + out := new(google_cloud_support_common.IssueTaxonomy) + err := grpc.Invoke(ctx, "/google.cloud.support.v1alpha1.CloudSupport/GetIssueTaxonomy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for CloudSupport service + +type CloudSupportServer interface { + // Retrieves the support account details given an account identifier. + // The authenticated user calling this method must be the account owner. + GetSupportAccount(context.Context, *GetSupportAccountRequest) (*google_cloud_support_common.SupportAccount, error) + // Retrieves the list of accounts the current authenticated user has access + // to. + ListSupportAccounts(context.Context, *ListSupportAccountsRequest) (*ListSupportAccountsResponse, error) + // Retrieves the details for a support case. The current authenticated user + // calling this method must have permissions to view this case. + GetCase(context.Context, *GetCaseRequest) (*google_cloud_support_common.Case, error) + // Retrieves the list of support cases associated with an account. The current + // authenticated user must have the permission to list and view these cases. + ListCases(context.Context, *ListCasesRequest) (*ListCasesResponse, error) + // Lists all comments from a case. + ListComments(context.Context, *ListCommentsRequest) (*ListCommentsResponse, error) + // Creates a case and associates it with a + // [SupportAccount][google.cloud.support.v1alpha2.SupportAcccount]. The + // authenticated user attempting this action must have permissions to create a + // `Case` under that [SupportAccount]. + CreateCase(context.Context, *CreateCaseRequest) (*google_cloud_support_common.Case, error) + // Updates a support case. 
Only a small set of details (priority, subject and + // cc_address) can be update after a case is created. + UpdateCase(context.Context, *UpdateCaseRequest) (*google_cloud_support_common.Case, error) + // Adds a new comment to a case. + CreateComment(context.Context, *CreateCommentRequest) (*google_cloud_support_common.Comment, error) + // Retrieves the taxonomy of product categories and components to be used + // while creating a support case. + GetIssueTaxonomy(context.Context, *GetIssueTaxonomyRequest) (*google_cloud_support_common.IssueTaxonomy, error) +} + +func RegisterCloudSupportServer(s *grpc.Server, srv CloudSupportServer) { + s.RegisterService(&_CloudSupport_serviceDesc, srv) +} + +func _CloudSupport_GetSupportAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSupportAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).GetSupportAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/GetSupportAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudSupportServer).GetSupportAccount(ctx, req.(*GetSupportAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudSupport_ListSupportAccounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSupportAccountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).ListSupportAccounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/ListSupportAccounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(CloudSupportServer).ListSupportAccounts(ctx, req.(*ListSupportAccountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudSupport_GetCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).GetCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/GetCase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudSupportServer).GetCase(ctx, req.(*GetCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudSupport_ListCases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).ListCases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/ListCases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudSupportServer).ListCases(ctx, req.(*ListCasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudSupport_ListComments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCommentsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).ListComments(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/ListComments", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(CloudSupportServer).ListComments(ctx, req.(*ListCommentsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudSupport_CreateCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).CreateCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/CreateCase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudSupportServer).CreateCase(ctx, req.(*CreateCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudSupport_UpdateCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).UpdateCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/UpdateCase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudSupportServer).UpdateCase(ctx, req.(*UpdateCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudSupport_CreateComment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCommentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).CreateComment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/CreateComment", + } + handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudSupportServer).CreateComment(ctx, req.(*CreateCommentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudSupport_GetIssueTaxonomy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetIssueTaxonomyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudSupportServer).GetIssueTaxonomy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.support.v1alpha1.CloudSupport/GetIssueTaxonomy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudSupportServer).GetIssueTaxonomy(ctx, req.(*GetIssueTaxonomyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CloudSupport_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.support.v1alpha1.CloudSupport", + HandlerType: (*CloudSupportServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSupportAccount", + Handler: _CloudSupport_GetSupportAccount_Handler, + }, + { + MethodName: "ListSupportAccounts", + Handler: _CloudSupport_ListSupportAccounts_Handler, + }, + { + MethodName: "GetCase", + Handler: _CloudSupport_GetCase_Handler, + }, + { + MethodName: "ListCases", + Handler: _CloudSupport_ListCases_Handler, + }, + { + MethodName: "ListComments", + Handler: _CloudSupport_ListComments_Handler, + }, + { + MethodName: "CreateCase", + Handler: _CloudSupport_CreateCase_Handler, + }, + { + MethodName: "UpdateCase", + Handler: _CloudSupport_UpdateCase_Handler, + }, + { + MethodName: "CreateComment", + Handler: _CloudSupport_CreateComment_Handler, + }, + { + MethodName: "GetIssueTaxonomy", + Handler: _CloudSupport_GetIssueTaxonomy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/support/v1alpha1/cloud_support.proto", +} + +func 
init() { proto.RegisterFile("google/cloud/support/v1alpha1/cloud_support.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 863 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x41, 0x4f, 0x33, 0x45, + 0x18, 0xce, 0xb4, 0xc8, 0x07, 0x2f, 0xdf, 0xa7, 0x5f, 0x47, 0x82, 0x65, 0x0b, 0x49, 0x3b, 0x21, + 0xa6, 0x56, 0xdd, 0x85, 0x36, 0x88, 0x96, 0x40, 0x14, 0x88, 0x8d, 0x89, 0x26, 0xa4, 0x60, 0x62, + 0xbc, 0x34, 0x43, 0x19, 0xd6, 0x95, 0xee, 0xce, 0xda, 0x99, 0x1a, 0x40, 0xbd, 0x78, 0xf1, 0xa6, + 0x07, 0x6f, 0x7a, 0xe1, 0xe2, 0x59, 0x0f, 0xfe, 0x13, 0xff, 0x82, 0xfe, 0x0f, 0xb3, 0xb3, 0xb3, + 0xed, 0x76, 0x69, 0x77, 0x17, 0x6e, 0xdd, 0x77, 0xde, 0xe7, 0x7d, 0x9f, 0x79, 0xe6, 0x9d, 0x67, + 0x0a, 0x3b, 0x36, 0xe7, 0xf6, 0x80, 0x59, 0xfd, 0x01, 0x1f, 0x5d, 0x5a, 0x62, 0xe4, 0xfb, 0x7c, + 0x28, 0xad, 0x6f, 0x77, 0xe8, 0xc0, 0xff, 0x8a, 0xee, 0x84, 0xe1, 0x9e, 0x0e, 0x9b, 0xfe, 0x90, + 0x4b, 0x8e, 0x37, 0x43, 0x88, 0xa9, 0xd6, 0xcc, 0x68, 0x2d, 0x82, 0x18, 0x1b, 0xba, 0x22, 0xf5, + 0x1d, 0x8b, 0x7a, 0x1e, 0x97, 0x54, 0x3a, 0xdc, 0x13, 0x21, 0xd8, 0xa8, 0xcd, 0xec, 0xd7, 0xe7, + 0xae, 0xcb, 0x3d, 0x9d, 0x52, 0xd1, 0x29, 0xea, 0xeb, 0x62, 0x74, 0x65, 0x31, 0xd7, 0x97, 0xb7, + 0x7a, 0xb1, 0x9a, 0x5c, 0xbc, 0x72, 0xd8, 0xe0, 0xb2, 0xe7, 0x52, 0x71, 0x1d, 0x66, 0x10, 0x13, + 0xca, 0x1d, 0x26, 0xcf, 0xc2, 0xca, 0x1f, 0xf5, 0xfb, 0x7c, 0xe4, 0xc9, 0x2e, 0xfb, 0x66, 0xc4, + 0x84, 0xc4, 0x18, 0x16, 0x3c, 0xea, 0xb2, 0x32, 0xaa, 0xa2, 0xfa, 0x72, 0x57, 0xfd, 0x26, 0x3e, + 0x18, 0x9f, 0x3a, 0x22, 0x01, 0x10, 0x11, 0x62, 0x0d, 0x16, 0xaf, 0x9c, 0x81, 0x64, 0x43, 0x8d, + 0xd1, 0x5f, 0xb8, 0x02, 0xcb, 0x3e, 0xb5, 0x59, 0x4f, 0x38, 0x77, 0xac, 0x5c, 0xa8, 0xa2, 0x7a, + 0xb1, 0xbb, 0x14, 0x04, 0xce, 0x9c, 0x3b, 0x86, 0x37, 0x01, 0xd4, 0xa2, 0xe4, 0xd7, 0xcc, 0x2b, + 0x17, 0x15, 0x50, 0xa5, 0x9f, 0x07, 0x01, 0xf2, 0x33, 0x82, 0xca, 0xcc, 0x96, 0xc2, 0xe7, 0x9e, + 0x60, 0xb8, 0x03, 0x4b, 0x54, 0xc7, 0xca, 0xa8, 
0x5a, 0xac, 0xaf, 0x34, 0xdf, 0x36, 0x67, 0x6a, + 0xae, 0x65, 0x4b, 0xec, 0x75, 0x0c, 0xc6, 0x6f, 0xc2, 0x6b, 0x1e, 0xbb, 0x91, 0xbd, 0x18, 0x99, + 0x82, 0x22, 0xf3, 0x22, 0x08, 0x9f, 0x8e, 0x09, 0x6d, 0xc1, 0xab, 0x1d, 0x26, 0x8f, 0xa9, 0x60, + 0x69, 0x42, 0xdd, 0xc1, 0xcb, 0x80, 0x75, 0x90, 0x26, 0x52, 0xf2, 0x62, 0x92, 0x15, 0xe6, 0x4b, + 0x56, 0x4c, 0x95, 0x6c, 0x21, 0x29, 0x99, 0x84, 0x52, 0xac, 0xb7, 0xd6, 0x69, 0x0f, 0x5e, 0xe9, + 0x07, 0x01, 0x2d, 0x52, 0x2d, 0x55, 0x24, 0xb5, 0xbb, 0x30, 0x3f, 0xb7, 0x2e, 0x6f, 0xc1, 0xeb, + 0xaa, 0x2b, 0x77, 0x5d, 0x16, 0x9b, 0x89, 0x59, 0xe2, 0x7c, 0x01, 0xab, 0xd3, 0xa9, 0x9a, 0xe3, + 0x87, 0xb0, 0xd4, 0xd7, 0x31, 0x4d, 0x73, 0x2b, 0x9d, 0x66, 0x98, 0xdc, 0x1d, 0xa3, 0xc8, 0x05, + 0x94, 0x8e, 0x87, 0x8c, 0x4a, 0x16, 0x3f, 0x9f, 0x35, 0x58, 0xf4, 0xe9, 0x90, 0x79, 0x32, 0x1a, + 0xcb, 0xf0, 0x0b, 0xef, 0xc2, 0x42, 0xb0, 0x45, 0xb5, 0x9d, 0x5c, 0x8a, 0xa8, 0x74, 0xf2, 0x13, + 0x82, 0xd2, 0xe7, 0xfe, 0x65, 0xa2, 0x49, 0x54, 0x0c, 0x3d, 0xaa, 0x18, 0xde, 0x87, 0x95, 0x91, + 0xaa, 0xa5, 0x6e, 0xa5, 0xa6, 0x62, 0x44, 0xe8, 0xe8, 0xe2, 0x9a, 0x1f, 0x07, 0x17, 0xf7, 0x33, + 0x2a, 0xae, 0xbb, 0x10, 0xa6, 0x07, 0xbf, 0xc9, 0xd7, 0xb0, 0xaa, 0x77, 0xab, 0x85, 0x48, 0x19, + 0xb4, 0x43, 0x78, 0xa6, 0x55, 0xd2, 0x4d, 0xf2, 0x49, 0x1b, 0x81, 0xc8, 0x3a, 0xbc, 0xd1, 0x61, + 0xf2, 0x13, 0x21, 0x46, 0xec, 0x9c, 0xde, 0x70, 0x8f, 0xbb, 0xb7, 0xba, 0x5d, 0xf3, 0xbf, 0x15, + 0x78, 0x7e, 0x1c, 0x14, 0xd1, 0x77, 0x0b, 0xff, 0x81, 0xa0, 0xf4, 0xc0, 0x56, 0xf0, 0x9e, 0x99, + 0xea, 0x85, 0xe6, 0x3c, 0x23, 0x32, 0x1e, 0x73, 0xa1, 0x49, 0xe3, 0xc7, 0x7f, 0xfe, 0xfd, 0xb5, + 0xb0, 0x85, 0xc9, 0xc4, 0x97, 0xbf, 0x0b, 0x04, 0x38, 0x10, 0xd3, 0x06, 0x62, 0x35, 0x7e, 0xc0, + 0x7f, 0xa2, 0x70, 0x66, 0x13, 0xde, 0x82, 0x3f, 0xc8, 0x60, 0x3a, 0xdf, 0x02, 0x8d, 0xf6, 0x53, + 0xa0, 0xe1, 0xf8, 0x93, 0x9a, 0xa2, 0x5e, 0xc1, 0xeb, 0x13, 0xea, 0x09, 0xd2, 0xf8, 0x17, 0x04, + 0xcf, 0xb4, 0xfb, 0xe0, 0x77, 0xb3, 0xf5, 0x8c, 0x0d, 0xa8, 0x91, 0x3d, 0x92, 0xa4, 
0xa9, 0x08, + 0xbc, 0x83, 0x1b, 0x99, 0xda, 0x59, 0xca, 0x1b, 0x02, 0x0d, 0xef, 0x11, 0x2c, 0x8f, 0xdd, 0x06, + 0x5b, 0x39, 0xb6, 0x1f, 0xf7, 0x44, 0x63, 0x3b, 0x3f, 0x40, 0xab, 0xb4, 0xad, 0x48, 0x36, 0x70, + 0x3d, 0xfb, 0x80, 0x43, 0x96, 0xf8, 0x6f, 0x04, 0xcf, 0xe3, 0x7e, 0x83, 0x9b, 0x79, 0x9a, 0x4e, + 0xfb, 0x98, 0xd1, 0x7a, 0x14, 0x46, 0x73, 0xdd, 0x57, 0x5c, 0x77, 0x71, 0x2b, 0xbf, 0xa0, 0x56, + 0xe4, 0x65, 0xf8, 0x77, 0x04, 0x30, 0x31, 0x33, 0x9c, 0xa5, 0xd4, 0x03, 0xdf, 0xcb, 0x73, 0xe2, + 0xef, 0x2b, 0x82, 0x4d, 0x12, 0x3f, 0xf1, 0xd0, 0x1d, 0xe7, 0xca, 0xd9, 0x0e, 0x8d, 0xeb, 0x1e, + 0x01, 0x4c, 0x5c, 0x30, 0x93, 0xdd, 0x03, 0xc3, 0xcc, 0xc3, 0xee, 0x40, 0xb1, 0xdb, 0x6b, 0x5a, + 0x31, 0x76, 0x41, 0x73, 0x33, 0x43, 0x43, 0x4d, 0xf1, 0x2f, 0x04, 0x2f, 0xa6, 0xfc, 0x11, 0xb7, + 0xf2, 0x69, 0x38, 0xe5, 0xa6, 0x46, 0x2e, 0xa3, 0x24, 0x27, 0x8a, 0xeb, 0x21, 0x79, 0xca, 0x51, + 0xb7, 0x23, 0x97, 0xc5, 0xbf, 0x21, 0x78, 0x99, 0xb4, 0x59, 0xfc, 0x5e, 0xf6, 0x45, 0x9f, 0xe5, + 0xcb, 0x46, 0x23, 0x95, 0xf8, 0x14, 0x84, 0x10, 0x45, 0x7f, 0x03, 0x1b, 0x63, 0xfa, 0x6d, 0x3b, + 0x51, 0xf6, 0xe8, 0x7b, 0xa8, 0xf5, 0xb9, 0x9b, 0x4e, 0xe6, 0xa8, 0x14, 0x7f, 0x09, 0x4e, 0x83, + 0xf7, 0xeb, 0xcb, 0x13, 0x8d, 0xb0, 0xf9, 0x80, 0x7a, 0xb6, 0xc9, 0x87, 0xb6, 0x65, 0x33, 0x4f, + 0xbd, 0x6d, 0x56, 0xb8, 0x44, 0x7d, 0x47, 0xcc, 0xf9, 0x57, 0xbd, 0xaf, 0x03, 0x17, 0x8b, 0x0a, + 0xd0, 0xfa, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xad, 0xe1, 0xf2, 0x57, 0x85, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1/video_intelligence.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1/video_intelligence.pb.go new file mode 100644 index 000000000..5ac81f8a3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1/video_intelligence.pb.go @@ -0,0 +1,989 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/cloud/videointelligence/v1beta1/video_intelligence.proto + +/* +Package videointelligence is a generated protocol buffer package. + +It is generated from these files: + google/cloud/videointelligence/v1beta1/video_intelligence.proto + +It has these top-level messages: + AnnotateVideoRequest + VideoContext + VideoSegment + LabelLocation + LabelAnnotation + SafeSearchAnnotation + BoundingBox + FaceLocation + FaceAnnotation + VideoAnnotationResults + AnnotateVideoResponse + VideoAnnotationProgress + AnnotateVideoProgress +*/ +package videointelligence + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Video annotation feature. +type Feature int32 + +const ( + // Unspecified. + Feature_FEATURE_UNSPECIFIED Feature = 0 + // Label detection. Detect objects, such as dog or flower. + Feature_LABEL_DETECTION Feature = 1 + // Human face detection and tracking. + Feature_FACE_DETECTION Feature = 2 + // Shot change detection. + Feature_SHOT_CHANGE_DETECTION Feature = 3 + // Safe search detection. 
+ Feature_SAFE_SEARCH_DETECTION Feature = 4 +) + +var Feature_name = map[int32]string{ + 0: "FEATURE_UNSPECIFIED", + 1: "LABEL_DETECTION", + 2: "FACE_DETECTION", + 3: "SHOT_CHANGE_DETECTION", + 4: "SAFE_SEARCH_DETECTION", +} +var Feature_value = map[string]int32{ + "FEATURE_UNSPECIFIED": 0, + "LABEL_DETECTION": 1, + "FACE_DETECTION": 2, + "SHOT_CHANGE_DETECTION": 3, + "SAFE_SEARCH_DETECTION": 4, +} + +func (x Feature) String() string { + return proto.EnumName(Feature_name, int32(x)) +} +func (Feature) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Label level (scope). +type LabelLevel int32 + +const ( + // Unspecified. + LabelLevel_LABEL_LEVEL_UNSPECIFIED LabelLevel = 0 + // Video-level. Corresponds to the whole video. + LabelLevel_VIDEO_LEVEL LabelLevel = 1 + // Segment-level. Corresponds to one of `AnnotateSpec.segments`. + LabelLevel_SEGMENT_LEVEL LabelLevel = 2 + // Shot-level. Corresponds to a single shot (i.e. a series of frames + // without a major camera position or background change). + LabelLevel_SHOT_LEVEL LabelLevel = 3 + // Frame-level. Corresponds to a single video frame. + LabelLevel_FRAME_LEVEL LabelLevel = 4 +) + +var LabelLevel_name = map[int32]string{ + 0: "LABEL_LEVEL_UNSPECIFIED", + 1: "VIDEO_LEVEL", + 2: "SEGMENT_LEVEL", + 3: "SHOT_LEVEL", + 4: "FRAME_LEVEL", +} +var LabelLevel_value = map[string]int32{ + "LABEL_LEVEL_UNSPECIFIED": 0, + "VIDEO_LEVEL": 1, + "SEGMENT_LEVEL": 2, + "SHOT_LEVEL": 3, + "FRAME_LEVEL": 4, +} + +func (x LabelLevel) String() string { + return proto.EnumName(LabelLevel_name, int32(x)) +} +func (LabelLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// Label detection mode. +type LabelDetectionMode int32 + +const ( + // Unspecified. + LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0 + // Detect shot-level labels. + LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1 + // Detect frame-level labels. 
+ LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2 + // Detect both shot-level and frame-level labels. + LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3 +) + +var LabelDetectionMode_name = map[int32]string{ + 0: "LABEL_DETECTION_MODE_UNSPECIFIED", + 1: "SHOT_MODE", + 2: "FRAME_MODE", + 3: "SHOT_AND_FRAME_MODE", +} +var LabelDetectionMode_value = map[string]int32{ + "LABEL_DETECTION_MODE_UNSPECIFIED": 0, + "SHOT_MODE": 1, + "FRAME_MODE": 2, + "SHOT_AND_FRAME_MODE": 3, +} + +func (x LabelDetectionMode) String() string { + return proto.EnumName(LabelDetectionMode_name, int32(x)) +} +func (LabelDetectionMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// Bucketized representation of likelihood. +type Likelihood int32 + +const ( + // Unknown likelihood. + Likelihood_UNKNOWN Likelihood = 0 + // Very unlikely. + Likelihood_VERY_UNLIKELY Likelihood = 1 + // Unlikely. + Likelihood_UNLIKELY Likelihood = 2 + // Possible. + Likelihood_POSSIBLE Likelihood = 3 + // Likely. + Likelihood_LIKELY Likelihood = 4 + // Very likely. + Likelihood_VERY_LIKELY Likelihood = 5 +) + +var Likelihood_name = map[int32]string{ + 0: "UNKNOWN", + 1: "VERY_UNLIKELY", + 2: "UNLIKELY", + 3: "POSSIBLE", + 4: "LIKELY", + 5: "VERY_LIKELY", +} +var Likelihood_value = map[string]int32{ + "UNKNOWN": 0, + "VERY_UNLIKELY": 1, + "UNLIKELY": 2, + "POSSIBLE": 3, + "LIKELY": 4, + "VERY_LIKELY": 5, +} + +func (x Likelihood) String() string { + return proto.EnumName(Likelihood_name, int32(x)) +} +func (Likelihood) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// Video annotation request. +type AnnotateVideoRequest struct { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). 
For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // The video data bytes. Encoding: base64. If unset, the input video(s) + // should be specified via `input_uri`. If set, `input_uri` should be unset. + InputContent string `protobuf:"bytes,6,opt,name=input_content,json=inputContent" json:"input_content,omitempty"` + // Requested video annotation features. + Features []Feature `protobuf:"varint,2,rep,packed,name=features,enum=google.cloud.videointelligence.v1beta1.Feature" json:"features,omitempty"` + // Additional video context and/or feature-specific parameters. + VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext" json:"video_context,omitempty"` + // Optional location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"` + // Optional cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. 
+ LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId" json:"location_id,omitempty"` +} + +func (m *AnnotateVideoRequest) Reset() { *m = AnnotateVideoRequest{} } +func (m *AnnotateVideoRequest) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoRequest) ProtoMessage() {} +func (*AnnotateVideoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AnnotateVideoRequest) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *AnnotateVideoRequest) GetInputContent() string { + if m != nil { + return m.InputContent + } + return "" +} + +func (m *AnnotateVideoRequest) GetFeatures() []Feature { + if m != nil { + return m.Features + } + return nil +} + +func (m *AnnotateVideoRequest) GetVideoContext() *VideoContext { + if m != nil { + return m.VideoContext + } + return nil +} + +func (m *AnnotateVideoRequest) GetOutputUri() string { + if m != nil { + return m.OutputUri + } + return "" +} + +func (m *AnnotateVideoRequest) GetLocationId() string { + if m != nil { + return m.LocationId + } + return "" +} + +// Video context and/or feature-specific parameters. +type VideoContext struct { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video + // is treated as a single segment. + Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty"` + // If label detection has been requested, what labels should be detected + // in addition to video-level labels or segment-level labels. If unspecified, + // defaults to `SHOT_MODE`. + LabelDetectionMode LabelDetectionMode `protobuf:"varint,2,opt,name=label_detection_mode,json=labelDetectionMode,enum=google.cloud.videointelligence.v1beta1.LabelDetectionMode" json:"label_detection_mode,omitempty"` + // Whether the video has been shot from a stationary (i.e. non-moving) camera. 
+ // When set to true, might improve detection accuracy for moving objects. + StationaryCamera bool `protobuf:"varint,3,opt,name=stationary_camera,json=stationaryCamera" json:"stationary_camera,omitempty"` + // Model to use for label detection. + // Supported values: "latest" and "stable" (the default). + LabelDetectionModel string `protobuf:"bytes,4,opt,name=label_detection_model,json=labelDetectionModel" json:"label_detection_model,omitempty"` + // Model to use for face detection. + // Supported values: "latest" and "stable" (the default). + FaceDetectionModel string `protobuf:"bytes,5,opt,name=face_detection_model,json=faceDetectionModel" json:"face_detection_model,omitempty"` + // Model to use for shot change detection. + // Supported values: "latest" and "stable" (the default). + ShotChangeDetectionModel string `protobuf:"bytes,6,opt,name=shot_change_detection_model,json=shotChangeDetectionModel" json:"shot_change_detection_model,omitempty"` + // Model to use for safe search detection. + // Supported values: "latest" and "stable" (the default). 
+ SafeSearchDetectionModel string `protobuf:"bytes,7,opt,name=safe_search_detection_model,json=safeSearchDetectionModel" json:"safe_search_detection_model,omitempty"` +} + +func (m *VideoContext) Reset() { *m = VideoContext{} } +func (m *VideoContext) String() string { return proto.CompactTextString(m) } +func (*VideoContext) ProtoMessage() {} +func (*VideoContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *VideoContext) GetSegments() []*VideoSegment { + if m != nil { + return m.Segments + } + return nil +} + +func (m *VideoContext) GetLabelDetectionMode() LabelDetectionMode { + if m != nil { + return m.LabelDetectionMode + } + return LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED +} + +func (m *VideoContext) GetStationaryCamera() bool { + if m != nil { + return m.StationaryCamera + } + return false +} + +func (m *VideoContext) GetLabelDetectionModel() string { + if m != nil { + return m.LabelDetectionModel + } + return "" +} + +func (m *VideoContext) GetFaceDetectionModel() string { + if m != nil { + return m.FaceDetectionModel + } + return "" +} + +func (m *VideoContext) GetShotChangeDetectionModel() string { + if m != nil { + return m.ShotChangeDetectionModel + } + return "" +} + +func (m *VideoContext) GetSafeSearchDetectionModel() string { + if m != nil { + return m.SafeSearchDetectionModel + } + return "" +} + +// Video segment. +type VideoSegment struct { + // Start offset in microseconds (inclusive). Unset means 0. + StartTimeOffset int64 `protobuf:"varint,1,opt,name=start_time_offset,json=startTimeOffset" json:"start_time_offset,omitempty"` + // End offset in microseconds (inclusive). Unset means 0. 
+ EndTimeOffset int64 `protobuf:"varint,2,opt,name=end_time_offset,json=endTimeOffset" json:"end_time_offset,omitempty"` +} + +func (m *VideoSegment) Reset() { *m = VideoSegment{} } +func (m *VideoSegment) String() string { return proto.CompactTextString(m) } +func (*VideoSegment) ProtoMessage() {} +func (*VideoSegment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *VideoSegment) GetStartTimeOffset() int64 { + if m != nil { + return m.StartTimeOffset + } + return 0 +} + +func (m *VideoSegment) GetEndTimeOffset() int64 { + if m != nil { + return m.EndTimeOffset + } + return 0 +} + +// Label location. +type LabelLocation struct { + // Video segment. Set to [-1, -1] for video-level labels. + // Set to [timestamp, timestamp] for frame-level labels. + // Otherwise, corresponds to one of `AnnotateSpec.segments` + // (if specified) or to shot boundaries (if requested). + Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment" json:"segment,omitempty"` + // Confidence that the label is accurate. Range: [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` + // Label level. + Level LabelLevel `protobuf:"varint,3,opt,name=level,enum=google.cloud.videointelligence.v1beta1.LabelLevel" json:"level,omitempty"` +} + +func (m *LabelLocation) Reset() { *m = LabelLocation{} } +func (m *LabelLocation) String() string { return proto.CompactTextString(m) } +func (*LabelLocation) ProtoMessage() {} +func (*LabelLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *LabelLocation) GetSegment() *VideoSegment { + if m != nil { + return m.Segment + } + return nil +} + +func (m *LabelLocation) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func (m *LabelLocation) GetLevel() LabelLevel { + if m != nil { + return m.Level + } + return LabelLevel_LABEL_LEVEL_UNSPECIFIED +} + +// Label annotation. 
+type LabelAnnotation struct { + // Textual description, e.g. `Fixed-gear bicycle`. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // Language code for `description` in BCP-47 format. + LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // Where the label was detected and with what confidence. + Locations []*LabelLocation `protobuf:"bytes,3,rep,name=locations" json:"locations,omitempty"` +} + +func (m *LabelAnnotation) Reset() { *m = LabelAnnotation{} } +func (m *LabelAnnotation) String() string { return proto.CompactTextString(m) } +func (*LabelAnnotation) ProtoMessage() {} +func (*LabelAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *LabelAnnotation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *LabelAnnotation) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *LabelAnnotation) GetLocations() []*LabelLocation { + if m != nil { + return m.Locations + } + return nil +} + +// Safe search annotation (based on per-frame visual signals only). +// If no unsafe content has been detected in a frame, no annotations +// are present for that frame. If only some types of unsafe content +// have been detected in a frame, the likelihood is set to `UNKNOWN` +// for all other types of unsafe content. +type SafeSearchAnnotation struct { + // Likelihood of adult content. + Adult Likelihood `protobuf:"varint,1,opt,name=adult,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"adult,omitempty"` + // Likelihood that an obvious modification was made to the original + // version to make it appear funny or offensive. + Spoof Likelihood `protobuf:"varint,2,opt,name=spoof,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"spoof,omitempty"` + // Likelihood of medical content. 
+ Medical Likelihood `protobuf:"varint,3,opt,name=medical,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"medical,omitempty"` + // Likelihood of violent content. + Violent Likelihood `protobuf:"varint,4,opt,name=violent,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"violent,omitempty"` + // Likelihood of racy content. + Racy Likelihood `protobuf:"varint,5,opt,name=racy,enum=google.cloud.videointelligence.v1beta1.Likelihood" json:"racy,omitempty"` + // Video time offset in microseconds. + TimeOffset int64 `protobuf:"varint,6,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` +} + +func (m *SafeSearchAnnotation) Reset() { *m = SafeSearchAnnotation{} } +func (m *SafeSearchAnnotation) String() string { return proto.CompactTextString(m) } +func (*SafeSearchAnnotation) ProtoMessage() {} +func (*SafeSearchAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *SafeSearchAnnotation) GetAdult() Likelihood { + if m != nil { + return m.Adult + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetSpoof() Likelihood { + if m != nil { + return m.Spoof + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetMedical() Likelihood { + if m != nil { + return m.Medical + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetViolent() Likelihood { + if m != nil { + return m.Violent + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetRacy() Likelihood { + if m != nil { + return m.Racy + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetTimeOffset() int64 { + if m != nil { + return m.TimeOffset + } + return 0 +} + +// Bounding box. +type BoundingBox struct { + // Left X coordinate. + Left int32 `protobuf:"varint,1,opt,name=left" json:"left,omitempty"` + // Right X coordinate. + Right int32 `protobuf:"varint,2,opt,name=right" json:"right,omitempty"` + // Bottom Y coordinate. 
+ Bottom int32 `protobuf:"varint,3,opt,name=bottom" json:"bottom,omitempty"` + // Top Y coordinate. + Top int32 `protobuf:"varint,4,opt,name=top" json:"top,omitempty"` +} + +func (m *BoundingBox) Reset() { *m = BoundingBox{} } +func (m *BoundingBox) String() string { return proto.CompactTextString(m) } +func (*BoundingBox) ProtoMessage() {} +func (*BoundingBox) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *BoundingBox) GetLeft() int32 { + if m != nil { + return m.Left + } + return 0 +} + +func (m *BoundingBox) GetRight() int32 { + if m != nil { + return m.Right + } + return 0 +} + +func (m *BoundingBox) GetBottom() int32 { + if m != nil { + return m.Bottom + } + return 0 +} + +func (m *BoundingBox) GetTop() int32 { + if m != nil { + return m.Top + } + return 0 +} + +// Face location. +type FaceLocation struct { + // Bounding box in a frame. + BoundingBox *BoundingBox `protobuf:"bytes,1,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // Video time offset in microseconds. + TimeOffset int64 `protobuf:"varint,2,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` +} + +func (m *FaceLocation) Reset() { *m = FaceLocation{} } +func (m *FaceLocation) String() string { return proto.CompactTextString(m) } +func (*FaceLocation) ProtoMessage() {} +func (*FaceLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *FaceLocation) GetBoundingBox() *BoundingBox { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *FaceLocation) GetTimeOffset() int64 { + if m != nil { + return m.TimeOffset + } + return 0 +} + +// Face annotation. +type FaceAnnotation struct { + // Thumbnail of a representative face view (in JPEG format). Encoding: base64. + Thumbnail string `protobuf:"bytes,1,opt,name=thumbnail" json:"thumbnail,omitempty"` + // All locations where a face was detected. 
+ // Faces are detected and tracked on a per-video basis + // (as opposed to across multiple videos). + Segments []*VideoSegment `protobuf:"bytes,2,rep,name=segments" json:"segments,omitempty"` + // Face locations at one frame per second. + Locations []*FaceLocation `protobuf:"bytes,3,rep,name=locations" json:"locations,omitempty"` +} + +func (m *FaceAnnotation) Reset() { *m = FaceAnnotation{} } +func (m *FaceAnnotation) String() string { return proto.CompactTextString(m) } +func (*FaceAnnotation) ProtoMessage() {} +func (*FaceAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *FaceAnnotation) GetThumbnail() string { + if m != nil { + return m.Thumbnail + } + return "" +} + +func (m *FaceAnnotation) GetSegments() []*VideoSegment { + if m != nil { + return m.Segments + } + return nil +} + +func (m *FaceAnnotation) GetLocations() []*FaceLocation { + if m != nil { + return m.Locations + } + return nil +} + +// Annotation results for a single video. +type VideoAnnotationResults struct { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // Label annotations. There is exactly one element for each unique label. + LabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=label_annotations,json=labelAnnotations" json:"label_annotations,omitempty"` + // Face annotations. There is exactly one element for each unique face. + FaceAnnotations []*FaceAnnotation `protobuf:"bytes,3,rep,name=face_annotations,json=faceAnnotations" json:"face_annotations,omitempty"` + // Shot annotations. Each shot is represented as a video segment. + ShotAnnotations []*VideoSegment `protobuf:"bytes,4,rep,name=shot_annotations,json=shotAnnotations" json:"shot_annotations,omitempty"` + // Safe search annotations. 
+ SafeSearchAnnotations []*SafeSearchAnnotation `protobuf:"bytes,6,rep,name=safe_search_annotations,json=safeSearchAnnotations" json:"safe_search_annotations,omitempty"` + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + Error *google_rpc.Status `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"` +} + +func (m *VideoAnnotationResults) Reset() { *m = VideoAnnotationResults{} } +func (m *VideoAnnotationResults) String() string { return proto.CompactTextString(m) } +func (*VideoAnnotationResults) ProtoMessage() {} +func (*VideoAnnotationResults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *VideoAnnotationResults) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *VideoAnnotationResults) GetLabelAnnotations() []*LabelAnnotation { + if m != nil { + return m.LabelAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation { + if m != nil { + return m.FaceAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment { + if m != nil { + return m.ShotAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetSafeSearchAnnotations() []*SafeSearchAnnotation { + if m != nil { + return m.SafeSearchAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +type AnnotateVideoResponse struct { + // Annotation results for all videos specified in `AnnotateVideoRequest`. 
+ AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults" json:"annotation_results,omitempty"` +} + +func (m *AnnotateVideoResponse) Reset() { *m = AnnotateVideoResponse{} } +func (m *AnnotateVideoResponse) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoResponse) ProtoMessage() {} +func (*AnnotateVideoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults { + if m != nil { + return m.AnnotationResults + } + return nil +} + +// Annotation progress for a single video. +type VideoAnnotationProgress struct { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // Approximate percentage processed thus far. + // Guaranteed to be 100 when fully processed. + ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"` + // Time when the request was received. + StartTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time of the most recent update. 
+ UpdateTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` +} + +func (m *VideoAnnotationProgress) Reset() { *m = VideoAnnotationProgress{} } +func (m *VideoAnnotationProgress) String() string { return proto.CompactTextString(m) } +func (*VideoAnnotationProgress) ProtoMessage() {} +func (*VideoAnnotationProgress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *VideoAnnotationProgress) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *VideoAnnotationProgress) GetProgressPercent() int32 { + if m != nil { + return m.ProgressPercent + } + return 0 +} + +func (m *VideoAnnotationProgress) GetStartTime() *google_protobuf3.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *VideoAnnotationProgress) GetUpdateTime() *google_protobuf3.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +type AnnotateVideoProgress struct { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. 
+ AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress" json:"annotation_progress,omitempty"` +} + +func (m *AnnotateVideoProgress) Reset() { *m = AnnotateVideoProgress{} } +func (m *AnnotateVideoProgress) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoProgress) ProtoMessage() {} +func (*AnnotateVideoProgress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress { + if m != nil { + return m.AnnotationProgress + } + return nil +} + +func init() { + proto.RegisterType((*AnnotateVideoRequest)(nil), "google.cloud.videointelligence.v1beta1.AnnotateVideoRequest") + proto.RegisterType((*VideoContext)(nil), "google.cloud.videointelligence.v1beta1.VideoContext") + proto.RegisterType((*VideoSegment)(nil), "google.cloud.videointelligence.v1beta1.VideoSegment") + proto.RegisterType((*LabelLocation)(nil), "google.cloud.videointelligence.v1beta1.LabelLocation") + proto.RegisterType((*LabelAnnotation)(nil), "google.cloud.videointelligence.v1beta1.LabelAnnotation") + proto.RegisterType((*SafeSearchAnnotation)(nil), "google.cloud.videointelligence.v1beta1.SafeSearchAnnotation") + proto.RegisterType((*BoundingBox)(nil), "google.cloud.videointelligence.v1beta1.BoundingBox") + proto.RegisterType((*FaceLocation)(nil), "google.cloud.videointelligence.v1beta1.FaceLocation") + proto.RegisterType((*FaceAnnotation)(nil), "google.cloud.videointelligence.v1beta1.FaceAnnotation") + proto.RegisterType((*VideoAnnotationResults)(nil), "google.cloud.videointelligence.v1beta1.VideoAnnotationResults") + proto.RegisterType((*AnnotateVideoResponse)(nil), "google.cloud.videointelligence.v1beta1.AnnotateVideoResponse") + proto.RegisterType((*VideoAnnotationProgress)(nil), "google.cloud.videointelligence.v1beta1.VideoAnnotationProgress") + proto.RegisterType((*AnnotateVideoProgress)(nil), 
"google.cloud.videointelligence.v1beta1.AnnotateVideoProgress") + proto.RegisterEnum("google.cloud.videointelligence.v1beta1.Feature", Feature_name, Feature_value) + proto.RegisterEnum("google.cloud.videointelligence.v1beta1.LabelLevel", LabelLevel_name, LabelLevel_value) + proto.RegisterEnum("google.cloud.videointelligence.v1beta1.LabelDetectionMode", LabelDetectionMode_name, LabelDetectionMode_value) + proto.RegisterEnum("google.cloud.videointelligence.v1beta1.Likelihood", Likelihood_name, Likelihood_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for VideoIntelligenceService service + +type VideoIntelligenceServiceClient interface { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type videoIntelligenceServiceClient struct { + cc *grpc.ClientConn +} + +func NewVideoIntelligenceServiceClient(cc *grpc.ClientConn) VideoIntelligenceServiceClient { + return &videoIntelligenceServiceClient{cc} +} + +func (c *videoIntelligenceServiceClient) AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.videointelligence.v1beta1.VideoIntelligenceService/AnnotateVideo", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for VideoIntelligenceService service + +type VideoIntelligenceServiceServer interface { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + AnnotateVideo(context.Context, *AnnotateVideoRequest) (*google_longrunning.Operation, error) +} + +func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer) { + s.RegisterService(&_VideoIntelligenceService_serviceDesc, srv) +} + +func _VideoIntelligenceService_AnnotateVideo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnnotateVideoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VideoIntelligenceServiceServer).AnnotateVideo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.videointelligence.v1beta1.VideoIntelligenceService/AnnotateVideo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VideoIntelligenceServiceServer).AnnotateVideo(ctx, req.(*AnnotateVideoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _VideoIntelligenceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.videointelligence.v1beta1.VideoIntelligenceService", + HandlerType: (*VideoIntelligenceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AnnotateVideo", + Handler: _VideoIntelligenceService_AnnotateVideo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/videointelligence/v1beta1/video_intelligence.proto", +} + +func init() { + 
proto.RegisterFile("google/cloud/videointelligence/v1beta1/video_intelligence.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 1503 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcb, 0x6f, 0x1b, 0x45, + 0x18, 0xef, 0xfa, 0x91, 0xc4, 0x9f, 0x93, 0xd8, 0x99, 0x24, 0x8d, 0x49, 0x1b, 0x1a, 0xb9, 0xa8, + 0x0a, 0x41, 0xb2, 0xa9, 0xcb, 0x43, 0xb4, 0x40, 0xe5, 0x38, 0xeb, 0xc6, 0xaa, 0x63, 0x47, 0xeb, + 0x24, 0x55, 0xb9, 0xac, 0xd6, 0xbb, 0x63, 0x67, 0xc5, 0x7a, 0x67, 0xd9, 0x9d, 0x8d, 0xda, 0x23, + 0x1c, 0x40, 0x1c, 0x11, 0xff, 0x05, 0x12, 0xf0, 0x2f, 0x70, 0xe5, 0xc4, 0x01, 0x2e, 0x9c, 0xb8, + 0xf0, 0x7f, 0x80, 0xe6, 0xb1, 0xf6, 0xda, 0x0e, 0xd4, 0x0e, 0xdc, 0x3c, 0xdf, 0xe3, 0xf7, 0xbd, + 0x67, 0xbe, 0x35, 0x3c, 0xee, 0x13, 0xd2, 0x77, 0x70, 0xd9, 0x74, 0x48, 0x68, 0x95, 0x2f, 0x6d, + 0x0b, 0x13, 0xdb, 0xa5, 0xd8, 0x71, 0xec, 0x3e, 0x76, 0x4d, 0x5c, 0xbe, 0xbc, 0xdf, 0xc5, 0xd4, + 0xb8, 0x2f, 0x38, 0x7a, 0x9c, 0x55, 0xf2, 0x7c, 0x42, 0x09, 0xba, 0x27, 0x00, 0x4a, 0x1c, 0xa0, + 0x34, 0x05, 0x50, 0x92, 0x00, 0xdb, 0xb7, 0xa5, 0x21, 0xc3, 0xb3, 0xcb, 0x86, 0xeb, 0x12, 0x6a, + 0x50, 0x9b, 0xb8, 0x81, 0x40, 0xd9, 0xbe, 0x2b, 0xb9, 0x0e, 0x71, 0xfb, 0x7e, 0xe8, 0xba, 0xb6, + 0xdb, 0x2f, 0x13, 0x0f, 0xfb, 0x63, 0x42, 0x77, 0xa4, 0x10, 0x3f, 0x75, 0xc3, 0x5e, 0x99, 0xda, + 0x03, 0x1c, 0x50, 0x63, 0xe0, 0x49, 0x81, 0x2d, 0x29, 0xe0, 0x7b, 0x66, 0x39, 0xa0, 0x06, 0x0d, + 0xa5, 0x66, 0xf1, 0xa7, 0x04, 0x6c, 0x54, 0x85, 0x51, 0x7c, 0xce, 0x5c, 0xd4, 0xf0, 0x67, 0x21, + 0x0e, 0x28, 0xba, 0x05, 0x19, 0xdb, 0xf5, 0x42, 0xaa, 0x87, 0xbe, 0x5d, 0x50, 0x76, 0x95, 0xbd, + 0x8c, 0xb6, 0xc4, 0x09, 0x67, 0xbe, 0x8d, 0xee, 0xc2, 0x8a, 0x60, 0x9a, 0xc4, 0xa5, 0xd8, 0xa5, + 0x85, 0x05, 0x2e, 0xb0, 0xcc, 0x89, 0x35, 0x41, 0x43, 0x4f, 0x61, 0xa9, 0x87, 0x0d, 0x1a, 0xfa, + 0x38, 0x28, 0x24, 0x76, 0x93, 0x7b, 0xab, 0x95, 0x72, 0x69, 0xb6, 0x94, 0x94, 0xea, 0x42, 0x4f, + 0x1b, 0x02, 0xa0, 0xe7, 0xb0, 0x22, 0x12, 
0xcd, 0x2d, 0xbe, 0xa0, 0x85, 0xe4, 0xae, 0xb2, 0x97, + 0xad, 0xbc, 0x33, 0x2b, 0x22, 0x8f, 0xad, 0x26, 0x74, 0xb5, 0xe5, 0xcb, 0xd8, 0x09, 0xed, 0x00, + 0x90, 0x90, 0x46, 0xa1, 0xa6, 0x78, 0x24, 0x19, 0x41, 0x61, 0xb1, 0xde, 0x81, 0xac, 0x43, 0x4c, + 0x9e, 0x6e, 0xdd, 0xb6, 0x0a, 0x69, 0xce, 0x87, 0x88, 0xd4, 0xb0, 0x8a, 0x7f, 0x24, 0x61, 0x39, + 0x0e, 0x8f, 0x4e, 0x60, 0x29, 0xc0, 0xfd, 0x01, 0x76, 0x69, 0x50, 0x50, 0x76, 0x93, 0x73, 0xbb, + 0xd9, 0x11, 0xca, 0xda, 0x10, 0x05, 0x39, 0xb0, 0xe1, 0x18, 0x5d, 0xec, 0xe8, 0x16, 0xa6, 0xd8, + 0xe4, 0xae, 0x0c, 0x88, 0x85, 0x0b, 0x89, 0x5d, 0x65, 0x6f, 0xb5, 0xf2, 0x70, 0x56, 0xf4, 0x26, + 0xc3, 0x38, 0x8c, 0x20, 0x8e, 0x89, 0x85, 0x35, 0xe4, 0x4c, 0xd1, 0xd0, 0x5b, 0xb0, 0x16, 0x88, + 0x26, 0x34, 0xfc, 0x97, 0xba, 0x69, 0x0c, 0xb0, 0x6f, 0xf0, 0x7c, 0x2f, 0x69, 0xf9, 0x11, 0xa3, + 0xc6, 0xe9, 0xa8, 0x02, 0x9b, 0x57, 0xb9, 0xe6, 0xc8, 0x44, 0xae, 0x4f, 0xe3, 0x3b, 0xe8, 0x6d, + 0xd8, 0xe8, 0x19, 0x26, 0x9e, 0x52, 0x11, 0xb9, 0x45, 0x8c, 0x37, 0xa1, 0xf1, 0x11, 0xdc, 0x0a, + 0x2e, 0x08, 0xd5, 0xcd, 0x0b, 0xc3, 0xed, 0x4f, 0x2b, 0x8a, 0xf6, 0x2b, 0x30, 0x91, 0x1a, 0x97, + 0xb8, 0x42, 0xdd, 0xe8, 0x61, 0x3d, 0xc0, 0x86, 0x6f, 0x5e, 0x4c, 0xa9, 0x2f, 0x4a, 0x75, 0xa3, + 0x87, 0x3b, 0x5c, 0x62, 0x5c, 0xbd, 0xd8, 0x95, 0x05, 0x96, 0x85, 0x41, 0xfb, 0x3c, 0x41, 0x3e, + 0xd5, 0xd9, 0x98, 0xe9, 0xa4, 0xd7, 0x0b, 0x30, 0xe5, 0x33, 0x92, 0xd4, 0x72, 0x9c, 0x71, 0x6a, + 0x0f, 0x70, 0x9b, 0x93, 0xd1, 0x3d, 0xc8, 0x61, 0xd7, 0x1a, 0x93, 0x4c, 0x70, 0xc9, 0x15, 0xec, + 0x5a, 0x23, 0xb9, 0xe2, 0xcf, 0x0a, 0xac, 0xf0, 0xfa, 0x34, 0x65, 0x67, 0xa1, 0x16, 0x2c, 0xca, + 0x06, 0xe0, 0xd8, 0xd7, 0xed, 0xa2, 0x08, 0x04, 0xbd, 0x0e, 0x60, 0x12, 0xb7, 0x67, 0x5b, 0x4c, + 0x96, 0x3b, 0x91, 0xd0, 0x62, 0x14, 0x74, 0x04, 0x69, 0x07, 0x5f, 0x62, 0x87, 0x97, 0x7a, 0xb5, + 0x52, 0x99, 0xab, 0xab, 0x9a, 0x4c, 0x53, 0x13, 0x00, 0xc5, 0xef, 0x15, 0xc8, 0x71, 0x6a, 0x75, + 0x78, 0x9d, 0xa1, 0x5d, 0xc8, 0x5a, 0x38, 0x30, 0x7d, 0xdb, 0x63, 0x47, 0x79, 
0xa3, 0xc4, 0x49, + 0xec, 0x52, 0x71, 0x0c, 0xb7, 0x1f, 0x1a, 0x7d, 0xac, 0x9b, 0x51, 0x77, 0x67, 0xb4, 0xe5, 0x88, + 0x58, 0x63, 0xbd, 0xd9, 0x81, 0x4c, 0x34, 0x7a, 0x41, 0x21, 0xc9, 0x87, 0xeb, 0xdd, 0xf9, 0x1c, + 0x95, 0xda, 0xda, 0x08, 0xa7, 0xf8, 0x63, 0x12, 0x36, 0x3a, 0xc3, 0xe2, 0xc7, 0x9c, 0x3e, 0x82, + 0xb4, 0x61, 0x85, 0x8e, 0x28, 0xc0, 0x3c, 0x29, 0xb1, 0x3f, 0xc5, 0x8e, 0x7d, 0x41, 0x88, 0xa5, + 0x09, 0x00, 0x86, 0x14, 0x78, 0x84, 0xf4, 0xe4, 0xc8, 0x5e, 0x0b, 0x89, 0x03, 0xa0, 0x26, 0x2c, + 0x0e, 0xb0, 0x65, 0x9b, 0xc6, 0xfc, 0x85, 0x1a, 0x61, 0x45, 0x10, 0x0c, 0xed, 0xd2, 0x26, 0x0e, + 0x6b, 0xb2, 0xd4, 0xf5, 0xd1, 0x24, 0x04, 0xaa, 0x43, 0xca, 0x37, 0xcc, 0x97, 0x7c, 0x90, 0xaf, + 0x07, 0xc5, 0xf5, 0xd9, 0x9d, 0x1b, 0x1f, 0x98, 0x05, 0x3e, 0x30, 0x40, 0x47, 0xd3, 0x62, 0x40, + 0xf6, 0x80, 0x84, 0xae, 0x65, 0xbb, 0xfd, 0x03, 0xf2, 0x02, 0x21, 0x48, 0x39, 0xb8, 0x27, 0xca, + 0x94, 0xd6, 0xf8, 0x6f, 0xb4, 0x01, 0x69, 0xdf, 0xee, 0x5f, 0x88, 0x71, 0x4b, 0x6b, 0xe2, 0x80, + 0x6e, 0xc2, 0x42, 0x97, 0x50, 0x4a, 0x06, 0x3c, 0x79, 0x69, 0x4d, 0x9e, 0x50, 0x1e, 0x92, 0x94, + 0x78, 0x3c, 0x07, 0x69, 0x8d, 0xfd, 0x2c, 0x7e, 0xa5, 0xc0, 0x72, 0xdd, 0x30, 0xf1, 0x70, 0x1e, + 0xcf, 0x61, 0xb9, 0x2b, 0x6d, 0xea, 0x5d, 0xf2, 0x42, 0x0e, 0xe5, 0x83, 0x59, 0x83, 0x8c, 0xf9, + 0xab, 0x65, 0xbb, 0x31, 0xe7, 0x27, 0x82, 0x4d, 0x4c, 0x05, 0xfb, 0x9b, 0x02, 0xab, 0xcc, 0x93, + 0x58, 0x63, 0xde, 0x86, 0x0c, 0xbd, 0x08, 0x07, 0x5d, 0xd7, 0xb0, 0x1d, 0x39, 0x4b, 0x23, 0xc2, + 0xd8, 0x03, 0x94, 0xf8, 0x5f, 0x1e, 0x20, 0x6d, 0x7a, 0xec, 0x66, 0x86, 0x8c, 0x27, 0x31, 0x3e, + 0x75, 0x7f, 0x25, 0xe1, 0x26, 0x37, 0x37, 0x8a, 0x4b, 0xc3, 0x41, 0xe8, 0xd0, 0xe0, 0xdf, 0x97, + 0x0f, 0x0b, 0xd6, 0xc4, 0x8b, 0x13, 0x5b, 0x96, 0x64, 0x98, 0xef, 0xcf, 0x75, 0x15, 0xc4, 0xec, + 0xe6, 0x9d, 0x71, 0x42, 0x80, 0x0c, 0xc8, 0xf3, 0x37, 0x2a, 0x6e, 0x44, 0x04, 0xfe, 0xde, 0x3c, + 0x81, 0xc7, 0x6c, 0xe4, 0x7a, 0x63, 0xe7, 0x00, 0xe9, 0x90, 0xe7, 0x8f, 0x5a, 0xdc, 0x44, 0xea, + 0x3f, 0x94, 0x2b, 
0xc7, 0xd0, 0xe2, 0x06, 0x28, 0x6c, 0xc5, 0x9f, 0xbd, 0xb8, 0x9d, 0x05, 0x6e, + 0xe7, 0xc3, 0x59, 0xed, 0x5c, 0x75, 0x3b, 0x6a, 0x9b, 0xc1, 0x15, 0xd4, 0x00, 0xed, 0x41, 0x1a, + 0xfb, 0x3e, 0xf1, 0xf9, 0x2d, 0x90, 0xad, 0xa0, 0xc8, 0x86, 0xef, 0x99, 0xa5, 0x0e, 0xdf, 0x3d, + 0x35, 0x21, 0x50, 0xfc, 0x52, 0x81, 0xcd, 0x89, 0xe5, 0x33, 0xf0, 0x88, 0x1b, 0x60, 0x34, 0x00, + 0x34, 0xf2, 0x56, 0xf7, 0x45, 0x5b, 0xc8, 0x65, 0xea, 0xe3, 0xb9, 0x92, 0x33, 0xd5, 0x5c, 0xda, + 0x9a, 0x31, 0x49, 0x2a, 0xfe, 0xae, 0xc0, 0xd6, 0x84, 0xf4, 0x89, 0x4f, 0xfa, 0x3e, 0x0e, 0x5e, + 0xd1, 0x8b, 0x6f, 0x42, 0xde, 0x93, 0x82, 0xba, 0x87, 0x7d, 0x93, 0xdd, 0xa3, 0xe2, 0xbe, 0xc9, + 0x45, 0xf4, 0x13, 0x41, 0x46, 0x1f, 0x00, 0x8c, 0x96, 0x06, 0xb9, 0xbe, 0x6e, 0x47, 0xa1, 0x44, + 0x8b, 0x7b, 0xe9, 0x34, 0x5a, 0xdc, 0xb5, 0xcc, 0x70, 0x93, 0x40, 0x8f, 0x20, 0x1b, 0x7a, 0x96, + 0x41, 0xb1, 0xd0, 0x4d, 0xbd, 0x52, 0x17, 0x84, 0x38, 0x23, 0x14, 0xbf, 0x9e, 0x4c, 0xf2, 0x30, + 0x32, 0x0f, 0xd6, 0x63, 0x49, 0x8e, 0xfc, 0x95, 0x59, 0x7e, 0x7c, 0xcd, 0x2c, 0x47, 0xe8, 0x5a, + 0xac, 0x80, 0x11, 0x6d, 0xff, 0x73, 0x05, 0x16, 0xe5, 0x6e, 0x8f, 0xb6, 0x60, 0xbd, 0xae, 0x56, + 0x4f, 0xcf, 0x34, 0x55, 0x3f, 0x6b, 0x75, 0x4e, 0xd4, 0x5a, 0xa3, 0xde, 0x50, 0x0f, 0xf3, 0x37, + 0xd0, 0x3a, 0xe4, 0x9a, 0xd5, 0x03, 0xb5, 0xa9, 0x1f, 0xaa, 0xa7, 0x6a, 0xed, 0xb4, 0xd1, 0x6e, + 0xe5, 0x15, 0x84, 0x60, 0xb5, 0x5e, 0xad, 0xa9, 0x31, 0x5a, 0x02, 0xbd, 0x06, 0x9b, 0x9d, 0xa3, + 0xf6, 0xa9, 0x5e, 0x3b, 0xaa, 0xb6, 0x9e, 0xc4, 0x59, 0x49, 0xce, 0xaa, 0xd6, 0x55, 0xbd, 0xa3, + 0x56, 0xb5, 0xda, 0x51, 0x8c, 0x95, 0xda, 0x77, 0x01, 0x46, 0x1b, 0x0b, 0xba, 0x05, 0x5b, 0xc2, + 0x58, 0x53, 0x3d, 0x57, 0x9b, 0x13, 0x9e, 0xe4, 0x20, 0x7b, 0xde, 0x38, 0x54, 0xdb, 0x82, 0x99, + 0x57, 0xd0, 0x1a, 0xac, 0x74, 0xd4, 0x27, 0xc7, 0x6a, 0xeb, 0x54, 0x92, 0x12, 0x68, 0x15, 0x80, + 0x3b, 0x21, 0xce, 0x49, 0xa6, 0x53, 0xd7, 0xaa, 0xc7, 0xaa, 0x24, 0xa4, 0xf6, 0x7d, 0x40, 0xd3, + 0x7b, 0x37, 0x7a, 0x03, 0x76, 0x27, 0x82, 0xd4, 0x8f, 
0xdb, 0x87, 0x93, 0xa9, 0x58, 0x81, 0x0c, + 0x07, 0x67, 0xac, 0xbc, 0xc2, 0x6c, 0x09, 0x6c, 0x7e, 0x4e, 0xb0, 0x14, 0x72, 0x76, 0xb5, 0x75, + 0xa8, 0xc7, 0x18, 0xc9, 0x7d, 0x0c, 0x30, 0x7a, 0x53, 0x51, 0x16, 0x16, 0xcf, 0x5a, 0x4f, 0x5b, + 0xed, 0x67, 0xad, 0xfc, 0x0d, 0x16, 0xc2, 0xb9, 0xaa, 0x3d, 0xd7, 0xcf, 0x5a, 0xcd, 0xc6, 0x53, + 0xb5, 0xf9, 0x3c, 0xaf, 0xa0, 0x65, 0x58, 0x1a, 0x9e, 0x12, 0xec, 0x74, 0xd2, 0xee, 0x74, 0x1a, + 0x07, 0x4d, 0x35, 0x9f, 0x44, 0x00, 0x0b, 0x92, 0x93, 0xe2, 0xe9, 0x60, 0xaa, 0x92, 0x90, 0xae, + 0xfc, 0xa0, 0x40, 0x81, 0x97, 0xbf, 0x11, 0x6b, 0x8c, 0x0e, 0xf6, 0x2f, 0x6d, 0x13, 0xa3, 0x6f, + 0x14, 0x58, 0x19, 0xeb, 0x3b, 0x34, 0xf3, 0x6d, 0x73, 0xd5, 0x07, 0xe9, 0xf6, 0x4e, 0xa4, 0x1d, + 0xfb, 0x12, 0x2e, 0xb5, 0xa3, 0x2f, 0xe1, 0xe2, 0xdd, 0x2f, 0x7e, 0xfd, 0xf3, 0xdb, 0xc4, 0x4e, + 0xb1, 0x30, 0xfe, 0x61, 0x1e, 0x3c, 0x94, 0x6d, 0x88, 0x1f, 0x2a, 0xfb, 0x07, 0xbf, 0x28, 0xb0, + 0x6f, 0x92, 0xc1, 0x8c, 0x7e, 0x1c, 0xec, 0xfc, 0x53, 0x70, 0x27, 0x6c, 0xe4, 0x4e, 0x94, 0x4f, + 0x9e, 0x49, 0xa0, 0x3e, 0x61, 0x4b, 0x6a, 0x89, 0xf8, 0xfd, 0x72, 0x1f, 0xbb, 0x7c, 0x20, 0xcb, + 0x82, 0x65, 0x78, 0x76, 0xf0, 0xaa, 0xbf, 0x10, 0x1e, 0x4d, 0x71, 0xbe, 0x4b, 0xdc, 0x7b, 0x22, + 0x90, 0x6b, 0xdc, 0xc5, 0x29, 0x3f, 0x4a, 0xe7, 0xf7, 0x0f, 0x98, 0x6a, 0x77, 0x81, 0x1b, 0x7b, + 0xf0, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xe8, 0xc8, 0xa8, 0xae, 0x10, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2/video_intelligence.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2/video_intelligence.pb.go new file mode 100644 index 000000000..fdace134c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2/video_intelligence.pb.go @@ -0,0 +1,1157 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/cloud/videointelligence/v1beta2/video_intelligence.proto + +/* +Package videointelligence is a generated protocol buffer package. + +It is generated from these files: + google/cloud/videointelligence/v1beta2/video_intelligence.proto + +It has these top-level messages: + AnnotateVideoRequest + VideoContext + LabelDetectionConfig + ShotChangeDetectionConfig + ExplicitContentDetectionConfig + FaceDetectionConfig + VideoSegment + LabelSegment + LabelFrame + Entity + LabelAnnotation + ExplicitContentFrame + ExplicitContentAnnotation + NormalizedBoundingBox + FaceSegment + FaceFrame + FaceAnnotation + VideoAnnotationResults + AnnotateVideoResponse + VideoAnnotationProgress + AnnotateVideoProgress +*/ +package videointelligence + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Video annotation feature. +type Feature int32 + +const ( + // Unspecified. + Feature_FEATURE_UNSPECIFIED Feature = 0 + // Label detection. Detect objects, such as dog or flower. + Feature_LABEL_DETECTION Feature = 1 + // Shot change detection. 
+ Feature_SHOT_CHANGE_DETECTION Feature = 2 + // Explicit content detection. + Feature_EXPLICIT_CONTENT_DETECTION Feature = 3 + // Human face detection and tracking. + Feature_FACE_DETECTION Feature = 4 +) + +var Feature_name = map[int32]string{ + 0: "FEATURE_UNSPECIFIED", + 1: "LABEL_DETECTION", + 2: "SHOT_CHANGE_DETECTION", + 3: "EXPLICIT_CONTENT_DETECTION", + 4: "FACE_DETECTION", +} +var Feature_value = map[string]int32{ + "FEATURE_UNSPECIFIED": 0, + "LABEL_DETECTION": 1, + "SHOT_CHANGE_DETECTION": 2, + "EXPLICIT_CONTENT_DETECTION": 3, + "FACE_DETECTION": 4, +} + +func (x Feature) String() string { + return proto.EnumName(Feature_name, int32(x)) +} +func (Feature) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Label detection mode. +type LabelDetectionMode int32 + +const ( + // Unspecified. + LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0 + // Detect shot-level labels. + LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1 + // Detect frame-level labels. + LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2 + // Detect both shot-level and frame-level labels. + LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3 +) + +var LabelDetectionMode_name = map[int32]string{ + 0: "LABEL_DETECTION_MODE_UNSPECIFIED", + 1: "SHOT_MODE", + 2: "FRAME_MODE", + 3: "SHOT_AND_FRAME_MODE", +} +var LabelDetectionMode_value = map[string]int32{ + "LABEL_DETECTION_MODE_UNSPECIFIED": 0, + "SHOT_MODE": 1, + "FRAME_MODE": 2, + "SHOT_AND_FRAME_MODE": 3, +} + +func (x LabelDetectionMode) String() string { + return proto.EnumName(LabelDetectionMode_name, int32(x)) +} +func (LabelDetectionMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// Bucketized representation of likelihood. +type Likelihood int32 + +const ( + // Unspecified likelihood. + Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0 + // Very unlikely. + Likelihood_VERY_UNLIKELY Likelihood = 1 + // Unlikely. 
+ Likelihood_UNLIKELY Likelihood = 2 + // Possible. + Likelihood_POSSIBLE Likelihood = 3 + // Likely. + Likelihood_LIKELY Likelihood = 4 + // Very likely. + Likelihood_VERY_LIKELY Likelihood = 5 +) + +var Likelihood_name = map[int32]string{ + 0: "LIKELIHOOD_UNSPECIFIED", + 1: "VERY_UNLIKELY", + 2: "UNLIKELY", + 3: "POSSIBLE", + 4: "LIKELY", + 5: "VERY_LIKELY", +} +var Likelihood_value = map[string]int32{ + "LIKELIHOOD_UNSPECIFIED": 0, + "VERY_UNLIKELY": 1, + "UNLIKELY": 2, + "POSSIBLE": 3, + "LIKELY": 4, + "VERY_LIKELY": 5, +} + +func (x Likelihood) String() string { + return proto.EnumName(Likelihood_name, int32(x)) +} +func (Likelihood) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// Video annotation request. +type AnnotateVideoRequest struct { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // The video data bytes. Encoding: base64. If unset, the input video(s) + // should be specified via `input_uri`. If set, `input_uri` should be unset. + InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"` + // Requested video annotation features. 
+ Features []Feature `protobuf:"varint,2,rep,packed,name=features,enum=google.cloud.videointelligence.v1beta2.Feature" json:"features,omitempty"` + // Additional video context and/or feature-specific parameters. + VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext" json:"video_context,omitempty"` + // Optional location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"` + // Optional cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. 
+ LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId" json:"location_id,omitempty"` +} + +func (m *AnnotateVideoRequest) Reset() { *m = AnnotateVideoRequest{} } +func (m *AnnotateVideoRequest) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoRequest) ProtoMessage() {} +func (*AnnotateVideoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AnnotateVideoRequest) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *AnnotateVideoRequest) GetInputContent() []byte { + if m != nil { + return m.InputContent + } + return nil +} + +func (m *AnnotateVideoRequest) GetFeatures() []Feature { + if m != nil { + return m.Features + } + return nil +} + +func (m *AnnotateVideoRequest) GetVideoContext() *VideoContext { + if m != nil { + return m.VideoContext + } + return nil +} + +func (m *AnnotateVideoRequest) GetOutputUri() string { + if m != nil { + return m.OutputUri + } + return "" +} + +func (m *AnnotateVideoRequest) GetLocationId() string { + if m != nil { + return m.LocationId + } + return "" +} + +// Video context and/or feature-specific parameters. +type VideoContext struct { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video + // is treated as a single segment. + Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty"` + // Config for LABEL_DETECTION. + LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig" json:"label_detection_config,omitempty"` + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig *ShotChangeDetectionConfig `protobuf:"bytes,3,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig" json:"shot_change_detection_config,omitempty"` + // Config for EXPLICIT_CONTENT_DETECTION. 
+ ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig" json:"explicit_content_detection_config,omitempty"` + // Config for FACE_DETECTION. + FaceDetectionConfig *FaceDetectionConfig `protobuf:"bytes,5,opt,name=face_detection_config,json=faceDetectionConfig" json:"face_detection_config,omitempty"` +} + +func (m *VideoContext) Reset() { *m = VideoContext{} } +func (m *VideoContext) String() string { return proto.CompactTextString(m) } +func (*VideoContext) ProtoMessage() {} +func (*VideoContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *VideoContext) GetSegments() []*VideoSegment { + if m != nil { + return m.Segments + } + return nil +} + +func (m *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig { + if m != nil { + return m.LabelDetectionConfig + } + return nil +} + +func (m *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig { + if m != nil { + return m.ShotChangeDetectionConfig + } + return nil +} + +func (m *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig { + if m != nil { + return m.ExplicitContentDetectionConfig + } + return nil +} + +func (m *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig { + if m != nil { + return m.FaceDetectionConfig + } + return nil +} + +// Config for LABEL_DETECTION. +type LabelDetectionConfig struct { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode LabelDetectionMode `protobuf:"varint,1,opt,name=label_detection_mode,json=labelDetectionMode,enum=google.cloud.videointelligence.v1beta2.LabelDetectionMode" json:"label_detection_mode,omitempty"` + // Whether the video has been shot from a stationary (i.e. non-moving) camera. 
+ // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera" json:"stationary_camera,omitempty"` + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + Model string `protobuf:"bytes,3,opt,name=model" json:"model,omitempty"` +} + +func (m *LabelDetectionConfig) Reset() { *m = LabelDetectionConfig{} } +func (m *LabelDetectionConfig) String() string { return proto.CompactTextString(m) } +func (*LabelDetectionConfig) ProtoMessage() {} +func (*LabelDetectionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode { + if m != nil { + return m.LabelDetectionMode + } + return LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED +} + +func (m *LabelDetectionConfig) GetStationaryCamera() bool { + if m != nil { + return m.StationaryCamera + } + return false +} + +func (m *LabelDetectionConfig) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +// Config for SHOT_CHANGE_DETECTION. +type ShotChangeDetectionConfig struct { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` +} + +func (m *ShotChangeDetectionConfig) Reset() { *m = ShotChangeDetectionConfig{} } +func (m *ShotChangeDetectionConfig) String() string { return proto.CompactTextString(m) } +func (*ShotChangeDetectionConfig) ProtoMessage() {} +func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ShotChangeDetectionConfig) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +// Config for EXPLICIT_CONTENT_DETECTION. 
+type ExplicitContentDetectionConfig struct { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` +} + +func (m *ExplicitContentDetectionConfig) Reset() { *m = ExplicitContentDetectionConfig{} } +func (m *ExplicitContentDetectionConfig) String() string { return proto.CompactTextString(m) } +func (*ExplicitContentDetectionConfig) ProtoMessage() {} +func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ExplicitContentDetectionConfig) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +// Config for FACE_DETECTION. +type FaceDetectionConfig struct { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` + // Whether bounding boxes be included in the face annotation output. + IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes" json:"include_bounding_boxes,omitempty"` +} + +func (m *FaceDetectionConfig) Reset() { *m = FaceDetectionConfig{} } +func (m *FaceDetectionConfig) String() string { return proto.CompactTextString(m) } +func (*FaceDetectionConfig) ProtoMessage() {} +func (*FaceDetectionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *FaceDetectionConfig) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +func (m *FaceDetectionConfig) GetIncludeBoundingBoxes() bool { + if m != nil { + return m.IncludeBoundingBoxes + } + return false +} + +// Video segment. +type VideoSegment struct { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). 
+ StartTimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset" json:"start_time_offset,omitempty"` + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + EndTimeOffset *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset" json:"end_time_offset,omitempty"` +} + +func (m *VideoSegment) Reset() { *m = VideoSegment{} } +func (m *VideoSegment) String() string { return proto.CompactTextString(m) } +func (*VideoSegment) ProtoMessage() {} +func (*VideoSegment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *VideoSegment) GetStartTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.StartTimeOffset + } + return nil +} + +func (m *VideoSegment) GetEndTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.EndTimeOffset + } + return nil +} + +// Video segment level annotation results for label detection. +type LabelSegment struct { + // Video segment where a label was detected. + Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment" json:"segment,omitempty"` + // Confidence that the label is accurate. Range: [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *LabelSegment) Reset() { *m = LabelSegment{} } +func (m *LabelSegment) String() string { return proto.CompactTextString(m) } +func (*LabelSegment) ProtoMessage() {} +func (*LabelSegment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *LabelSegment) GetSegment() *VideoSegment { + if m != nil { + return m.Segment + } + return nil +} + +func (m *LabelSegment) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Video frame level annotation results for label detection. 
+type LabelFrame struct { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + TimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` + // Confidence that the label is accurate. Range: [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *LabelFrame) Reset() { *m = LabelFrame{} } +func (m *LabelFrame) String() string { return proto.CompactTextString(m) } +func (*LabelFrame) ProtoMessage() {} +func (*LabelFrame) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *LabelFrame) GetTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.TimeOffset + } + return nil +} + +func (m *LabelFrame) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Detected entity from video analysis. +type Entity struct { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId" json:"entity_id,omitempty"` + // Textual description, e.g. `Fixed-gear bicycle`. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Language code for `description` in BCP-47 format. 
+ LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Entity) GetEntityId() string { + if m != nil { + return m.EntityId + } + return "" +} + +func (m *Entity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Entity) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +// Label annotation. +type LabelAnnotation struct { + // Detected entity. + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + // Common categories for the detected entity. + // E.g. when the label is `Terrier` the category is likely `dog`. And in some + // cases there might be more than one categories e.g. `Terrier` could also be + // a `pet`. + CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities" json:"category_entities,omitempty"` + // All video segments where a label was detected. + Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments" json:"segments,omitempty"` + // All video frames where a label was detected. 
+ Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames" json:"frames,omitempty"` +} + +func (m *LabelAnnotation) Reset() { *m = LabelAnnotation{} } +func (m *LabelAnnotation) String() string { return proto.CompactTextString(m) } +func (*LabelAnnotation) ProtoMessage() {} +func (*LabelAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *LabelAnnotation) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *LabelAnnotation) GetCategoryEntities() []*Entity { + if m != nil { + return m.CategoryEntities + } + return nil +} + +func (m *LabelAnnotation) GetSegments() []*LabelSegment { + if m != nil { + return m.Segments + } + return nil +} + +func (m *LabelAnnotation) GetFrames() []*LabelFrame { + if m != nil { + return m.Frames + } + return nil +} + +// Video frame level annotation results for explicit content. +type ExplicitContentFrame struct { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + TimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` + // Likelihood of the pornography content.. 
+ PornographyLikelihood Likelihood `protobuf:"varint,2,opt,name=pornography_likelihood,json=pornographyLikelihood,enum=google.cloud.videointelligence.v1beta2.Likelihood" json:"pornography_likelihood,omitempty"` +} + +func (m *ExplicitContentFrame) Reset() { *m = ExplicitContentFrame{} } +func (m *ExplicitContentFrame) String() string { return proto.CompactTextString(m) } +func (*ExplicitContentFrame) ProtoMessage() {} +func (*ExplicitContentFrame) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ExplicitContentFrame) GetTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.TimeOffset + } + return nil +} + +func (m *ExplicitContentFrame) GetPornographyLikelihood() Likelihood { + if m != nil { + return m.PornographyLikelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +// Explicit content annotation (based on per-frame visual signals only). +// If no explicit content has been detected in a frame, no annotations are +// present for that frame. +type ExplicitContentAnnotation struct { + // All video frames where explicit content was detected. + Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames" json:"frames,omitempty"` +} + +func (m *ExplicitContentAnnotation) Reset() { *m = ExplicitContentAnnotation{} } +func (m *ExplicitContentAnnotation) String() string { return proto.CompactTextString(m) } +func (*ExplicitContentAnnotation) ProtoMessage() {} +func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame { + if m != nil { + return m.Frames + } + return nil +} + +// Normalized bounding box. +// The normalized vertex coordinates are relative to the original image. +// Range: [0, 1]. +type NormalizedBoundingBox struct { + // Left X coordinate. + Left float32 `protobuf:"fixed32,1,opt,name=left" json:"left,omitempty"` + // Top Y coordinate. 
+ Top float32 `protobuf:"fixed32,2,opt,name=top" json:"top,omitempty"` + // Right X coordinate. + Right float32 `protobuf:"fixed32,3,opt,name=right" json:"right,omitempty"` + // Bottom Y coordinate. + Bottom float32 `protobuf:"fixed32,4,opt,name=bottom" json:"bottom,omitempty"` +} + +func (m *NormalizedBoundingBox) Reset() { *m = NormalizedBoundingBox{} } +func (m *NormalizedBoundingBox) String() string { return proto.CompactTextString(m) } +func (*NormalizedBoundingBox) ProtoMessage() {} +func (*NormalizedBoundingBox) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *NormalizedBoundingBox) GetLeft() float32 { + if m != nil { + return m.Left + } + return 0 +} + +func (m *NormalizedBoundingBox) GetTop() float32 { + if m != nil { + return m.Top + } + return 0 +} + +func (m *NormalizedBoundingBox) GetRight() float32 { + if m != nil { + return m.Right + } + return 0 +} + +func (m *NormalizedBoundingBox) GetBottom() float32 { + if m != nil { + return m.Bottom + } + return 0 +} + +// Video segment level annotation results for face detection. +type FaceSegment struct { + // Video segment where a face was detected. + Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment" json:"segment,omitempty"` +} + +func (m *FaceSegment) Reset() { *m = FaceSegment{} } +func (m *FaceSegment) String() string { return proto.CompactTextString(m) } +func (*FaceSegment) ProtoMessage() {} +func (*FaceSegment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *FaceSegment) GetSegment() *VideoSegment { + if m != nil { + return m.Segment + } + return nil +} + +// Video frame level annotation results for face detection. +type FaceFrame struct { + // Normalized Bounding boxes in a frame. + // There can be more than one boxes if the same face is detected in multiple + // locations within the current frame. 
+ NormalizedBoundingBoxes []*NormalizedBoundingBox `protobuf:"bytes,1,rep,name=normalized_bounding_boxes,json=normalizedBoundingBoxes" json:"normalized_bounding_boxes,omitempty"` + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this location. + TimeOffset *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` +} + +func (m *FaceFrame) Reset() { *m = FaceFrame{} } +func (m *FaceFrame) String() string { return proto.CompactTextString(m) } +func (*FaceFrame) ProtoMessage() {} +func (*FaceFrame) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *FaceFrame) GetNormalizedBoundingBoxes() []*NormalizedBoundingBox { + if m != nil { + return m.NormalizedBoundingBoxes + } + return nil +} + +func (m *FaceFrame) GetTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.TimeOffset + } + return nil +} + +// Face annotation. +type FaceAnnotation struct { + // Thumbnail of a representative face view (in JPEG format). Encoding: base64. + Thumbnail []byte `protobuf:"bytes,1,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"` + // All video segments where a face was detected. + Segments []*FaceSegment `protobuf:"bytes,2,rep,name=segments" json:"segments,omitempty"` + // All video frames where a face was detected. 
+ Frames []*FaceFrame `protobuf:"bytes,3,rep,name=frames" json:"frames,omitempty"` +} + +func (m *FaceAnnotation) Reset() { *m = FaceAnnotation{} } +func (m *FaceAnnotation) String() string { return proto.CompactTextString(m) } +func (*FaceAnnotation) ProtoMessage() {} +func (*FaceAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *FaceAnnotation) GetThumbnail() []byte { + if m != nil { + return m.Thumbnail + } + return nil +} + +func (m *FaceAnnotation) GetSegments() []*FaceSegment { + if m != nil { + return m.Segments + } + return nil +} + +func (m *FaceAnnotation) GetFrames() []*FaceFrame { + if m != nil { + return m.Frames + } + return nil +} + +// Annotation results for a single video. +type VideoAnnotationResults struct { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // Label annotations on video level or user specified segment level. + // There is exactly one element for each unique label. + SegmentLabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=segment_label_annotations,json=segmentLabelAnnotations" json:"segment_label_annotations,omitempty"` + // Label annotations on shot level. + // There is exactly one element for each unique label. + ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations" json:"shot_label_annotations,omitempty"` + // Label annotations on frame level. + // There is exactly one element for each unique label. + FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations" json:"frame_label_annotations,omitempty"` + // Face annotations. There is exactly one element for each unique face. 
+ FaceAnnotations []*FaceAnnotation `protobuf:"bytes,5,rep,name=face_annotations,json=faceAnnotations" json:"face_annotations,omitempty"` + // Shot annotations. Each shot is represented as a video segment. + ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations" json:"shot_annotations,omitempty"` + // Explicit content annotation. + ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation" json:"explicit_annotation,omitempty"` + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + Error *google_rpc.Status `protobuf:"bytes,9,opt,name=error" json:"error,omitempty"` +} + +func (m *VideoAnnotationResults) Reset() { *m = VideoAnnotationResults{} } +func (m *VideoAnnotationResults) String() string { return proto.CompactTextString(m) } +func (*VideoAnnotationResults) ProtoMessage() {} +func (*VideoAnnotationResults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *VideoAnnotationResults) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation { + if m != nil { + return m.SegmentLabelAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation { + if m != nil { + return m.ShotLabelAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation { + if m != nil { + return m.FrameLabelAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation { + if m != nil { + return m.FaceAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment { + if m != nil { + return m.ShotAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetExplicitAnnotation() 
*ExplicitContentAnnotation { + if m != nil { + return m.ExplicitAnnotation + } + return nil +} + +func (m *VideoAnnotationResults) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +type AnnotateVideoResponse struct { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults" json:"annotation_results,omitempty"` +} + +func (m *AnnotateVideoResponse) Reset() { *m = AnnotateVideoResponse{} } +func (m *AnnotateVideoResponse) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoResponse) ProtoMessage() {} +func (*AnnotateVideoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults { + if m != nil { + return m.AnnotationResults + } + return nil +} + +// Annotation progress for a single video. +type VideoAnnotationProgress struct { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // Approximate percentage processed thus far. + // Guaranteed to be 100 when fully processed. + ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"` + // Time when the request was received. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time of the most recent update. 
+ UpdateTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` +} + +func (m *VideoAnnotationProgress) Reset() { *m = VideoAnnotationProgress{} } +func (m *VideoAnnotationProgress) String() string { return proto.CompactTextString(m) } +func (*VideoAnnotationProgress) ProtoMessage() {} +func (*VideoAnnotationProgress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *VideoAnnotationProgress) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *VideoAnnotationProgress) GetProgressPercent() int32 { + if m != nil { + return m.ProgressPercent + } + return 0 +} + +func (m *VideoAnnotationProgress) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *VideoAnnotationProgress) GetUpdateTime() *google_protobuf4.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +type AnnotateVideoProgress struct { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. 
+ AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress" json:"annotation_progress,omitempty"` +} + +func (m *AnnotateVideoProgress) Reset() { *m = AnnotateVideoProgress{} } +func (m *AnnotateVideoProgress) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoProgress) ProtoMessage() {} +func (*AnnotateVideoProgress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress { + if m != nil { + return m.AnnotationProgress + } + return nil +} + +func init() { + proto.RegisterType((*AnnotateVideoRequest)(nil), "google.cloud.videointelligence.v1beta2.AnnotateVideoRequest") + proto.RegisterType((*VideoContext)(nil), "google.cloud.videointelligence.v1beta2.VideoContext") + proto.RegisterType((*LabelDetectionConfig)(nil), "google.cloud.videointelligence.v1beta2.LabelDetectionConfig") + proto.RegisterType((*ShotChangeDetectionConfig)(nil), "google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig") + proto.RegisterType((*ExplicitContentDetectionConfig)(nil), "google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig") + proto.RegisterType((*FaceDetectionConfig)(nil), "google.cloud.videointelligence.v1beta2.FaceDetectionConfig") + proto.RegisterType((*VideoSegment)(nil), "google.cloud.videointelligence.v1beta2.VideoSegment") + proto.RegisterType((*LabelSegment)(nil), "google.cloud.videointelligence.v1beta2.LabelSegment") + proto.RegisterType((*LabelFrame)(nil), "google.cloud.videointelligence.v1beta2.LabelFrame") + proto.RegisterType((*Entity)(nil), "google.cloud.videointelligence.v1beta2.Entity") + proto.RegisterType((*LabelAnnotation)(nil), "google.cloud.videointelligence.v1beta2.LabelAnnotation") + proto.RegisterType((*ExplicitContentFrame)(nil), "google.cloud.videointelligence.v1beta2.ExplicitContentFrame") + proto.RegisterType((*ExplicitContentAnnotation)(nil), 
"google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation") + proto.RegisterType((*NormalizedBoundingBox)(nil), "google.cloud.videointelligence.v1beta2.NormalizedBoundingBox") + proto.RegisterType((*FaceSegment)(nil), "google.cloud.videointelligence.v1beta2.FaceSegment") + proto.RegisterType((*FaceFrame)(nil), "google.cloud.videointelligence.v1beta2.FaceFrame") + proto.RegisterType((*FaceAnnotation)(nil), "google.cloud.videointelligence.v1beta2.FaceAnnotation") + proto.RegisterType((*VideoAnnotationResults)(nil), "google.cloud.videointelligence.v1beta2.VideoAnnotationResults") + proto.RegisterType((*AnnotateVideoResponse)(nil), "google.cloud.videointelligence.v1beta2.AnnotateVideoResponse") + proto.RegisterType((*VideoAnnotationProgress)(nil), "google.cloud.videointelligence.v1beta2.VideoAnnotationProgress") + proto.RegisterType((*AnnotateVideoProgress)(nil), "google.cloud.videointelligence.v1beta2.AnnotateVideoProgress") + proto.RegisterEnum("google.cloud.videointelligence.v1beta2.Feature", Feature_name, Feature_value) + proto.RegisterEnum("google.cloud.videointelligence.v1beta2.LabelDetectionMode", LabelDetectionMode_name, LabelDetectionMode_value) + proto.RegisterEnum("google.cloud.videointelligence.v1beta2.Likelihood", Likelihood_name, Likelihood_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for VideoIntelligenceService service + +type VideoIntelligenceServiceClient interface { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). 
+ AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type videoIntelligenceServiceClient struct { + cc *grpc.ClientConn +} + +func NewVideoIntelligenceServiceClient(cc *grpc.ClientConn) VideoIntelligenceServiceClient { + return &videoIntelligenceServiceClient{cc} +} + +func (c *videoIntelligenceServiceClient) AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.videointelligence.v1beta2.VideoIntelligenceService/AnnotateVideo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for VideoIntelligenceService service + +type VideoIntelligenceServiceServer interface { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). 
+ AnnotateVideo(context.Context, *AnnotateVideoRequest) (*google_longrunning.Operation, error) +} + +func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer) { + s.RegisterService(&_VideoIntelligenceService_serviceDesc, srv) +} + +func _VideoIntelligenceService_AnnotateVideo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnnotateVideoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VideoIntelligenceServiceServer).AnnotateVideo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.videointelligence.v1beta2.VideoIntelligenceService/AnnotateVideo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VideoIntelligenceServiceServer).AnnotateVideo(ctx, req.(*AnnotateVideoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _VideoIntelligenceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.videointelligence.v1beta2.VideoIntelligenceService", + HandlerType: (*VideoIntelligenceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AnnotateVideo", + Handler: _VideoIntelligenceService_AnnotateVideo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/videointelligence/v1beta2/video_intelligence.proto", +} + +func init() { + proto.RegisterFile("google/cloud/videointelligence/v1beta2/video_intelligence.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 1702 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x6f, 0xdb, 0xc8, + 0x15, 0x2f, 0x25, 0x59, 0xb1, 0x9e, 0x64, 0x4b, 0x19, 0xcb, 0xb6, 0xec, 0x26, 0x5e, 0x97, 0x29, + 0x16, 0xae, 0x0b, 0x48, 0x88, 0x77, 0xb1, 0x45, 0x93, 0x6d, 0x17, 0xb2, 0x4c, 0x6d, 0xd4, 0x75, + 0x24, 0x81, 0x52, 0xd2, 0xa6, 
0x45, 0x41, 0x50, 0xe4, 0x88, 0x22, 0x96, 0xe2, 0x70, 0xc9, 0x61, + 0x10, 0xf7, 0xd0, 0xc3, 0x1e, 0x16, 0xe8, 0xb1, 0xe8, 0xa5, 0x9f, 0xa1, 0x87, 0x7e, 0x83, 0x02, + 0x45, 0x2f, 0x05, 0x7a, 0xe9, 0xa1, 0xbd, 0xf4, 0xd2, 0x53, 0x8f, 0xfd, 0x10, 0x05, 0x67, 0x86, + 0x12, 0x45, 0xc9, 0xb1, 0x94, 0xa0, 0x37, 0xce, 0x7b, 0xf3, 0x7e, 0xef, 0xff, 0x9b, 0x19, 0xc2, + 0x67, 0x16, 0x21, 0x96, 0x83, 0x1b, 0x86, 0x43, 0x42, 0xb3, 0xf1, 0xda, 0x36, 0x31, 0xb1, 0x5d, + 0x8a, 0x1d, 0xc7, 0xb6, 0xb0, 0x6b, 0xe0, 0xc6, 0xeb, 0xc7, 0x23, 0x4c, 0xf5, 0x0b, 0xce, 0xd1, + 0x92, 0xac, 0xba, 0xe7, 0x13, 0x4a, 0xd0, 0x87, 0x1c, 0xa0, 0xce, 0x00, 0xea, 0x4b, 0x00, 0x75, + 0x01, 0x70, 0xfc, 0x40, 0x28, 0xd2, 0x3d, 0xbb, 0xa1, 0xbb, 0x2e, 0xa1, 0x3a, 0xb5, 0x89, 0x1b, + 0x70, 0x94, 0xe3, 0x47, 0x82, 0xeb, 0x10, 0xd7, 0xf2, 0x43, 0xd7, 0xb5, 0x5d, 0xab, 0x41, 0x3c, + 0xec, 0x2f, 0x6c, 0x3a, 0x11, 0x9b, 0xd8, 0x6a, 0x14, 0x8e, 0x1b, 0x66, 0xc8, 0x37, 0x08, 0xfe, + 0x07, 0x69, 0x3e, 0xb5, 0xa7, 0x38, 0xa0, 0xfa, 0xd4, 0x13, 0x1b, 0x0e, 0xc5, 0x06, 0xdf, 0x33, + 0x1a, 0x01, 0xd5, 0x69, 0x28, 0x90, 0xe5, 0x3f, 0x67, 0xa0, 0xda, 0xe4, 0x46, 0xe1, 0x97, 0x91, + 0x0b, 0x2a, 0xfe, 0x2a, 0xc4, 0x01, 0x45, 0xdf, 0x86, 0x82, 0xed, 0x7a, 0x21, 0xd5, 0x42, 0xdf, + 0xae, 0x49, 0xa7, 0xd2, 0x59, 0x41, 0xdd, 0x66, 0x84, 0x17, 0xbe, 0x8d, 0x1e, 0xc1, 0x0e, 0x67, + 0x1a, 0xc4, 0xa5, 0xd8, 0xa5, 0xb5, 0xfc, 0xa9, 0x74, 0x56, 0x52, 0x4b, 0x8c, 0xd8, 0xe2, 0x34, + 0xf4, 0x05, 0x6c, 0x8f, 0xb1, 0x4e, 0x43, 0x1f, 0x07, 0xb5, 0xcc, 0x69, 0xf6, 0x6c, 0xf7, 0xa2, + 0x51, 0x5f, 0x2f, 0x64, 0xf5, 0x36, 0x97, 0x53, 0x67, 0x00, 0xe8, 0x15, 0xec, 0xf0, 0x44, 0x30, + 0x8d, 0x6f, 0x68, 0x2d, 0x7b, 0x2a, 0x9d, 0x15, 0x2f, 0x3e, 0x5e, 0x17, 0x91, 0xf9, 0xd6, 0xe2, + 0xb2, 0x6a, 0xe9, 0x75, 0x62, 0x85, 0x1e, 0x02, 0x90, 0x90, 0xc6, 0xae, 0xe6, 0x98, 0xab, 0x05, + 0x4e, 0x89, 0x7c, 0xfd, 0x00, 0x8a, 0x0e, 0x31, 0x58, 0xb4, 0x35, 0xdb, 0xac, 0x6d, 0x31, 0x3e, + 0xc4, 0xa4, 0x8e, 0x29, 0xff, 0x3b, 0x07, 0xa5, 0x24, 0x3c, 0xea, 
0xc3, 0x76, 0x80, 0xad, 0x29, + 0x76, 0x69, 0x50, 0x93, 0x4e, 0xb3, 0x1b, 0x9b, 0x39, 0xe0, 0xc2, 0xea, 0x0c, 0x05, 0xf9, 0x70, + 0xe0, 0xe8, 0x23, 0xec, 0x68, 0x26, 0xa6, 0xd8, 0x60, 0xa6, 0x18, 0xc4, 0x1d, 0xdb, 0x56, 0x2d, + 0xc3, 0xc2, 0xf0, 0xe9, 0xba, 0xf8, 0xd7, 0x11, 0xca, 0x55, 0x0c, 0xd2, 0x62, 0x18, 0x6a, 0xd5, + 0x59, 0x41, 0x45, 0x5f, 0x4b, 0xf0, 0x20, 0x98, 0x10, 0xaa, 0x19, 0x13, 0xdd, 0xb5, 0xf0, 0xb2, + 0x6a, 0x9e, 0x81, 0xe6, 0xba, 0xaa, 0x07, 0x13, 0x42, 0x5b, 0x0c, 0x2a, 0xad, 0xff, 0x28, 0xb8, + 0x8d, 0x85, 0x7e, 0x2b, 0xc1, 0x77, 0xf0, 0x1b, 0xcf, 0xb1, 0x0d, 0x7b, 0x56, 0x6c, 0xcb, 0x96, + 0xe4, 0x98, 0x25, 0xed, 0x75, 0x2d, 0x51, 0x04, 0xa0, 0x28, 0xd4, 0xb4, 0x39, 0x27, 0xf8, 0xad, + 0x7c, 0x44, 0x60, 0x7f, 0xac, 0x1b, 0x2b, 0x02, 0xb2, 0xc5, 0xcc, 0x78, 0xba, 0x76, 0x91, 0xeb, + 0xc6, 0x52, 0x28, 0xf6, 0xc6, 0xcb, 0x44, 0xf9, 0xaf, 0x12, 0x54, 0x57, 0x25, 0x0e, 0x39, 0x50, + 0x4d, 0x97, 0xc5, 0x94, 0x98, 0x98, 0xb5, 0xeb, 0xee, 0xc5, 0x93, 0x77, 0x2b, 0x8a, 0xe7, 0xc4, + 0xc4, 0x2a, 0x72, 0x96, 0x68, 0xe8, 0xfb, 0x70, 0x3f, 0xe0, 0xb3, 0x4b, 0xf7, 0x6f, 0x34, 0x43, + 0x9f, 0x62, 0x5f, 0x67, 0xf5, 0xb7, 0xad, 0x56, 0xe6, 0x8c, 0x16, 0xa3, 0xa3, 0x2a, 0x6c, 0x45, + 0xa6, 0x38, 0xac, 0x4a, 0x0a, 0x2a, 0x5f, 0xc8, 0x8f, 0xe1, 0xe8, 0xd6, 0x32, 0x98, 0x8b, 0x48, + 0x49, 0x91, 0x4f, 0xe0, 0xe4, 0xed, 0xf9, 0xba, 0x45, 0x4e, 0x87, 0xbd, 0x15, 0x01, 0x5e, 0xbd, + 0x19, 0x7d, 0x0c, 0x07, 0xb6, 0x6b, 0x38, 0xa1, 0x89, 0xb5, 0x11, 0x09, 0x5d, 0xd3, 0x76, 0x2d, + 0x6d, 0x44, 0xde, 0xb0, 0xc1, 0x15, 0xf9, 0x57, 0x15, 0xdc, 0x4b, 0xc1, 0xbc, 0x8c, 0x78, 0xf2, + 0xef, 0x25, 0xd1, 0xf8, 0xa2, 0x61, 0x91, 0xc2, 0x22, 0xe4, 0x53, 0x2d, 0x1a, 0xbf, 0x1a, 0x19, + 0x8f, 0x03, 0x4c, 0x99, 0xa2, 0xe2, 0xc5, 0x51, 0x9c, 0x8c, 0x78, 0x44, 0xd7, 0xaf, 0xc4, 0x08, + 0x57, 0xcb, 0x4c, 0x66, 0x68, 0x4f, 0x71, 0x8f, 0x49, 0xa0, 0x26, 0x94, 0xb1, 0x6b, 0x2e, 0x80, + 0x64, 0xee, 0x02, 0xd9, 0xc1, 0xae, 0x39, 0x87, 0x90, 0x7f, 0x0d, 0x25, 0x96, 0xd5, 0xd8, 0xb2, + 0x2e, 
0xdc, 0x13, 0xc3, 0x44, 0xd8, 0xf3, 0x6e, 0x13, 0x29, 0x06, 0x41, 0x27, 0x00, 0xac, 0xe8, + 0xcd, 0x68, 0x2f, 0xb3, 0x2e, 0xa3, 0x26, 0x28, 0xf2, 0x04, 0x80, 0xe9, 0x6f, 0xfb, 0xfa, 0x14, + 0xa3, 0x27, 0x50, 0xdc, 0x28, 0x22, 0x40, 0xe7, 0xc1, 0xb8, 0x4b, 0x93, 0x03, 0x79, 0xc5, 0xa5, + 0x36, 0xbd, 0x89, 0x4e, 0x2c, 0xcc, 0xbe, 0xa2, 0x31, 0x2d, 0x4e, 0x2c, 0x4e, 0xe8, 0x98, 0xe8, + 0x14, 0x8a, 0x26, 0x0e, 0x0c, 0xdf, 0xf6, 0x22, 0x0d, 0x0c, 0xa7, 0xa0, 0x26, 0x49, 0xd1, 0x99, + 0xe6, 0xe8, 0xae, 0x15, 0xea, 0x16, 0xd6, 0x8c, 0xa8, 0x8b, 0x78, 0xe5, 0x96, 0x62, 0x62, 0x8b, + 0x98, 0x58, 0xfe, 0x67, 0x06, 0xca, 0xcc, 0xb1, 0xe6, 0xec, 0x20, 0x47, 0x6d, 0xc8, 0x73, 0x35, + 0xc2, 0xb1, 0xfa, 0xda, 0x73, 0x88, 0x49, 0xa9, 0x42, 0x1a, 0xfd, 0x02, 0xee, 0x1b, 0x3a, 0xc5, + 0x16, 0xf1, 0x6f, 0x34, 0x46, 0xb2, 0xc5, 0xc1, 0xb9, 0x39, 0x64, 0x25, 0x06, 0x52, 0x04, 0xce, + 0xc2, 0x99, 0x94, 0xdd, 0xec, 0x4c, 0x4a, 0x16, 0x52, 0xe2, 0x4c, 0xfa, 0x09, 0xe4, 0xc7, 0x51, + 0x76, 0x83, 0x5a, 0x8e, 0xe1, 0x5d, 0x6c, 0x84, 0xc7, 0x0a, 0x43, 0x15, 0x08, 0xf2, 0x9f, 0x24, + 0xa8, 0xa6, 0xba, 0xfc, 0xfd, 0x2b, 0xc7, 0x86, 0x03, 0x8f, 0xf8, 0x2e, 0xb1, 0x7c, 0xdd, 0x9b, + 0xdc, 0x68, 0x8e, 0xfd, 0x25, 0x76, 0xec, 0x09, 0x21, 0x26, 0xcb, 0xfe, 0xee, 0x06, 0x06, 0xcf, + 0x24, 0xd5, 0xfd, 0x04, 0xe2, 0x9c, 0x2c, 0x7f, 0x05, 0x47, 0x29, 0xf3, 0x13, 0xf5, 0x31, 0x9c, + 0x05, 0x8a, 0x5f, 0x06, 0x3e, 0x7d, 0xc7, 0x73, 0x6a, 0x31, 0x64, 0x5f, 0xc2, 0x7e, 0x97, 0xf8, + 0x53, 0xdd, 0xb1, 0x7f, 0x85, 0xcd, 0xc4, 0x5c, 0x42, 0x08, 0x72, 0x0e, 0x1e, 0xf3, 0x58, 0x65, + 0x54, 0xf6, 0x8d, 0x2a, 0x90, 0xa5, 0xc4, 0x13, 0xdd, 0x13, 0x7d, 0x46, 0x73, 0xd0, 0xb7, 0xad, + 0x09, 0xbf, 0x47, 0x65, 0x54, 0xbe, 0x40, 0x07, 0x90, 0x1f, 0x11, 0x4a, 0xc9, 0x94, 0x1d, 0xa9, + 0x19, 0x55, 0xac, 0xe4, 0x5f, 0x42, 0x31, 0x1a, 0xa6, 0xff, 0xa7, 0x69, 0x22, 0xff, 0x45, 0x82, + 0x42, 0x84, 0xcf, 0x73, 0x7e, 0x03, 0x47, 0xee, 0xcc, 0xb3, 0xf4, 0x3c, 0xe6, 0x21, 0xfc, 0xd1, + 0xba, 0xfa, 0x56, 0x86, 0x48, 0x3d, 0x74, 
0x57, 0x91, 0x71, 0x90, 0x2e, 0xb7, 0xcc, 0x06, 0xe5, + 0x26, 0xff, 0x4d, 0x82, 0xdd, 0xc8, 0x89, 0x44, 0xe6, 0x1f, 0x40, 0x81, 0x4e, 0xc2, 0xe9, 0xc8, + 0xd5, 0x6d, 0x7e, 0xe0, 0x94, 0xd4, 0x39, 0x01, 0xf5, 0x12, 0x2d, 0xc9, 0xdb, 0xfc, 0xa3, 0x4d, + 0xae, 0x0e, 0xcb, 0x1d, 0xd9, 0x99, 0x15, 0x1a, 0xef, 0xf0, 0xc7, 0x9b, 0xc0, 0x2d, 0x56, 0xd7, + 0x7f, 0xb7, 0xe0, 0x80, 0xe5, 0x6a, 0xee, 0x8d, 0x8a, 0x83, 0xd0, 0xa1, 0xc1, 0xdb, 0x1f, 0x06, + 0x01, 0x1c, 0x09, 0x73, 0x34, 0x7e, 0x33, 0x49, 0x3c, 0x78, 0x84, 0x93, 0x3f, 0xd8, 0x68, 0x4e, + 0x24, 0xf4, 0x1f, 0x0a, 0xe4, 0x14, 0x3d, 0x40, 0x53, 0x38, 0x60, 0x17, 0xd5, 0x65, 0x8d, 0xd9, + 0xf7, 0xd3, 0x58, 0x8d, 0x60, 0x97, 0xd4, 0x11, 0x38, 0x64, 0x51, 0x5a, 0xa1, 0x2f, 0xf7, 0x7e, + 0xfa, 0xf6, 0x19, 0xee, 0x92, 0x42, 0x1d, 0x2a, 0xec, 0xc2, 0x99, 0xd4, 0xb4, 0xc5, 0x34, 0x7d, + 0xb2, 0x49, 0x86, 0x13, 0x8a, 0xca, 0xe3, 0x85, 0x75, 0x80, 0x34, 0xa8, 0xb0, 0x10, 0x26, 0x55, + 0xe4, 0xdf, 0xe3, 0xe9, 0x52, 0x8e, 0xd0, 0x92, 0x0a, 0x7c, 0xd8, 0x9b, 0xdd, 0xe3, 0xe7, 0x4a, + 0x6a, 0xf7, 0x36, 0x7b, 0x43, 0xdc, 0x3a, 0x64, 0x55, 0x14, 0xa3, 0x27, 0xda, 0xef, 0x0c, 0xb6, + 0xb0, 0xef, 0x13, 0xbf, 0x56, 0x60, 0x5a, 0x50, 0xac, 0xc5, 0xf7, 0x8c, 0xfa, 0x80, 0x3d, 0x82, + 0x55, 0xbe, 0x41, 0xfe, 0x46, 0x82, 0xfd, 0xd4, 0x2b, 0x38, 0xf0, 0x88, 0x1b, 0x60, 0x34, 0x05, + 0x34, 0x37, 0x57, 0xf3, 0x79, 0x0f, 0x88, 0x29, 0xf4, 0xe3, 0x8d, 0x42, 0xb3, 0xd4, 0x49, 0xea, + 0x7d, 0x3d, 0x4d, 0x92, 0xff, 0x25, 0xc1, 0x61, 0x6a, 0x77, 0xdf, 0x27, 0x96, 0x8f, 0x83, 0x3b, + 0x1a, 0xef, 0x7b, 0x50, 0xf1, 0xc4, 0x46, 0xcd, 0xc3, 0xbe, 0x11, 0xcd, 0xe6, 0x68, 0x7c, 0x6d, + 0xa9, 0xe5, 0x98, 0xde, 0xe7, 0x64, 0xf4, 0x43, 0x80, 0xf9, 0x2d, 0x55, 0xbc, 0xe2, 0x8e, 0x97, + 0x66, 0xdc, 0x30, 0xfe, 0x83, 0xa0, 0x16, 0x66, 0xf7, 0x53, 0xf4, 0x14, 0x8a, 0xa1, 0x67, 0xea, + 0x14, 0x73, 0xd9, 0xdc, 0x9d, 0xb2, 0xc0, 0xb7, 0x47, 0x04, 0xf9, 0x37, 0xe9, 0x20, 0xcf, 0x3c, + 0xf3, 0x60, 0x2f, 0x11, 0xe4, 0xd8, 0x5e, 0x11, 0xe5, 0xcf, 0xde, 0x31, 0xca, 
0x31, 0xba, 0x9a, + 0x48, 0x60, 0x4c, 0x3b, 0xff, 0x46, 0x82, 0x7b, 0xe2, 0x27, 0x03, 0x3a, 0x84, 0xbd, 0xb6, 0xd2, + 0x1c, 0xbe, 0x50, 0x15, 0xed, 0x45, 0x77, 0xd0, 0x57, 0x5a, 0x9d, 0x76, 0x47, 0xb9, 0xaa, 0x7c, + 0x0b, 0xed, 0x41, 0xf9, 0xba, 0x79, 0xa9, 0x5c, 0x6b, 0x57, 0xca, 0x50, 0x69, 0x0d, 0x3b, 0xbd, + 0x6e, 0x45, 0x42, 0x47, 0xb0, 0x3f, 0x78, 0xd6, 0x1b, 0x6a, 0xad, 0x67, 0xcd, 0xee, 0xe7, 0x4a, + 0x82, 0x95, 0x41, 0x27, 0x70, 0xac, 0xfc, 0xac, 0x7f, 0xdd, 0x69, 0x75, 0x86, 0x5a, 0xab, 0xd7, + 0x1d, 0x2a, 0xdd, 0x61, 0x82, 0x9f, 0x45, 0x08, 0x76, 0xdb, 0xcd, 0x56, 0x52, 0x26, 0x77, 0xee, + 0x03, 0x5a, 0x7e, 0x7e, 0xa1, 0xef, 0xc2, 0x69, 0x4a, 0xb3, 0xf6, 0xbc, 0x77, 0x95, 0xb6, 0x6f, + 0x07, 0x0a, 0xcc, 0x94, 0x88, 0x55, 0x91, 0xd0, 0x2e, 0x40, 0x5b, 0x6d, 0x3e, 0x57, 0xf8, 0x3a, + 0x13, 0xf9, 0xc5, 0xd8, 0xcd, 0xee, 0x95, 0x96, 0x60, 0x64, 0xcf, 0x29, 0xc0, 0xfc, 0xee, 0x82, + 0x8e, 0xe1, 0xe0, 0xba, 0xf3, 0x85, 0x72, 0xdd, 0x79, 0xd6, 0xeb, 0x5d, 0xa5, 0x34, 0xdc, 0x87, + 0x9d, 0x97, 0x8a, 0xfa, 0x4a, 0x7b, 0xd1, 0x65, 0x5b, 0x5e, 0x55, 0x24, 0x54, 0x82, 0xed, 0xd9, + 0x2a, 0x13, 0xad, 0xfa, 0xbd, 0xc1, 0xa0, 0x73, 0x79, 0xad, 0x54, 0xb2, 0x08, 0x20, 0x2f, 0x38, + 0x39, 0x54, 0x86, 0x22, 0x13, 0x15, 0x84, 0xad, 0x8b, 0x3f, 0x4a, 0x50, 0x63, 0x29, 0xea, 0x24, + 0x92, 0x37, 0xc0, 0xfe, 0x6b, 0xdb, 0xc0, 0xd1, 0x3b, 0x7f, 0x67, 0xa1, 0x36, 0xd0, 0xda, 0xb7, + 0xa4, 0x55, 0x7f, 0xaf, 0x8e, 0x1f, 0xc6, 0xd2, 0x89, 0xdf, 0x6a, 0xf5, 0x5e, 0xfc, 0x5b, 0x4d, + 0x7e, 0xf4, 0xf5, 0x3f, 0xfe, 0xf3, 0xbb, 0xcc, 0x43, 0xb9, 0xb6, 0xf8, 0x97, 0x2f, 0x78, 0x22, + 0x4a, 0x05, 0x3f, 0x91, 0xce, 0x2f, 0xff, 0x2e, 0xc1, 0xb9, 0x41, 0xa6, 0x6b, 0xda, 0x71, 0xf9, + 0xf0, 0x36, 0xe7, 0xfa, 0x51, 0x5b, 0xf4, 0xa5, 0x9f, 0xff, 0x54, 0x00, 0x59, 0x24, 0x7a, 0x52, + 0xd4, 0x89, 0x6f, 0x35, 0x2c, 0xec, 0xb2, 0xa6, 0x69, 0x70, 0x96, 0xee, 0xd9, 0xc1, 0x5d, 0xff, + 0x23, 0x9f, 0x2e, 0x71, 0xfe, 0x90, 0xf9, 0xf0, 0x73, 0x8e, 0xdc, 0x62, 0x26, 0x2e, 0xd9, 0x51, + 0x7f, 0xc9, 0x45, 
0x47, 0x79, 0xa6, 0xec, 0xa3, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x2f, + 0xdf, 0xfc, 0xfb, 0x14, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/geometry.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/geometry.pb.go new file mode 100644 index 000000000..ef01b7cbb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/geometry.pb.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/vision/v1/geometry.proto + +/* +Package vision is a generated protocol buffer package. + +It is generated from these files: + google/cloud/vision/v1/geometry.proto + google/cloud/vision/v1/image_annotator.proto + google/cloud/vision/v1/text_annotation.proto + google/cloud/vision/v1/web_detection.proto + +It has these top-level messages: + Vertex + BoundingPoly + Position + Feature + ImageSource + Image + FaceAnnotation + LocationInfo + Property + EntityAnnotation + SafeSearchAnnotation + LatLongRect + ColorInfo + DominantColorsAnnotation + ImageProperties + CropHint + CropHintsAnnotation + CropHintsParams + ImageContext + AnnotateImageRequest + AnnotateImageResponse + BatchAnnotateImagesRequest + BatchAnnotateImagesResponse + TextAnnotation + Page + Block + Paragraph + Word + Symbol + WebDetection +*/ +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A vertex represents a 2D point in the image. 
+// NOTE: the vertex coordinates are in the same scale as the original image. +type Vertex struct { + // X coordinate. + X int32 `protobuf:"varint,1,opt,name=x" json:"x,omitempty"` + // Y coordinate. + Y int32 `protobuf:"varint,2,opt,name=y" json:"y,omitempty"` +} + +func (m *Vertex) Reset() { *m = Vertex{} } +func (m *Vertex) String() string { return proto.CompactTextString(m) } +func (*Vertex) ProtoMessage() {} +func (*Vertex) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Vertex) GetX() int32 { + if m != nil { + return m.X + } + return 0 +} + +func (m *Vertex) GetY() int32 { + if m != nil { + return m.Y + } + return 0 +} + +// A bounding polygon for the detected image annotation. +type BoundingPoly struct { + // The bounding polygon vertices. + Vertices []*Vertex `protobuf:"bytes,1,rep,name=vertices" json:"vertices,omitempty"` +} + +func (m *BoundingPoly) Reset() { *m = BoundingPoly{} } +func (m *BoundingPoly) String() string { return proto.CompactTextString(m) } +func (*BoundingPoly) ProtoMessage() {} +func (*BoundingPoly) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *BoundingPoly) GetVertices() []*Vertex { + if m != nil { + return m.Vertices + } + return nil +} + +// A 3D position in the image, used primarily for Face detection landmarks. +// A valid Position must have both x and y coordinates. +// The position coordinates are in the same scale as the original image. +type Position struct { + // X coordinate. + X float32 `protobuf:"fixed32,1,opt,name=x" json:"x,omitempty"` + // Y coordinate. + Y float32 `protobuf:"fixed32,2,opt,name=y" json:"y,omitempty"` + // Z coordinate (or depth). 
+ Z float32 `protobuf:"fixed32,3,opt,name=z" json:"z,omitempty"` +} + +func (m *Position) Reset() { *m = Position{} } +func (m *Position) String() string { return proto.CompactTextString(m) } +func (*Position) ProtoMessage() {} +func (*Position) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Position) GetX() float32 { + if m != nil { + return m.X + } + return 0 +} + +func (m *Position) GetY() float32 { + if m != nil { + return m.Y + } + return 0 +} + +func (m *Position) GetZ() float32 { + if m != nil { + return m.Z + } + return 0 +} + +func init() { + proto.RegisterType((*Vertex)(nil), "google.cloud.vision.v1.Vertex") + proto.RegisterType((*BoundingPoly)(nil), "google.cloud.vision.v1.BoundingPoly") + proto.RegisterType((*Position)(nil), "google.cloud.vision.v1.Position") +} + +func init() { proto.RegisterFile("google/cloud/vision/v1/geometry.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 237 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0x31, 0x4b, 0x03, 0x31, + 0x14, 0x80, 0x79, 0x57, 0x2c, 0x25, 0xd6, 0xe5, 0x06, 0x39, 0x1c, 0xa4, 0x1c, 0x0a, 0x9d, 0x12, + 0xaa, 0x4e, 0xea, 0x74, 0x8b, 0xe0, 0x74, 0xdc, 0xe0, 0xe0, 0x56, 0xaf, 0x8f, 0x47, 0xe0, 0x9a, + 0x57, 0x92, 0x34, 0x34, 0xfd, 0xe5, 0x8e, 0xd2, 0xa4, 0x28, 0x8a, 0xdd, 0xf2, 0x91, 0x8f, 0xf7, + 0xf1, 0x9e, 0xb8, 0x25, 0x66, 0x1a, 0x50, 0xf5, 0x03, 0x6f, 0x57, 0x2a, 0x68, 0xa7, 0xd9, 0xa8, + 0xb0, 0x50, 0x84, 0xbc, 0x46, 0x6f, 0xa3, 0xdc, 0x58, 0xf6, 0x5c, 0x5e, 0x66, 0x4d, 0x26, 0x4d, + 0x66, 0x4d, 0x86, 0x45, 0x7d, 0x23, 0xc6, 0x6f, 0x68, 0x3d, 0xee, 0xca, 0xa9, 0x80, 0x5d, 0x05, + 0x33, 0x98, 0x9f, 0x75, 0x90, 0x28, 0x56, 0x45, 0xa6, 0x58, 0xbf, 0x8a, 0x69, 0xc3, 0x5b, 0xb3, + 0xd2, 0x86, 0x5a, 0x1e, 0x62, 0xf9, 0x28, 0x26, 0x01, 0xad, 0xd7, 0x3d, 0xba, 0x0a, 0x66, 0xa3, + 0xf9, 0xf9, 0xdd, 0xb5, 0xfc, 0x3f, 0x20, 0xf3, 0xf4, 0xee, 0xdb, 0xaf, 0x1f, 0xc4, 0xa4, 0x65, + 0xa7, 0xbd, 0x66, 0xf3, 
0xd3, 0x2c, 0x7e, 0x35, 0x8b, 0x0e, 0xe2, 0x81, 0xf6, 0xd5, 0x28, 0xd3, + 0xbe, 0x31, 0xe2, 0xaa, 0xe7, 0xf5, 0x89, 0x48, 0x73, 0xf1, 0x72, 0xdc, 0xb6, 0x3d, 0x2c, 0xdb, + 0xc2, 0xfb, 0xf3, 0x51, 0x24, 0x1e, 0x96, 0x86, 0x24, 0x5b, 0x52, 0x84, 0x26, 0x9d, 0x42, 0xe5, + 0xaf, 0xe5, 0x46, 0xbb, 0xbf, 0x47, 0x7b, 0xca, 0xaf, 0x4f, 0x80, 0x8f, 0x71, 0x72, 0xef, 0xbf, + 0x02, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x04, 0x38, 0x95, 0x5f, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/image_annotator.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/image_annotator.pb.go new file mode 100644 index 000000000..d5ed6d63d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/image_annotator.pb.go @@ -0,0 +1,1438 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/vision/v1/image_annotator.proto + +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" +import google_type "google.golang.org/genproto/googleapis/type/color" +import google_type1 "google.golang.org/genproto/googleapis/type/latlng" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A bucketized representation of likelihood, which is intended to give clients +// highly stable results across model upgrades. +type Likelihood int32 + +const ( + // Unknown likelihood. + Likelihood_UNKNOWN Likelihood = 0 + // It is very unlikely that the image belongs to the specified vertical. + Likelihood_VERY_UNLIKELY Likelihood = 1 + // It is unlikely that the image belongs to the specified vertical. 
+ Likelihood_UNLIKELY Likelihood = 2 + // It is possible that the image belongs to the specified vertical. + Likelihood_POSSIBLE Likelihood = 3 + // It is likely that the image belongs to the specified vertical. + Likelihood_LIKELY Likelihood = 4 + // It is very likely that the image belongs to the specified vertical. + Likelihood_VERY_LIKELY Likelihood = 5 +) + +var Likelihood_name = map[int32]string{ + 0: "UNKNOWN", + 1: "VERY_UNLIKELY", + 2: "UNLIKELY", + 3: "POSSIBLE", + 4: "LIKELY", + 5: "VERY_LIKELY", +} +var Likelihood_value = map[string]int32{ + "UNKNOWN": 0, + "VERY_UNLIKELY": 1, + "UNLIKELY": 2, + "POSSIBLE": 3, + "LIKELY": 4, + "VERY_LIKELY": 5, +} + +func (x Likelihood) String() string { + return proto.EnumName(Likelihood_name, int32(x)) +} +func (Likelihood) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Type of image feature. +type Feature_Type int32 + +const ( + // Unspecified feature type. + Feature_TYPE_UNSPECIFIED Feature_Type = 0 + // Run face detection. + Feature_FACE_DETECTION Feature_Type = 1 + // Run landmark detection. + Feature_LANDMARK_DETECTION Feature_Type = 2 + // Run logo detection. + Feature_LOGO_DETECTION Feature_Type = 3 + // Run label detection. + Feature_LABEL_DETECTION Feature_Type = 4 + // Run OCR. + Feature_TEXT_DETECTION Feature_Type = 5 + // Run dense text document OCR. Takes precedence when both + // DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. + Feature_DOCUMENT_TEXT_DETECTION Feature_Type = 11 + // Run computer vision models to compute image safe-search properties. + Feature_SAFE_SEARCH_DETECTION Feature_Type = 6 + // Compute a set of image properties, such as the image's dominant colors. + Feature_IMAGE_PROPERTIES Feature_Type = 7 + // Run crop hints. + Feature_CROP_HINTS Feature_Type = 9 + // Run web detection. 
+ Feature_WEB_DETECTION Feature_Type = 10 +) + +var Feature_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "FACE_DETECTION", + 2: "LANDMARK_DETECTION", + 3: "LOGO_DETECTION", + 4: "LABEL_DETECTION", + 5: "TEXT_DETECTION", + 11: "DOCUMENT_TEXT_DETECTION", + 6: "SAFE_SEARCH_DETECTION", + 7: "IMAGE_PROPERTIES", + 9: "CROP_HINTS", + 10: "WEB_DETECTION", +} +var Feature_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "FACE_DETECTION": 1, + "LANDMARK_DETECTION": 2, + "LOGO_DETECTION": 3, + "LABEL_DETECTION": 4, + "TEXT_DETECTION": 5, + "DOCUMENT_TEXT_DETECTION": 11, + "SAFE_SEARCH_DETECTION": 6, + "IMAGE_PROPERTIES": 7, + "CROP_HINTS": 9, + "WEB_DETECTION": 10, +} + +func (x Feature_Type) String() string { + return proto.EnumName(Feature_Type_name, int32(x)) +} +func (Feature_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +// Face landmark (feature) type. +// Left and right are defined from the vantage of the viewer of the image +// without considering mirror projections typical of photos. So, `LEFT_EYE`, +// typically, is the person's right eye. +type FaceAnnotation_Landmark_Type int32 + +const ( + // Unknown face landmark detected. Should not be filled. + FaceAnnotation_Landmark_UNKNOWN_LANDMARK FaceAnnotation_Landmark_Type = 0 + // Left eye. + FaceAnnotation_Landmark_LEFT_EYE FaceAnnotation_Landmark_Type = 1 + // Right eye. + FaceAnnotation_Landmark_RIGHT_EYE FaceAnnotation_Landmark_Type = 2 + // Left of left eyebrow. + FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW FaceAnnotation_Landmark_Type = 3 + // Right of left eyebrow. + FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW FaceAnnotation_Landmark_Type = 4 + // Left of right eyebrow. + FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW FaceAnnotation_Landmark_Type = 5 + // Right of right eyebrow. + FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW FaceAnnotation_Landmark_Type = 6 + // Midpoint between eyes. 
+ FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES FaceAnnotation_Landmark_Type = 7 + // Nose tip. + FaceAnnotation_Landmark_NOSE_TIP FaceAnnotation_Landmark_Type = 8 + // Upper lip. + FaceAnnotation_Landmark_UPPER_LIP FaceAnnotation_Landmark_Type = 9 + // Lower lip. + FaceAnnotation_Landmark_LOWER_LIP FaceAnnotation_Landmark_Type = 10 + // Mouth left. + FaceAnnotation_Landmark_MOUTH_LEFT FaceAnnotation_Landmark_Type = 11 + // Mouth right. + FaceAnnotation_Landmark_MOUTH_RIGHT FaceAnnotation_Landmark_Type = 12 + // Mouth center. + FaceAnnotation_Landmark_MOUTH_CENTER FaceAnnotation_Landmark_Type = 13 + // Nose, bottom right. + FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT FaceAnnotation_Landmark_Type = 14 + // Nose, bottom left. + FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT FaceAnnotation_Landmark_Type = 15 + // Nose, bottom center. + FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER FaceAnnotation_Landmark_Type = 16 + // Left eye, top boundary. + FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY FaceAnnotation_Landmark_Type = 17 + // Left eye, right corner. + FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER FaceAnnotation_Landmark_Type = 18 + // Left eye, bottom boundary. + FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY FaceAnnotation_Landmark_Type = 19 + // Left eye, left corner. + FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER FaceAnnotation_Landmark_Type = 20 + // Right eye, top boundary. + FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY FaceAnnotation_Landmark_Type = 21 + // Right eye, right corner. + FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER FaceAnnotation_Landmark_Type = 22 + // Right eye, bottom boundary. + FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY FaceAnnotation_Landmark_Type = 23 + // Right eye, left corner. + FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER FaceAnnotation_Landmark_Type = 24 + // Left eyebrow, upper midpoint. + FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT FaceAnnotation_Landmark_Type = 25 + // Right eyebrow, upper midpoint. 
+ FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT FaceAnnotation_Landmark_Type = 26 + // Left ear tragion. + FaceAnnotation_Landmark_LEFT_EAR_TRAGION FaceAnnotation_Landmark_Type = 27 + // Right ear tragion. + FaceAnnotation_Landmark_RIGHT_EAR_TRAGION FaceAnnotation_Landmark_Type = 28 + // Left eye pupil. + FaceAnnotation_Landmark_LEFT_EYE_PUPIL FaceAnnotation_Landmark_Type = 29 + // Right eye pupil. + FaceAnnotation_Landmark_RIGHT_EYE_PUPIL FaceAnnotation_Landmark_Type = 30 + // Forehead glabella. + FaceAnnotation_Landmark_FOREHEAD_GLABELLA FaceAnnotation_Landmark_Type = 31 + // Chin gnathion. + FaceAnnotation_Landmark_CHIN_GNATHION FaceAnnotation_Landmark_Type = 32 + // Chin left gonion. + FaceAnnotation_Landmark_CHIN_LEFT_GONION FaceAnnotation_Landmark_Type = 33 + // Chin right gonion. + FaceAnnotation_Landmark_CHIN_RIGHT_GONION FaceAnnotation_Landmark_Type = 34 +) + +var FaceAnnotation_Landmark_Type_name = map[int32]string{ + 0: "UNKNOWN_LANDMARK", + 1: "LEFT_EYE", + 2: "RIGHT_EYE", + 3: "LEFT_OF_LEFT_EYEBROW", + 4: "RIGHT_OF_LEFT_EYEBROW", + 5: "LEFT_OF_RIGHT_EYEBROW", + 6: "RIGHT_OF_RIGHT_EYEBROW", + 7: "MIDPOINT_BETWEEN_EYES", + 8: "NOSE_TIP", + 9: "UPPER_LIP", + 10: "LOWER_LIP", + 11: "MOUTH_LEFT", + 12: "MOUTH_RIGHT", + 13: "MOUTH_CENTER", + 14: "NOSE_BOTTOM_RIGHT", + 15: "NOSE_BOTTOM_LEFT", + 16: "NOSE_BOTTOM_CENTER", + 17: "LEFT_EYE_TOP_BOUNDARY", + 18: "LEFT_EYE_RIGHT_CORNER", + 19: "LEFT_EYE_BOTTOM_BOUNDARY", + 20: "LEFT_EYE_LEFT_CORNER", + 21: "RIGHT_EYE_TOP_BOUNDARY", + 22: "RIGHT_EYE_RIGHT_CORNER", + 23: "RIGHT_EYE_BOTTOM_BOUNDARY", + 24: "RIGHT_EYE_LEFT_CORNER", + 25: "LEFT_EYEBROW_UPPER_MIDPOINT", + 26: "RIGHT_EYEBROW_UPPER_MIDPOINT", + 27: "LEFT_EAR_TRAGION", + 28: "RIGHT_EAR_TRAGION", + 29: "LEFT_EYE_PUPIL", + 30: "RIGHT_EYE_PUPIL", + 31: "FOREHEAD_GLABELLA", + 32: "CHIN_GNATHION", + 33: "CHIN_LEFT_GONION", + 34: "CHIN_RIGHT_GONION", +} +var FaceAnnotation_Landmark_Type_value = map[string]int32{ + "UNKNOWN_LANDMARK": 0, + "LEFT_EYE": 1, + 
"RIGHT_EYE": 2, + "LEFT_OF_LEFT_EYEBROW": 3, + "RIGHT_OF_LEFT_EYEBROW": 4, + "LEFT_OF_RIGHT_EYEBROW": 5, + "RIGHT_OF_RIGHT_EYEBROW": 6, + "MIDPOINT_BETWEEN_EYES": 7, + "NOSE_TIP": 8, + "UPPER_LIP": 9, + "LOWER_LIP": 10, + "MOUTH_LEFT": 11, + "MOUTH_RIGHT": 12, + "MOUTH_CENTER": 13, + "NOSE_BOTTOM_RIGHT": 14, + "NOSE_BOTTOM_LEFT": 15, + "NOSE_BOTTOM_CENTER": 16, + "LEFT_EYE_TOP_BOUNDARY": 17, + "LEFT_EYE_RIGHT_CORNER": 18, + "LEFT_EYE_BOTTOM_BOUNDARY": 19, + "LEFT_EYE_LEFT_CORNER": 20, + "RIGHT_EYE_TOP_BOUNDARY": 21, + "RIGHT_EYE_RIGHT_CORNER": 22, + "RIGHT_EYE_BOTTOM_BOUNDARY": 23, + "RIGHT_EYE_LEFT_CORNER": 24, + "LEFT_EYEBROW_UPPER_MIDPOINT": 25, + "RIGHT_EYEBROW_UPPER_MIDPOINT": 26, + "LEFT_EAR_TRAGION": 27, + "RIGHT_EAR_TRAGION": 28, + "LEFT_EYE_PUPIL": 29, + "RIGHT_EYE_PUPIL": 30, + "FOREHEAD_GLABELLA": 31, + "CHIN_GNATHION": 32, + "CHIN_LEFT_GONION": 33, + "CHIN_RIGHT_GONION": 34, +} + +func (x FaceAnnotation_Landmark_Type) String() string { + return proto.EnumName(FaceAnnotation_Landmark_Type_name, int32(x)) +} +func (FaceAnnotation_Landmark_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor1, []int{3, 0, 0} +} + +// Users describe the type of Google Cloud Vision API tasks to perform over +// images by using *Feature*s. Each Feature indicates a type of image +// detection task to perform. Features encode the Cloud Vision API +// vertical to operate on and the number of top-scoring results to return. +type Feature struct { + // The feature type. + Type Feature_Type `protobuf:"varint,1,opt,name=type,enum=google.cloud.vision.v1.Feature_Type" json:"type,omitempty"` + // Maximum number of results of this type. 
+ MaxResults int32 `protobuf:"varint,2,opt,name=max_results,json=maxResults" json:"max_results,omitempty"` +} + +func (m *Feature) Reset() { *m = Feature{} } +func (m *Feature) String() string { return proto.CompactTextString(m) } +func (*Feature) ProtoMessage() {} +func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Feature) GetType() Feature_Type { + if m != nil { + return m.Type + } + return Feature_TYPE_UNSPECIFIED +} + +func (m *Feature) GetMaxResults() int32 { + if m != nil { + return m.MaxResults + } + return 0 +} + +// External image source (Google Cloud Storage image location). +type ImageSource struct { + // NOTE: For new code `image_uri` below is preferred. + // Google Cloud Storage image URI, which must be in the following form: + // `gs://bucket_name/object_name` (for details, see + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // NOTE: Cloud Storage object versioning is not supported. + GcsImageUri string `protobuf:"bytes,1,opt,name=gcs_image_uri,json=gcsImageUri" json:"gcs_image_uri,omitempty"` + // Image URI which supports: + // 1) Google Cloud Storage image URI, which must be in the following form: + // `gs://bucket_name/object_name` (for details, see + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // NOTE: Cloud Storage object versioning is not supported. + // 2) Publicly accessible image HTTP/HTTPS URL. + // This is preferred over the legacy `gcs_image_uri` above. When both + // `gcs_image_uri` and `image_uri` are specified, `image_uri` takes + // precedence. 
+ ImageUri string `protobuf:"bytes,2,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"` +} + +func (m *ImageSource) Reset() { *m = ImageSource{} } +func (m *ImageSource) String() string { return proto.CompactTextString(m) } +func (*ImageSource) ProtoMessage() {} +func (*ImageSource) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *ImageSource) GetGcsImageUri() string { + if m != nil { + return m.GcsImageUri + } + return "" +} + +func (m *ImageSource) GetImageUri() string { + if m != nil { + return m.ImageUri + } + return "" +} + +// Client image to perform Google Cloud Vision API tasks over. +type Image struct { + // Image content, represented as a stream of bytes. + // Note: as with all `bytes` fields, protobuffers use a pure binary + // representation, whereas JSON representations use base64. + Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // Google Cloud Storage image location. If both `content` and `source` + // are provided for an image, `content` takes precedence and is + // used to perform the image annotation request. + Source *ImageSource `protobuf:"bytes,2,opt,name=source" json:"source,omitempty"` +} + +func (m *Image) Reset() { *m = Image{} } +func (m *Image) String() string { return proto.CompactTextString(m) } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *Image) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *Image) GetSource() *ImageSource { + if m != nil { + return m.Source + } + return nil +} + +// A face annotation object contains the results of face detection. +type FaceAnnotation struct { + // The bounding polygon around the face. The coordinates of the bounding box + // are in the original image's scale, as returned in `ImageParams`. + // The bounding box is computed to "frame" the face in accordance with human + // expectations. 
It is based on the landmarker results. + // Note that one or more x and/or y coordinates may not be generated in the + // `BoundingPoly` (the polygon will be unbounded) if only a partial face + // appears in the image to be annotated. + BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` + // The `fd_bounding_poly` bounding polygon is tighter than the + // `boundingPoly`, and encloses only the skin part of the face. Typically, it + // is used to eliminate the face from any image analysis that detects the + // "amount of skin" visible in an image. It is not based on the + // landmarker results, only on the initial face detection, hence + // the fd (face detection) prefix. + FdBoundingPoly *BoundingPoly `protobuf:"bytes,2,opt,name=fd_bounding_poly,json=fdBoundingPoly" json:"fd_bounding_poly,omitempty"` + // Detected face landmarks. + Landmarks []*FaceAnnotation_Landmark `protobuf:"bytes,3,rep,name=landmarks" json:"landmarks,omitempty"` + // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation + // of the face relative to the image vertical about the axis perpendicular to + // the face. Range [-180,180]. + RollAngle float32 `protobuf:"fixed32,4,opt,name=roll_angle,json=rollAngle" json:"roll_angle,omitempty"` + // Yaw angle, which indicates the leftward/rightward angle that the face is + // pointing relative to the vertical plane perpendicular to the image. Range + // [-180,180]. + PanAngle float32 `protobuf:"fixed32,5,opt,name=pan_angle,json=panAngle" json:"pan_angle,omitempty"` + // Pitch angle, which indicates the upwards/downwards angle that the face is + // pointing relative to the image's horizontal plane. Range [-180,180]. + TiltAngle float32 `protobuf:"fixed32,6,opt,name=tilt_angle,json=tiltAngle" json:"tilt_angle,omitempty"` + // Detection confidence. Range [0, 1]. 
+ DetectionConfidence float32 `protobuf:"fixed32,7,opt,name=detection_confidence,json=detectionConfidence" json:"detection_confidence,omitempty"` + // Face landmarking confidence. Range [0, 1]. + LandmarkingConfidence float32 `protobuf:"fixed32,8,opt,name=landmarking_confidence,json=landmarkingConfidence" json:"landmarking_confidence,omitempty"` + // Joy likelihood. + JoyLikelihood Likelihood `protobuf:"varint,9,opt,name=joy_likelihood,json=joyLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"joy_likelihood,omitempty"` + // Sorrow likelihood. + SorrowLikelihood Likelihood `protobuf:"varint,10,opt,name=sorrow_likelihood,json=sorrowLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"sorrow_likelihood,omitempty"` + // Anger likelihood. + AngerLikelihood Likelihood `protobuf:"varint,11,opt,name=anger_likelihood,json=angerLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"anger_likelihood,omitempty"` + // Surprise likelihood. + SurpriseLikelihood Likelihood `protobuf:"varint,12,opt,name=surprise_likelihood,json=surpriseLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"surprise_likelihood,omitempty"` + // Under-exposed likelihood. + UnderExposedLikelihood Likelihood `protobuf:"varint,13,opt,name=under_exposed_likelihood,json=underExposedLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"under_exposed_likelihood,omitempty"` + // Blurred likelihood. + BlurredLikelihood Likelihood `protobuf:"varint,14,opt,name=blurred_likelihood,json=blurredLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"blurred_likelihood,omitempty"` + // Headwear likelihood. 
+ HeadwearLikelihood Likelihood `protobuf:"varint,15,opt,name=headwear_likelihood,json=headwearLikelihood,enum=google.cloud.vision.v1.Likelihood" json:"headwear_likelihood,omitempty"` +} + +func (m *FaceAnnotation) Reset() { *m = FaceAnnotation{} } +func (m *FaceAnnotation) String() string { return proto.CompactTextString(m) } +func (*FaceAnnotation) ProtoMessage() {} +func (*FaceAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *FaceAnnotation) GetBoundingPoly() *BoundingPoly { + if m != nil { + return m.BoundingPoly + } + return nil +} + +func (m *FaceAnnotation) GetFdBoundingPoly() *BoundingPoly { + if m != nil { + return m.FdBoundingPoly + } + return nil +} + +func (m *FaceAnnotation) GetLandmarks() []*FaceAnnotation_Landmark { + if m != nil { + return m.Landmarks + } + return nil +} + +func (m *FaceAnnotation) GetRollAngle() float32 { + if m != nil { + return m.RollAngle + } + return 0 +} + +func (m *FaceAnnotation) GetPanAngle() float32 { + if m != nil { + return m.PanAngle + } + return 0 +} + +func (m *FaceAnnotation) GetTiltAngle() float32 { + if m != nil { + return m.TiltAngle + } + return 0 +} + +func (m *FaceAnnotation) GetDetectionConfidence() float32 { + if m != nil { + return m.DetectionConfidence + } + return 0 +} + +func (m *FaceAnnotation) GetLandmarkingConfidence() float32 { + if m != nil { + return m.LandmarkingConfidence + } + return 0 +} + +func (m *FaceAnnotation) GetJoyLikelihood() Likelihood { + if m != nil { + return m.JoyLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetSorrowLikelihood() Likelihood { + if m != nil { + return m.SorrowLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetAngerLikelihood() Likelihood { + if m != nil { + return m.AngerLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetSurpriseLikelihood() Likelihood { + if m != nil { + return m.SurpriseLikelihood + } + return Likelihood_UNKNOWN +} + +func (m 
*FaceAnnotation) GetUnderExposedLikelihood() Likelihood { + if m != nil { + return m.UnderExposedLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetBlurredLikelihood() Likelihood { + if m != nil { + return m.BlurredLikelihood + } + return Likelihood_UNKNOWN +} + +func (m *FaceAnnotation) GetHeadwearLikelihood() Likelihood { + if m != nil { + return m.HeadwearLikelihood + } + return Likelihood_UNKNOWN +} + +// A face-specific landmark (for example, a face feature). +// Landmark positions may fall outside the bounds of the image +// if the face is near one or more edges of the image. +// Therefore it is NOT guaranteed that `0 <= x < width` or +// `0 <= y < height`. +type FaceAnnotation_Landmark struct { + // Face landmark type. + Type FaceAnnotation_Landmark_Type `protobuf:"varint,3,opt,name=type,enum=google.cloud.vision.v1.FaceAnnotation_Landmark_Type" json:"type,omitempty"` + // Face landmark position. + Position *Position `protobuf:"bytes,4,opt,name=position" json:"position,omitempty"` +} + +func (m *FaceAnnotation_Landmark) Reset() { *m = FaceAnnotation_Landmark{} } +func (m *FaceAnnotation_Landmark) String() string { return proto.CompactTextString(m) } +func (*FaceAnnotation_Landmark) ProtoMessage() {} +func (*FaceAnnotation_Landmark) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } + +func (m *FaceAnnotation_Landmark) GetType() FaceAnnotation_Landmark_Type { + if m != nil { + return m.Type + } + return FaceAnnotation_Landmark_UNKNOWN_LANDMARK +} + +func (m *FaceAnnotation_Landmark) GetPosition() *Position { + if m != nil { + return m.Position + } + return nil +} + +// Detected entity location information. +type LocationInfo struct { + // lat/long location coordinates. 
+ LatLng *google_type1.LatLng `protobuf:"bytes,1,opt,name=lat_lng,json=latLng" json:"lat_lng,omitempty"` +} + +func (m *LocationInfo) Reset() { *m = LocationInfo{} } +func (m *LocationInfo) String() string { return proto.CompactTextString(m) } +func (*LocationInfo) ProtoMessage() {} +func (*LocationInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *LocationInfo) GetLatLng() *google_type1.LatLng { + if m != nil { + return m.LatLng + } + return nil +} + +// A `Property` consists of a user-supplied name/value pair. +type Property struct { + // Name of the property. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Value of the property. + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} +func (*Property) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *Property) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Property) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Set of detected entity features. +type EntityAnnotation struct { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/). + Mid string `protobuf:"bytes,1,opt,name=mid" json:"mid,omitempty"` + // The language code for the locale in which the entity textual + // `description` is expressed. + Locale string `protobuf:"bytes,2,opt,name=locale" json:"locale,omitempty"` + // Entity textual description, expressed in its `locale` language. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // Overall score of the result. Range [0, 1]. 
+ Score float32 `protobuf:"fixed32,4,opt,name=score" json:"score,omitempty"` + // The accuracy of the entity detection in an image. + // For example, for an image in which the "Eiffel Tower" entity is detected, + // this field represents the confidence that there is a tower in the query + // image. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,5,opt,name=confidence" json:"confidence,omitempty"` + // The relevancy of the ICA (Image Content Annotation) label to the + // image. For example, the relevancy of "tower" is likely higher to an image + // containing the detected "Eiffel Tower" than to an image containing a + // detected distant towering building, even though the confidence that + // there is a tower in each image may be the same. Range [0, 1]. + Topicality float32 `protobuf:"fixed32,6,opt,name=topicality" json:"topicality,omitempty"` + // Image region to which this entity belongs. Currently not produced + // for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s + // are produced for the entire text detected in an image region, followed by + // `boundingPoly`s for each word within the detected text. + BoundingPoly *BoundingPoly `protobuf:"bytes,7,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` + // The location information for the detected entity. Multiple + // `LocationInfo` elements can be present because one location may + // indicate the location of the scene in the image, and another location + // may indicate the location of the place where the image was taken. + // Location information is usually present for landmarks. + Locations []*LocationInfo `protobuf:"bytes,8,rep,name=locations" json:"locations,omitempty"` + // Some entities may have optional user-supplied `Property` (name/value) + // fields, such a score or string that qualifies the entity. 
+ Properties []*Property `protobuf:"bytes,9,rep,name=properties" json:"properties,omitempty"` +} + +func (m *EntityAnnotation) Reset() { *m = EntityAnnotation{} } +func (m *EntityAnnotation) String() string { return proto.CompactTextString(m) } +func (*EntityAnnotation) ProtoMessage() {} +func (*EntityAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *EntityAnnotation) GetMid() string { + if m != nil { + return m.Mid + } + return "" +} + +func (m *EntityAnnotation) GetLocale() string { + if m != nil { + return m.Locale + } + return "" +} + +func (m *EntityAnnotation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *EntityAnnotation) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *EntityAnnotation) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func (m *EntityAnnotation) GetTopicality() float32 { + if m != nil { + return m.Topicality + } + return 0 +} + +func (m *EntityAnnotation) GetBoundingPoly() *BoundingPoly { + if m != nil { + return m.BoundingPoly + } + return nil +} + +func (m *EntityAnnotation) GetLocations() []*LocationInfo { + if m != nil { + return m.Locations + } + return nil +} + +func (m *EntityAnnotation) GetProperties() []*Property { + if m != nil { + return m.Properties + } + return nil +} + +// Set of features pertaining to the image, computed by computer vision +// methods over safe-search verticals (for example, adult, spoof, medical, +// violence). +type SafeSearchAnnotation struct { + // Represents the adult content likelihood for the image. + Adult Likelihood `protobuf:"varint,1,opt,name=adult,enum=google.cloud.vision.v1.Likelihood" json:"adult,omitempty"` + // Spoof likelihood. The likelihood that an modification + // was made to the image's canonical version to make it appear + // funny or offensive. 
+ Spoof Likelihood `protobuf:"varint,2,opt,name=spoof,enum=google.cloud.vision.v1.Likelihood" json:"spoof,omitempty"` + // Likelihood that this is a medical image. + Medical Likelihood `protobuf:"varint,3,opt,name=medical,enum=google.cloud.vision.v1.Likelihood" json:"medical,omitempty"` + // Violence likelihood. + Violence Likelihood `protobuf:"varint,4,opt,name=violence,enum=google.cloud.vision.v1.Likelihood" json:"violence,omitempty"` +} + +func (m *SafeSearchAnnotation) Reset() { *m = SafeSearchAnnotation{} } +func (m *SafeSearchAnnotation) String() string { return proto.CompactTextString(m) } +func (*SafeSearchAnnotation) ProtoMessage() {} +func (*SafeSearchAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *SafeSearchAnnotation) GetAdult() Likelihood { + if m != nil { + return m.Adult + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetSpoof() Likelihood { + if m != nil { + return m.Spoof + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetMedical() Likelihood { + if m != nil { + return m.Medical + } + return Likelihood_UNKNOWN +} + +func (m *SafeSearchAnnotation) GetViolence() Likelihood { + if m != nil { + return m.Violence + } + return Likelihood_UNKNOWN +} + +// Rectangle determined by min and max `LatLng` pairs. +type LatLongRect struct { + // Min lat/long pair. + MinLatLng *google_type1.LatLng `protobuf:"bytes,1,opt,name=min_lat_lng,json=minLatLng" json:"min_lat_lng,omitempty"` + // Max lat/long pair. 
+ MaxLatLng *google_type1.LatLng `protobuf:"bytes,2,opt,name=max_lat_lng,json=maxLatLng" json:"max_lat_lng,omitempty"` +} + +func (m *LatLongRect) Reset() { *m = LatLongRect{} } +func (m *LatLongRect) String() string { return proto.CompactTextString(m) } +func (*LatLongRect) ProtoMessage() {} +func (*LatLongRect) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *LatLongRect) GetMinLatLng() *google_type1.LatLng { + if m != nil { + return m.MinLatLng + } + return nil +} + +func (m *LatLongRect) GetMaxLatLng() *google_type1.LatLng { + if m != nil { + return m.MaxLatLng + } + return nil +} + +// Color information consists of RGB channels, score, and the fraction of +// the image that the color occupies in the image. +type ColorInfo struct { + // RGB components of the color. + Color *google_type.Color `protobuf:"bytes,1,opt,name=color" json:"color,omitempty"` + // Image-specific score for this color. Value in range [0, 1]. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` + // The fraction of pixels the color occupies in the image. + // Value in range [0, 1]. + PixelFraction float32 `protobuf:"fixed32,3,opt,name=pixel_fraction,json=pixelFraction" json:"pixel_fraction,omitempty"` +} + +func (m *ColorInfo) Reset() { *m = ColorInfo{} } +func (m *ColorInfo) String() string { return proto.CompactTextString(m) } +func (*ColorInfo) ProtoMessage() {} +func (*ColorInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *ColorInfo) GetColor() *google_type.Color { + if m != nil { + return m.Color + } + return nil +} + +func (m *ColorInfo) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *ColorInfo) GetPixelFraction() float32 { + if m != nil { + return m.PixelFraction + } + return 0 +} + +// Set of dominant colors and their corresponding scores. +type DominantColorsAnnotation struct { + // RGB color values with their score and pixel fraction. 
+ Colors []*ColorInfo `protobuf:"bytes,1,rep,name=colors" json:"colors,omitempty"` +} + +func (m *DominantColorsAnnotation) Reset() { *m = DominantColorsAnnotation{} } +func (m *DominantColorsAnnotation) String() string { return proto.CompactTextString(m) } +func (*DominantColorsAnnotation) ProtoMessage() {} +func (*DominantColorsAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *DominantColorsAnnotation) GetColors() []*ColorInfo { + if m != nil { + return m.Colors + } + return nil +} + +// Stores image properties, such as dominant colors. +type ImageProperties struct { + // If present, dominant colors completed successfully. + DominantColors *DominantColorsAnnotation `protobuf:"bytes,1,opt,name=dominant_colors,json=dominantColors" json:"dominant_colors,omitempty"` +} + +func (m *ImageProperties) Reset() { *m = ImageProperties{} } +func (m *ImageProperties) String() string { return proto.CompactTextString(m) } +func (*ImageProperties) ProtoMessage() {} +func (*ImageProperties) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *ImageProperties) GetDominantColors() *DominantColorsAnnotation { + if m != nil { + return m.DominantColors + } + return nil +} + +// Single crop hint that is used to generate a new crop when serving an image. +type CropHint struct { + // The bounding polygon for the crop region. The coordinates of the bounding + // box are in the original image's scale, as returned in `ImageParams`. + BoundingPoly *BoundingPoly `protobuf:"bytes,1,opt,name=bounding_poly,json=boundingPoly" json:"bounding_poly,omitempty"` + // Confidence of this being a salient region. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` + // Fraction of importance of this salient region with respect to the original + // image. 
+ ImportanceFraction float32 `protobuf:"fixed32,3,opt,name=importance_fraction,json=importanceFraction" json:"importance_fraction,omitempty"` +} + +func (m *CropHint) Reset() { *m = CropHint{} } +func (m *CropHint) String() string { return proto.CompactTextString(m) } +func (*CropHint) ProtoMessage() {} +func (*CropHint) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *CropHint) GetBoundingPoly() *BoundingPoly { + if m != nil { + return m.BoundingPoly + } + return nil +} + +func (m *CropHint) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func (m *CropHint) GetImportanceFraction() float32 { + if m != nil { + return m.ImportanceFraction + } + return 0 +} + +// Set of crop hints that are used to generate new crops when serving images. +type CropHintsAnnotation struct { + CropHints []*CropHint `protobuf:"bytes,1,rep,name=crop_hints,json=cropHints" json:"crop_hints,omitempty"` +} + +func (m *CropHintsAnnotation) Reset() { *m = CropHintsAnnotation{} } +func (m *CropHintsAnnotation) String() string { return proto.CompactTextString(m) } +func (*CropHintsAnnotation) ProtoMessage() {} +func (*CropHintsAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *CropHintsAnnotation) GetCropHints() []*CropHint { + if m != nil { + return m.CropHints + } + return nil +} + +// Parameters for crop hints annotation request. +type CropHintsParams struct { + // Aspect ratios in floats, representing the ratio of the width to the height + // of the image. For example, if the desired aspect ratio is 4/3, the + // corresponding float value should be 1.33333. If not specified, the + // best possible crop is returned. The number of provided aspect ratios is + // limited to a maximum of 16; any aspect ratios provided after the 16th are + // ignored. 
+ AspectRatios []float32 `protobuf:"fixed32,1,rep,packed,name=aspect_ratios,json=aspectRatios" json:"aspect_ratios,omitempty"` +} + +func (m *CropHintsParams) Reset() { *m = CropHintsParams{} } +func (m *CropHintsParams) String() string { return proto.CompactTextString(m) } +func (*CropHintsParams) ProtoMessage() {} +func (*CropHintsParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +func (m *CropHintsParams) GetAspectRatios() []float32 { + if m != nil { + return m.AspectRatios + } + return nil +} + +// Image context and/or feature-specific parameters. +type ImageContext struct { + // lat/long rectangle that specifies the location of the image. + LatLongRect *LatLongRect `protobuf:"bytes,1,opt,name=lat_long_rect,json=latLongRect" json:"lat_long_rect,omitempty"` + // List of languages to use for TEXT_DETECTION. In most cases, an empty value + // yields the best results since it enables automatic language detection. For + // languages based on the Latin alphabet, setting `language_hints` is not + // needed. In rare cases, when the language of the text in the image is known, + // setting a hint will help get better results (although it will be a + // significant hindrance if the hint is wrong). Text detection returns an + // error if one or more of the specified languages is not one of the + // [supported languages](/vision/docs/languages). + LanguageHints []string `protobuf:"bytes,2,rep,name=language_hints,json=languageHints" json:"language_hints,omitempty"` + // Parameters for crop hints annotation request. 
+ CropHintsParams *CropHintsParams `protobuf:"bytes,4,opt,name=crop_hints_params,json=cropHintsParams" json:"crop_hints_params,omitempty"` +} + +func (m *ImageContext) Reset() { *m = ImageContext{} } +func (m *ImageContext) String() string { return proto.CompactTextString(m) } +func (*ImageContext) ProtoMessage() {} +func (*ImageContext) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } + +func (m *ImageContext) GetLatLongRect() *LatLongRect { + if m != nil { + return m.LatLongRect + } + return nil +} + +func (m *ImageContext) GetLanguageHints() []string { + if m != nil { + return m.LanguageHints + } + return nil +} + +func (m *ImageContext) GetCropHintsParams() *CropHintsParams { + if m != nil { + return m.CropHintsParams + } + return nil +} + +// Request for performing Google Cloud Vision API tasks over a user-provided +// image, with user-requested features. +type AnnotateImageRequest struct { + // The image to be processed. + Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` + // Requested features. + Features []*Feature `protobuf:"bytes,2,rep,name=features" json:"features,omitempty"` + // Additional context that may accompany the image. 
+ ImageContext *ImageContext `protobuf:"bytes,3,opt,name=image_context,json=imageContext" json:"image_context,omitempty"` +} + +func (m *AnnotateImageRequest) Reset() { *m = AnnotateImageRequest{} } +func (m *AnnotateImageRequest) String() string { return proto.CompactTextString(m) } +func (*AnnotateImageRequest) ProtoMessage() {} +func (*AnnotateImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } + +func (m *AnnotateImageRequest) GetImage() *Image { + if m != nil { + return m.Image + } + return nil +} + +func (m *AnnotateImageRequest) GetFeatures() []*Feature { + if m != nil { + return m.Features + } + return nil +} + +func (m *AnnotateImageRequest) GetImageContext() *ImageContext { + if m != nil { + return m.ImageContext + } + return nil +} + +// Response to an image annotation request. +type AnnotateImageResponse struct { + // If present, face detection has completed successfully. + FaceAnnotations []*FaceAnnotation `protobuf:"bytes,1,rep,name=face_annotations,json=faceAnnotations" json:"face_annotations,omitempty"` + // If present, landmark detection has completed successfully. + LandmarkAnnotations []*EntityAnnotation `protobuf:"bytes,2,rep,name=landmark_annotations,json=landmarkAnnotations" json:"landmark_annotations,omitempty"` + // If present, logo detection has completed successfully. + LogoAnnotations []*EntityAnnotation `protobuf:"bytes,3,rep,name=logo_annotations,json=logoAnnotations" json:"logo_annotations,omitempty"` + // If present, label detection has completed successfully. + LabelAnnotations []*EntityAnnotation `protobuf:"bytes,4,rep,name=label_annotations,json=labelAnnotations" json:"label_annotations,omitempty"` + // If present, text (OCR) detection or document (OCR) text detection has + // completed successfully. 
+ TextAnnotations []*EntityAnnotation `protobuf:"bytes,5,rep,name=text_annotations,json=textAnnotations" json:"text_annotations,omitempty"` + // If present, text (OCR) detection or document (OCR) text detection has + // completed successfully. + // This annotation provides the structural hierarchy for the OCR detected + // text. + FullTextAnnotation *TextAnnotation `protobuf:"bytes,12,opt,name=full_text_annotation,json=fullTextAnnotation" json:"full_text_annotation,omitempty"` + // If present, safe-search annotation has completed successfully. + SafeSearchAnnotation *SafeSearchAnnotation `protobuf:"bytes,6,opt,name=safe_search_annotation,json=safeSearchAnnotation" json:"safe_search_annotation,omitempty"` + // If present, image properties were extracted successfully. + ImagePropertiesAnnotation *ImageProperties `protobuf:"bytes,8,opt,name=image_properties_annotation,json=imagePropertiesAnnotation" json:"image_properties_annotation,omitempty"` + // If present, crop hints have completed successfully. + CropHintsAnnotation *CropHintsAnnotation `protobuf:"bytes,11,opt,name=crop_hints_annotation,json=cropHintsAnnotation" json:"crop_hints_annotation,omitempty"` + // If present, web detection has completed successfully. + WebDetection *WebDetection `protobuf:"bytes,13,opt,name=web_detection,json=webDetection" json:"web_detection,omitempty"` + // If set, represents the error message for the operation. + // Note that filled-in image annotations are guaranteed to be + // correct, even when `error` is set. 
+ Error *google_rpc.Status `protobuf:"bytes,9,opt,name=error" json:"error,omitempty"` +} + +func (m *AnnotateImageResponse) Reset() { *m = AnnotateImageResponse{} } +func (m *AnnotateImageResponse) String() string { return proto.CompactTextString(m) } +func (*AnnotateImageResponse) ProtoMessage() {} +func (*AnnotateImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } + +func (m *AnnotateImageResponse) GetFaceAnnotations() []*FaceAnnotation { + if m != nil { + return m.FaceAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetLandmarkAnnotations() []*EntityAnnotation { + if m != nil { + return m.LandmarkAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetLogoAnnotations() []*EntityAnnotation { + if m != nil { + return m.LogoAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetLabelAnnotations() []*EntityAnnotation { + if m != nil { + return m.LabelAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetTextAnnotations() []*EntityAnnotation { + if m != nil { + return m.TextAnnotations + } + return nil +} + +func (m *AnnotateImageResponse) GetFullTextAnnotation() *TextAnnotation { + if m != nil { + return m.FullTextAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetSafeSearchAnnotation() *SafeSearchAnnotation { + if m != nil { + return m.SafeSearchAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetImagePropertiesAnnotation() *ImageProperties { + if m != nil { + return m.ImagePropertiesAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetCropHintsAnnotation() *CropHintsAnnotation { + if m != nil { + return m.CropHintsAnnotation + } + return nil +} + +func (m *AnnotateImageResponse) GetWebDetection() *WebDetection { + if m != nil { + return m.WebDetection + } + return nil +} + +func (m *AnnotateImageResponse) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +// Multiple image annotation 
requests are batched into a single service call. +type BatchAnnotateImagesRequest struct { + // Individual image annotation requests for this batch. + Requests []*AnnotateImageRequest `protobuf:"bytes,1,rep,name=requests" json:"requests,omitempty"` +} + +func (m *BatchAnnotateImagesRequest) Reset() { *m = BatchAnnotateImagesRequest{} } +func (m *BatchAnnotateImagesRequest) String() string { return proto.CompactTextString(m) } +func (*BatchAnnotateImagesRequest) ProtoMessage() {} +func (*BatchAnnotateImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} } + +func (m *BatchAnnotateImagesRequest) GetRequests() []*AnnotateImageRequest { + if m != nil { + return m.Requests + } + return nil +} + +// Response to a batch image annotation request. +type BatchAnnotateImagesResponse struct { + // Individual responses to image annotation requests within the batch. + Responses []*AnnotateImageResponse `protobuf:"bytes,1,rep,name=responses" json:"responses,omitempty"` +} + +func (m *BatchAnnotateImagesResponse) Reset() { *m = BatchAnnotateImagesResponse{} } +func (m *BatchAnnotateImagesResponse) String() string { return proto.CompactTextString(m) } +func (*BatchAnnotateImagesResponse) ProtoMessage() {} +func (*BatchAnnotateImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} } + +func (m *BatchAnnotateImagesResponse) GetResponses() []*AnnotateImageResponse { + if m != nil { + return m.Responses + } + return nil +} + +func init() { + proto.RegisterType((*Feature)(nil), "google.cloud.vision.v1.Feature") + proto.RegisterType((*ImageSource)(nil), "google.cloud.vision.v1.ImageSource") + proto.RegisterType((*Image)(nil), "google.cloud.vision.v1.Image") + proto.RegisterType((*FaceAnnotation)(nil), "google.cloud.vision.v1.FaceAnnotation") + proto.RegisterType((*FaceAnnotation_Landmark)(nil), "google.cloud.vision.v1.FaceAnnotation.Landmark") + proto.RegisterType((*LocationInfo)(nil), "google.cloud.vision.v1.LocationInfo") + 
proto.RegisterType((*Property)(nil), "google.cloud.vision.v1.Property") + proto.RegisterType((*EntityAnnotation)(nil), "google.cloud.vision.v1.EntityAnnotation") + proto.RegisterType((*SafeSearchAnnotation)(nil), "google.cloud.vision.v1.SafeSearchAnnotation") + proto.RegisterType((*LatLongRect)(nil), "google.cloud.vision.v1.LatLongRect") + proto.RegisterType((*ColorInfo)(nil), "google.cloud.vision.v1.ColorInfo") + proto.RegisterType((*DominantColorsAnnotation)(nil), "google.cloud.vision.v1.DominantColorsAnnotation") + proto.RegisterType((*ImageProperties)(nil), "google.cloud.vision.v1.ImageProperties") + proto.RegisterType((*CropHint)(nil), "google.cloud.vision.v1.CropHint") + proto.RegisterType((*CropHintsAnnotation)(nil), "google.cloud.vision.v1.CropHintsAnnotation") + proto.RegisterType((*CropHintsParams)(nil), "google.cloud.vision.v1.CropHintsParams") + proto.RegisterType((*ImageContext)(nil), "google.cloud.vision.v1.ImageContext") + proto.RegisterType((*AnnotateImageRequest)(nil), "google.cloud.vision.v1.AnnotateImageRequest") + proto.RegisterType((*AnnotateImageResponse)(nil), "google.cloud.vision.v1.AnnotateImageResponse") + proto.RegisterType((*BatchAnnotateImagesRequest)(nil), "google.cloud.vision.v1.BatchAnnotateImagesRequest") + proto.RegisterType((*BatchAnnotateImagesResponse)(nil), "google.cloud.vision.v1.BatchAnnotateImagesResponse") + proto.RegisterEnum("google.cloud.vision.v1.Likelihood", Likelihood_name, Likelihood_value) + proto.RegisterEnum("google.cloud.vision.v1.Feature_Type", Feature_Type_name, Feature_Type_value) + proto.RegisterEnum("google.cloud.vision.v1.FaceAnnotation_Landmark_Type", FaceAnnotation_Landmark_Type_name, FaceAnnotation_Landmark_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for ImageAnnotator service + +type ImageAnnotatorClient interface { + // Run image detection and annotation for a batch of images. + BatchAnnotateImages(ctx context.Context, in *BatchAnnotateImagesRequest, opts ...grpc.CallOption) (*BatchAnnotateImagesResponse, error) +} + +type imageAnnotatorClient struct { + cc *grpc.ClientConn +} + +func NewImageAnnotatorClient(cc *grpc.ClientConn) ImageAnnotatorClient { + return &imageAnnotatorClient{cc} +} + +func (c *imageAnnotatorClient) BatchAnnotateImages(ctx context.Context, in *BatchAnnotateImagesRequest, opts ...grpc.CallOption) (*BatchAnnotateImagesResponse, error) { + out := new(BatchAnnotateImagesResponse) + err := grpc.Invoke(ctx, "/google.cloud.vision.v1.ImageAnnotator/BatchAnnotateImages", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ImageAnnotator service + +type ImageAnnotatorServer interface { + // Run image detection and annotation for a batch of images. 
+ BatchAnnotateImages(context.Context, *BatchAnnotateImagesRequest) (*BatchAnnotateImagesResponse, error) +} + +func RegisterImageAnnotatorServer(s *grpc.Server, srv ImageAnnotatorServer) { + s.RegisterService(&_ImageAnnotator_serviceDesc, srv) +} + +func _ImageAnnotator_BatchAnnotateImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchAnnotateImagesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageAnnotatorServer).BatchAnnotateImages(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.vision.v1.ImageAnnotator/BatchAnnotateImages", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageAnnotatorServer).BatchAnnotateImages(ctx, req.(*BatchAnnotateImagesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ImageAnnotator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.vision.v1.ImageAnnotator", + HandlerType: (*ImageAnnotatorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "BatchAnnotateImages", + Handler: _ImageAnnotator_BatchAnnotateImages_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/vision/v1/image_annotator.proto", +} + +func init() { proto.RegisterFile("google/cloud/vision/v1/image_annotator.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 2281 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0x4b, 0x73, 0x23, 0x49, + 0xf1, 0x5f, 0xc9, 0x2f, 0x29, 0xf5, 0x6a, 0x97, 0x1f, 0xa3, 0xb1, 0xe7, 0xe1, 0xed, 0xfd, 0xcf, + 0x1f, 0xc7, 0x30, 0xd8, 0x8c, 0x67, 0x21, 0x96, 0x9d, 0x09, 0x40, 0x92, 0xdb, 0xb6, 0x62, 0x64, + 0x49, 0x5b, 0x92, 0xd7, 0x6b, 0x20, 0xe8, 0x68, 0xb7, 0x4a, 0x9a, 0x9e, 0x6d, 0x75, 0x35, 0xdd, + 0xad, 0x19, 0xfb, 0x4a, 0x04, 0x11, 0xdc, 0xb9, 0x73, 
0xe4, 0x4e, 0x04, 0x5f, 0x81, 0x03, 0x47, + 0x62, 0xcf, 0xdc, 0xf8, 0x0c, 0x04, 0x47, 0xa2, 0x1e, 0xdd, 0x2a, 0x69, 0x2c, 0x8f, 0x4c, 0x70, + 0x52, 0x57, 0x66, 0xfe, 0x7e, 0x59, 0x95, 0x55, 0x59, 0x59, 0x55, 0x82, 0x67, 0x03, 0x4a, 0x07, + 0x2e, 0xd9, 0xb7, 0x5d, 0x3a, 0xea, 0xed, 0xbf, 0x73, 0x42, 0x87, 0x7a, 0xfb, 0xef, 0x9e, 0xef, + 0x3b, 0x43, 0x6b, 0x40, 0x4c, 0xcb, 0xf3, 0x68, 0x64, 0x45, 0x34, 0xd8, 0xf3, 0x03, 0x1a, 0x51, + 0xb4, 0x29, 0xac, 0xf7, 0xb8, 0xf5, 0x9e, 0xb0, 0xde, 0x7b, 0xf7, 0x7c, 0xeb, 0x81, 0x64, 0xb1, + 0x7c, 0x67, 0x5f, 0x62, 0x1c, 0xea, 0x85, 0x02, 0xb5, 0xf5, 0x64, 0x86, 0x8f, 0x01, 0xa1, 0x43, + 0x12, 0x05, 0xd7, 0xd2, 0x6c, 0x56, 0x57, 0x22, 0x72, 0x15, 0x99, 0x63, 0x56, 0x69, 0xfd, 0x74, + 0x86, 0xf5, 0x7b, 0x72, 0x69, 0xf6, 0x48, 0x44, 0x6c, 0xc5, 0xf6, 0x9e, 0xb4, 0x0d, 0x7c, 0x7b, + 0x3f, 0x8c, 0xac, 0x68, 0x14, 0x4e, 0x29, 0xa2, 0x6b, 0x9f, 0xec, 0xdb, 0xd4, 0x8d, 0x07, 0xba, + 0x55, 0x56, 0x15, 0xae, 0x15, 0xb9, 0xde, 0x40, 0x68, 0xf4, 0x7f, 0xa4, 0x61, 0xe5, 0x88, 0x58, + 0xd1, 0x28, 0x20, 0xe8, 0x0b, 0x58, 0x64, 0x06, 0xe5, 0xd4, 0x4e, 0x6a, 0xb7, 0x78, 0xf0, 0x7f, + 0x7b, 0x37, 0x47, 0x67, 0x4f, 0x9a, 0xef, 0x75, 0xaf, 0x7d, 0x82, 0x39, 0x02, 0x3d, 0x86, 0xdc, + 0xd0, 0xba, 0x32, 0x03, 0x12, 0x8e, 0xdc, 0x28, 0x2c, 0xa7, 0x77, 0x52, 0xbb, 0x4b, 0x18, 0x86, + 0xd6, 0x15, 0x16, 0x12, 0xfd, 0x5f, 0x29, 0x58, 0x64, 0xf6, 0x68, 0x1d, 0xb4, 0xee, 0x45, 0xdb, + 0x30, 0xcf, 0x9a, 0x9d, 0xb6, 0x51, 0xab, 0x1f, 0xd5, 0x8d, 0x43, 0xed, 0x13, 0x84, 0xa0, 0x78, + 0x54, 0xa9, 0x19, 0xe6, 0xa1, 0xd1, 0x35, 0x6a, 0xdd, 0x7a, 0xab, 0xa9, 0xa5, 0xd0, 0x26, 0xa0, + 0x46, 0xa5, 0x79, 0x78, 0x5a, 0xc1, 0xaf, 0x15, 0x79, 0x9a, 0xd9, 0x36, 0x5a, 0xc7, 0x2d, 0x45, + 0xb6, 0x80, 0xd6, 0xa0, 0xd4, 0xa8, 0x54, 0x8d, 0x86, 0x22, 0x5c, 0x64, 0x86, 0x5d, 0xe3, 0x9b, + 0xae, 0x22, 0x5b, 0x42, 0xdb, 0x70, 0xef, 0xb0, 0x55, 0x3b, 0x3b, 0x35, 0x9a, 0x5d, 0x73, 0x4a, + 0x99, 0x43, 0xf7, 0x61, 0xa3, 0x53, 0x39, 0x32, 0xcc, 0x8e, 0x51, 0xc1, 0xb5, 0x13, 0x45, 
0xb5, + 0xcc, 0xba, 0x5d, 0x3f, 0xad, 0x1c, 0x1b, 0x66, 0x1b, 0xb7, 0xda, 0x06, 0xee, 0xd6, 0x8d, 0x8e, + 0xb6, 0x82, 0x8a, 0x00, 0x35, 0xdc, 0x6a, 0x9b, 0x27, 0xf5, 0x66, 0xb7, 0xa3, 0x65, 0xd1, 0x2a, + 0x14, 0xce, 0x8d, 0xaa, 0x02, 0x04, 0xbd, 0x09, 0xb9, 0x3a, 0x5b, 0x7b, 0x1d, 0x3a, 0x0a, 0x6c, + 0x82, 0x74, 0x28, 0x0c, 0xec, 0xd0, 0x14, 0xcb, 0x71, 0x14, 0x38, 0x3c, 0xd6, 0x59, 0x9c, 0x1b, + 0xd8, 0x21, 0x37, 0x3b, 0x0b, 0x1c, 0xb4, 0x0d, 0xd9, 0xb1, 0x3e, 0xcd, 0xf5, 0x19, 0x47, 0x2a, + 0xf5, 0x5f, 0xc3, 0x12, 0x37, 0x44, 0x65, 0x58, 0xb1, 0xa9, 0x17, 0x11, 0x2f, 0xe2, 0x1c, 0x79, + 0x1c, 0x37, 0xd1, 0x4b, 0x58, 0x0e, 0xb9, 0x37, 0x0e, 0xce, 0x1d, 0x7c, 0x36, 0x6b, 0x22, 0x95, + 0x8e, 0x61, 0x09, 0xd1, 0xff, 0x5e, 0x82, 0xe2, 0x91, 0x65, 0x93, 0x4a, 0xb2, 0x40, 0x51, 0x1d, + 0x0a, 0x97, 0x74, 0xe4, 0xf5, 0x1c, 0x6f, 0x60, 0xfa, 0xd4, 0xbd, 0xe6, 0xfe, 0x72, 0xb3, 0xd7, + 0x47, 0x55, 0x1a, 0xb7, 0xa9, 0x7b, 0x8d, 0xf3, 0x97, 0x4a, 0x0b, 0x35, 0x41, 0xeb, 0xf7, 0xcc, + 0x49, 0xb6, 0xf4, 0x1d, 0xd8, 0x8a, 0xfd, 0x9e, 0xda, 0x46, 0xa7, 0x90, 0x75, 0x2d, 0xaf, 0x37, + 0xb4, 0x82, 0x6f, 0xc3, 0xf2, 0xc2, 0xce, 0xc2, 0x6e, 0xee, 0x60, 0x7f, 0xe6, 0xb2, 0x9d, 0x18, + 0xd5, 0x5e, 0x43, 0xe2, 0xf0, 0x98, 0x01, 0x3d, 0x04, 0x08, 0xa8, 0xeb, 0x9a, 0x96, 0x37, 0x70, + 0x49, 0x79, 0x71, 0x27, 0xb5, 0x9b, 0xc6, 0x59, 0x26, 0xa9, 0x30, 0x01, 0x9b, 0x18, 0xdf, 0xf2, + 0xa4, 0x76, 0x89, 0x6b, 0x33, 0xbe, 0xe5, 0x09, 0xe5, 0x43, 0x80, 0xc8, 0x71, 0x23, 0xa9, 0x5d, + 0x16, 0x58, 0x26, 0x11, 0xea, 0xe7, 0xb0, 0x9e, 0xa4, 0xb1, 0x69, 0x53, 0xaf, 0xef, 0xf4, 0x88, + 0x67, 0x93, 0xf2, 0x0a, 0x37, 0x5c, 0x4b, 0x74, 0xb5, 0x44, 0x85, 0x7e, 0x04, 0x9b, 0x71, 0xd7, + 0x58, 0xb0, 0x14, 0x50, 0x86, 0x83, 0x36, 0x14, 0xad, 0x02, 0xab, 0x43, 0xf1, 0x2d, 0xbd, 0x36, + 0x5d, 0xe7, 0x5b, 0xe2, 0x3a, 0x6f, 0x28, 0xed, 0x95, 0xb3, 0x3c, 0x9f, 0xf5, 0x59, 0x81, 0x69, + 0x24, 0x96, 0xb8, 0xf0, 0x96, 0x5e, 0x8f, 0x9b, 0xa8, 0x05, 0xab, 0x21, 0x0d, 0x02, 0xfa, 0x5e, + 0x65, 0x83, 0xb9, 0xd9, 0x34, 
0x01, 0x56, 0x08, 0x4f, 0x41, 0xb3, 0xbc, 0x01, 0x09, 0x54, 0xbe, + 0xdc, 0xdc, 0x7c, 0x25, 0x8e, 0x55, 0xe8, 0x3a, 0xb0, 0x16, 0x8e, 0x02, 0x3f, 0x70, 0x42, 0xa2, + 0x32, 0xe6, 0xe7, 0x66, 0x44, 0x31, 0x5c, 0x21, 0xfd, 0x15, 0x94, 0x47, 0x5e, 0x8f, 0x04, 0x26, + 0xb9, 0xf2, 0x69, 0x48, 0x7a, 0x2a, 0x73, 0x61, 0x6e, 0xe6, 0x4d, 0xce, 0x61, 0x08, 0x0a, 0x85, + 0xfd, 0x2b, 0x40, 0x97, 0xee, 0x28, 0x08, 0x26, 0x79, 0x8b, 0x73, 0xf3, 0xae, 0x4a, 0xf4, 0x64, + 0x14, 0xde, 0x10, 0xab, 0xf7, 0x9e, 0x58, 0x13, 0x71, 0x2d, 0xcd, 0x1f, 0x85, 0x18, 0x3e, 0x96, + 0x6d, 0xfd, 0x6d, 0x05, 0x32, 0x71, 0x8a, 0xa0, 0x13, 0x59, 0x18, 0x16, 0x38, 0xe5, 0xe7, 0x77, + 0xcc, 0x30, 0xb5, 0x50, 0xbc, 0x82, 0x8c, 0x4f, 0x43, 0x87, 0xe9, 0x79, 0x7e, 0xe5, 0x0e, 0x76, + 0x66, 0xb1, 0xb5, 0xa5, 0x1d, 0x4e, 0x10, 0xfa, 0x5f, 0x96, 0xc7, 0x55, 0xe4, 0xac, 0xf9, 0xba, + 0xd9, 0x3a, 0x6f, 0x9a, 0x71, 0x8d, 0xd0, 0x3e, 0x41, 0x79, 0xc8, 0x34, 0x8c, 0xa3, 0xae, 0x69, + 0x5c, 0x18, 0x5a, 0x0a, 0x15, 0x20, 0x8b, 0xeb, 0xc7, 0x27, 0xa2, 0x99, 0x46, 0x65, 0x58, 0xe7, + 0xca, 0xd6, 0x91, 0x19, 0x1b, 0x55, 0x71, 0xeb, 0x5c, 0x5b, 0x60, 0xdb, 0xbe, 0x30, 0x9c, 0x56, + 0x2d, 0x32, 0x55, 0x0c, 0x4a, 0xb8, 0xb8, 0x6a, 0x09, 0x6d, 0xc1, 0x66, 0x82, 0x9a, 0xd4, 0x2d, + 0x33, 0xd8, 0x69, 0xfd, 0xb0, 0xdd, 0xaa, 0x37, 0xbb, 0x66, 0xd5, 0xe8, 0x9e, 0x1b, 0x46, 0x93, + 0x69, 0x59, 0xc9, 0xc8, 0x43, 0xa6, 0xd9, 0xea, 0x18, 0x66, 0xb7, 0xde, 0xd6, 0x32, 0xac, 0x8f, + 0x67, 0xed, 0xb6, 0x81, 0xcd, 0x46, 0xbd, 0xad, 0x65, 0x59, 0xb3, 0xd1, 0x3a, 0x97, 0x4d, 0x60, + 0xe5, 0xe5, 0xb4, 0x75, 0xd6, 0x3d, 0xe1, 0xbd, 0xd2, 0x72, 0xa8, 0x04, 0x39, 0xd1, 0xe6, 0xfe, + 0xb4, 0x3c, 0xd2, 0x20, 0x2f, 0x04, 0x35, 0xa3, 0xd9, 0x35, 0xb0, 0x56, 0x40, 0x1b, 0xb0, 0xca, + 0xe9, 0xab, 0xad, 0x6e, 0xb7, 0x75, 0x2a, 0x0d, 0x8b, 0x2c, 0x5e, 0xaa, 0x98, 0xf3, 0x95, 0x58, + 0x85, 0x55, 0xa5, 0x92, 0x44, 0x4b, 0x46, 0x6d, 0x5c, 0x18, 0x66, 0xb7, 0xd5, 0x36, 0xab, 0xad, + 0xb3, 0xe6, 0x61, 0x05, 0x5f, 0x68, 0xab, 0x13, 0x2a, 0x31, 0xea, 
0x5a, 0x0b, 0x37, 0x0d, 0xac, + 0x21, 0xf4, 0x00, 0xca, 0x89, 0x4a, 0x32, 0x26, 0xc0, 0xb5, 0x24, 0xfc, 0x4c, 0xcb, 0x3f, 0x24, + 0x6e, 0x7d, 0x1c, 0xc8, 0x0f, 0xdc, 0x6d, 0x4c, 0xea, 0x26, 0xfc, 0x6d, 0xa2, 0x87, 0x70, 0x7f, + 0xac, 0x9b, 0x76, 0x78, 0x6f, 0x3c, 0xab, 0xd3, 0x1e, 0xcb, 0xe8, 0x31, 0x6c, 0xab, 0xf3, 0x6c, + 0x8a, 0x29, 0x88, 0x67, 0x4c, 0xbb, 0x8f, 0x76, 0xe0, 0xc1, 0xc4, 0x94, 0x4e, 0x5b, 0x6c, 0xb1, + 0x80, 0x0a, 0x8a, 0x0a, 0x36, 0xbb, 0xb8, 0x72, 0xcc, 0x8a, 0xfd, 0x36, 0x8b, 0xbe, 0xc4, 0x29, + 0xe2, 0x07, 0xfc, 0xc4, 0x12, 0x8f, 0xbd, 0x7d, 0xd6, 0xae, 0x37, 0xb4, 0x87, 0xec, 0xc4, 0x32, + 0xee, 0x9e, 0x10, 0x3e, 0x62, 0xf8, 0xa3, 0x16, 0x36, 0x4e, 0x8c, 0xca, 0xa1, 0x79, 0xcc, 0x0f, + 0x34, 0x8d, 0x8a, 0xf6, 0x98, 0x1d, 0x2b, 0x6a, 0x27, 0xf5, 0xa6, 0x79, 0xdc, 0xac, 0x74, 0x4f, + 0x18, 0xe5, 0x0e, 0xf3, 0xcf, 0x45, 0x9c, 0xf7, 0xb8, 0xd5, 0x64, 0xd2, 0x4f, 0x19, 0x9e, 0x4b, + 0x05, 0xb3, 0x14, 0xeb, 0xfa, 0x2b, 0xc8, 0x37, 0xa8, 0xcd, 0x93, 0xb2, 0xee, 0xf5, 0x29, 0x7a, + 0x06, 0x2b, 0xae, 0x15, 0x99, 0xae, 0x37, 0x90, 0xa5, 0x7c, 0x2d, 0xce, 0x41, 0x96, 0xa3, 0x7b, + 0x0d, 0x2b, 0x6a, 0x78, 0x03, 0xbc, 0xec, 0xf2, 0x5f, 0xfd, 0x73, 0xc8, 0xb4, 0x03, 0xea, 0x93, + 0x20, 0xba, 0x46, 0x08, 0x16, 0x3d, 0x6b, 0x48, 0xe4, 0xa9, 0x85, 0x7f, 0xa3, 0x75, 0x58, 0x7a, + 0x67, 0xb9, 0x23, 0x22, 0x8f, 0x2a, 0xa2, 0xa1, 0xff, 0x6e, 0x01, 0x34, 0xc3, 0x8b, 0x9c, 0xe8, + 0x5a, 0x39, 0x49, 0x68, 0xb0, 0x30, 0x74, 0x7a, 0x12, 0xcd, 0x3e, 0xd1, 0x26, 0x2c, 0xbb, 0xd4, + 0xb6, 0xdc, 0x18, 0x2d, 0x5b, 0x68, 0x07, 0x72, 0x3d, 0x12, 0xda, 0x81, 0xe3, 0xf3, 0xad, 0x62, + 0x41, 0x9c, 0x92, 0x14, 0x11, 0x73, 0x1b, 0xda, 0x34, 0x88, 0xcb, 0xb4, 0x68, 0xa0, 0x47, 0x00, + 0x4a, 0x9d, 0x14, 0x35, 0x5a, 0x91, 0x30, 0x7d, 0x44, 0x7d, 0xc7, 0xb6, 0x5c, 0x27, 0xba, 0x96, + 0x55, 0x5a, 0x91, 0x7c, 0x78, 0xd6, 0x59, 0xf9, 0xaf, 0xcf, 0x3a, 0x55, 0xc8, 0xba, 0x32, 0xea, + 0x61, 0x39, 0xc3, 0xcf, 0x26, 0x33, 0x69, 0xd4, 0xe9, 0xc1, 0x63, 0x18, 0xfa, 0x39, 0x80, 0x2f, + 0x62, 
0xef, 0x90, 0xb0, 0x9c, 0xe5, 0x24, 0xb3, 0x37, 0x4c, 0x39, 0x4b, 0x58, 0xc1, 0xe8, 0xbf, + 0x4f, 0xc3, 0x7a, 0xc7, 0xea, 0x93, 0x0e, 0xb1, 0x02, 0xfb, 0x8d, 0x32, 0x17, 0x5f, 0xc0, 0x92, + 0xd5, 0x1b, 0xb9, 0x91, 0x3c, 0xed, 0xcf, 0x53, 0x27, 0x04, 0x80, 0x21, 0x43, 0x9f, 0xd2, 0x3e, + 0x9f, 0xb2, 0x39, 0x91, 0x1c, 0x80, 0x5e, 0xc1, 0xca, 0x90, 0xf4, 0x58, 0xac, 0x65, 0x29, 0x99, + 0x07, 0x1b, 0x43, 0xd0, 0x4f, 0x21, 0xf3, 0xce, 0xa1, 0x2e, 0x9f, 0xd9, 0xc5, 0xb9, 0xe1, 0x09, + 0x46, 0x7f, 0x0f, 0x39, 0xb6, 0xb4, 0xa9, 0x37, 0xc0, 0xc4, 0x8e, 0xd0, 0x0b, 0xc8, 0x0d, 0x1d, + 0xcf, 0x9c, 0x23, 0x13, 0xb2, 0x43, 0xc7, 0x13, 0x9f, 0x1c, 0x64, 0x5d, 0x25, 0xa0, 0xf4, 0x6d, + 0x20, 0xeb, 0x4a, 0x7c, 0xea, 0x01, 0x64, 0x6b, 0xec, 0x32, 0xc6, 0x93, 0x6f, 0x17, 0x96, 0xf8, + 0xcd, 0x4c, 0x3a, 0x44, 0x13, 0x58, 0x6e, 0x86, 0x85, 0xc1, 0x78, 0x85, 0xa7, 0xd5, 0x15, 0xfe, + 0x04, 0x8a, 0xbe, 0x73, 0x45, 0x5c, 0xb3, 0x1f, 0x58, 0x76, 0x92, 0x1c, 0x69, 0x5c, 0xe0, 0xd2, + 0x23, 0x29, 0xd4, 0xcf, 0xa0, 0x7c, 0x48, 0x87, 0x8e, 0x67, 0x79, 0x11, 0x27, 0x0d, 0x95, 0xa9, + 0xff, 0x09, 0x2c, 0x73, 0x0f, 0x61, 0x39, 0xc5, 0x57, 0xd4, 0xa7, 0xb3, 0xc2, 0x98, 0xf4, 0x1a, + 0x4b, 0x80, 0xee, 0x42, 0x89, 0xdf, 0x1a, 0xda, 0xc9, 0x0a, 0x43, 0x17, 0x50, 0xea, 0x49, 0x4f, + 0x66, 0x42, 0xcb, 0x86, 0xf6, 0xc3, 0x59, 0xb4, 0xb3, 0x3a, 0x86, 0x8b, 0xbd, 0x09, 0x8d, 0xfe, + 0xa7, 0x14, 0x64, 0x6a, 0x01, 0xf5, 0x4f, 0x1c, 0x2f, 0xfa, 0x5f, 0x5e, 0x43, 0x26, 0x77, 0x89, + 0xf4, 0x07, 0xbb, 0xc4, 0x3e, 0xac, 0x39, 0x43, 0x9f, 0x06, 0x91, 0xe5, 0xd9, 0x64, 0x3a, 0xd0, + 0x68, 0xac, 0x4a, 0xa2, 0xfd, 0x35, 0xac, 0xc5, 0xfd, 0x54, 0x03, 0xfd, 0x33, 0x00, 0x3b, 0xa0, + 0xbe, 0xf9, 0x86, 0xc9, 0x65, 0xb0, 0x67, 0xa6, 0x6f, 0x4c, 0x80, 0xb3, 0x76, 0x4c, 0xa5, 0xff, + 0x18, 0x4a, 0x09, 0x6f, 0xdb, 0x0a, 0xac, 0x61, 0x88, 0x3e, 0x83, 0x82, 0x15, 0xfa, 0xc4, 0x8e, + 0xcc, 0x80, 0x39, 0x11, 0xb4, 0x69, 0x9c, 0x17, 0x42, 0xcc, 0x65, 0xfa, 0x77, 0x29, 0xc8, 0xf3, + 0x79, 0xaa, 0xb1, 0x3b, 0xe1, 0x55, 0x84, 
0x8e, 0xa1, 0xc0, 0xd7, 0x2c, 0xf5, 0x06, 0x66, 0x40, + 0xec, 0x48, 0x06, 0x6f, 0xe6, 0xd5, 0x50, 0x49, 0x14, 0x9c, 0x73, 0x95, 0xac, 0x79, 0x02, 0x45, + 0xd7, 0xf2, 0x06, 0x23, 0x76, 0x3f, 0x15, 0xc3, 0x4a, 0xef, 0x2c, 0xec, 0x66, 0x71, 0x21, 0x96, + 0xf2, 0xbe, 0xa2, 0x0e, 0xac, 0x8e, 0x47, 0x6e, 0xfa, 0xbc, 0xeb, 0xf2, 0xc0, 0xf7, 0xbd, 0x8f, + 0x05, 0x40, 0x8e, 0x14, 0x97, 0xec, 0x49, 0x01, 0x1b, 0xd5, 0xba, 0x8c, 0x2e, 0xe1, 0xa3, 0xc3, + 0xe4, 0x37, 0x23, 0x12, 0xb2, 0x54, 0x5e, 0xe2, 0x17, 0x64, 0x39, 0xaa, 0x87, 0xb7, 0x5e, 0x78, + 0xb1, 0xb0, 0x45, 0x2f, 0x21, 0xd3, 0x17, 0x2f, 0x19, 0x62, 0x0c, 0xb9, 0x83, 0xc7, 0x1f, 0x79, + 0xf1, 0xc0, 0x09, 0x80, 0x2d, 0x46, 0x71, 0x47, 0xb7, 0x45, 0x80, 0xf9, 0xda, 0xb8, 0x65, 0x31, + 0xaa, 0x93, 0x81, 0xf3, 0x8e, 0xd2, 0xd2, 0xff, 0xba, 0x02, 0x1b, 0x53, 0xa3, 0x0a, 0x7d, 0xea, + 0x85, 0x04, 0x7d, 0x05, 0x5a, 0xdf, 0xb2, 0x89, 0xf2, 0x58, 0x14, 0x2f, 0xa2, 0xff, 0x9f, 0xef, + 0x08, 0x8e, 0x4b, 0xfd, 0x89, 0x76, 0x88, 0x7e, 0x09, 0xeb, 0xf1, 0xad, 0x71, 0x82, 0x56, 0x04, + 0x60, 0x77, 0x16, 0xed, 0x74, 0x25, 0xc7, 0x6b, 0x31, 0x8b, 0x4a, 0xde, 0x01, 0xcd, 0xa5, 0x03, + 0x3a, 0x41, 0xbc, 0x70, 0x47, 0xe2, 0x12, 0x63, 0x50, 0x49, 0xcf, 0x60, 0xd5, 0xb5, 0x2e, 0x89, + 0x3b, 0xc1, 0xba, 0x78, 0x47, 0x56, 0x8d, 0x53, 0x4c, 0xf5, 0x75, 0xea, 0x21, 0x2e, 0x2c, 0x2f, + 0xdd, 0xb5, 0xaf, 0x8c, 0x41, 0x25, 0xfd, 0x06, 0xd6, 0xfb, 0x23, 0xd7, 0x35, 0xa7, 0x98, 0xf9, + 0x85, 0xf4, 0x96, 0x49, 0xeb, 0x4e, 0xd0, 0x60, 0xc4, 0x38, 0x26, 0x65, 0xe8, 0x12, 0x36, 0x43, + 0xab, 0x4f, 0xcc, 0x90, 0x97, 0x71, 0x95, 0x7b, 0x99, 0x73, 0x3f, 0x9b, 0xc5, 0x7d, 0x53, 0xed, + 0xc7, 0xeb, 0xe1, 0x4d, 0x27, 0x82, 0x01, 0x6c, 0x8b, 0x35, 0x3d, 0x3e, 0x3e, 0xa8, 0x8e, 0x32, + 0xb7, 0x67, 0xef, 0x54, 0x59, 0xc0, 0xf7, 0x9d, 0x49, 0x81, 0xe2, 0xc8, 0x84, 0x0d, 0x65, 0x73, + 0x50, 0x5c, 0xe4, 0xb8, 0x8b, 0xef, 0x7f, 0x74, 0x83, 0x50, 0x17, 0xa2, 0x7d, 0xc3, 0xbe, 0x5b, + 0x87, 0xc2, 0xc4, 0xbb, 0x29, 0xbf, 0xb7, 0xdf, 0x92, 0x9d, 0xe7, 0xe4, 0xf2, 
0x30, 0xb6, 0xc5, + 0xf9, 0xf7, 0x4a, 0x8b, 0x95, 0x6b, 0x12, 0x04, 0x34, 0xe0, 0x8f, 0x28, 0x4a, 0xb9, 0x0e, 0x7c, + 0x7b, 0xaf, 0xc3, 0xdf, 0x5e, 0xb1, 0x30, 0xd0, 0xfb, 0xb0, 0x55, 0xb5, 0xa2, 0x24, 0xa2, 0x22, + 0x97, 0xc3, 0x78, 0x8b, 0x3a, 0x81, 0x4c, 0x20, 0x3e, 0xe3, 0x1c, 0x9e, 0x39, 0x65, 0x37, 0x6d, + 0x71, 0x38, 0x41, 0xeb, 0x6f, 0x61, 0xfb, 0x46, 0x3f, 0x72, 0xd3, 0x78, 0x0d, 0xd9, 0x40, 0x7e, + 0xc7, 0x9e, 0x7e, 0x30, 0xa7, 0x27, 0x81, 0xc2, 0x63, 0xfc, 0x53, 0x02, 0xa0, 0x3c, 0x34, 0xe4, + 0x60, 0x45, 0xde, 0xba, 0xb5, 0x4f, 0xd8, 0xa5, 0xe4, 0x6b, 0x03, 0x5f, 0x98, 0x67, 0xcd, 0x46, + 0xfd, 0xb5, 0xd1, 0xb8, 0xd0, 0x52, 0xec, 0x6e, 0x9b, 0xb4, 0xd2, 0xac, 0xd5, 0x6e, 0x75, 0x3a, + 0xf5, 0x6a, 0xc3, 0xd0, 0x16, 0x10, 0xc0, 0xb2, 0xd4, 0x2c, 0xb2, 0x7b, 0x2c, 0x87, 0x4a, 0xc1, + 0xd2, 0xc1, 0x9f, 0x53, 0x50, 0xe4, 0x7d, 0xa8, 0xc4, 0x0f, 0xf4, 0xe8, 0x8f, 0x29, 0x58, 0xbb, + 0x61, 0x98, 0xe8, 0x60, 0x66, 0xb9, 0x9f, 0x19, 0xfb, 0xad, 0x17, 0x77, 0xc2, 0x88, 0xb1, 0xeb, + 0x8f, 0x7e, 0xfb, 0xdd, 0x3f, 0xff, 0x90, 0x2e, 0xeb, 0x6b, 0xc9, 0xdf, 0x07, 0xe1, 0x97, 0x72, + 0xa9, 0x92, 0x2f, 0x53, 0x4f, 0xab, 0x11, 0x6c, 0xd9, 0x74, 0x38, 0x83, 0xb9, 0xba, 0x36, 0x39, + 0x9c, 0x76, 0x40, 0x23, 0xda, 0x4e, 0xfd, 0xe2, 0x95, 0x34, 0x1f, 0x50, 0x56, 0x2e, 0xf7, 0x68, + 0x30, 0xd8, 0x1f, 0x10, 0x8f, 0xbf, 0xc4, 0xef, 0x0b, 0x95, 0xe5, 0x3b, 0xe1, 0xf4, 0x9f, 0x00, + 0x2f, 0xc5, 0xd7, 0xbf, 0x53, 0xa9, 0xcb, 0x65, 0x6e, 0xfb, 0xe2, 0x3f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0xe5, 0x59, 0xbe, 0xb0, 0xe8, 0x18, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/text_annotation.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/text_annotation.pb.go new file mode 100644 index 000000000..566d59670 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/text_annotation.pb.go @@ -0,0 +1,537 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/cloud/vision/v1/text_annotation.proto + +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Enum to denote the type of break found. New line, space etc. +type TextAnnotation_DetectedBreak_BreakType int32 + +const ( + // Unknown break label type. + TextAnnotation_DetectedBreak_UNKNOWN TextAnnotation_DetectedBreak_BreakType = 0 + // Regular space. + TextAnnotation_DetectedBreak_SPACE TextAnnotation_DetectedBreak_BreakType = 1 + // Sure space (very wide). + TextAnnotation_DetectedBreak_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 2 + // Line-wrapping break. + TextAnnotation_DetectedBreak_EOL_SURE_SPACE TextAnnotation_DetectedBreak_BreakType = 3 + // End-line hyphen that is not present in text; does + TextAnnotation_DetectedBreak_HYPHEN TextAnnotation_DetectedBreak_BreakType = 4 + // not co-occur with SPACE, LEADER_SPACE, or + // LINE_BREAK. + // Line break that ends a paragraph. + TextAnnotation_DetectedBreak_LINE_BREAK TextAnnotation_DetectedBreak_BreakType = 5 +) + +var TextAnnotation_DetectedBreak_BreakType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SPACE", + 2: "SURE_SPACE", + 3: "EOL_SURE_SPACE", + 4: "HYPHEN", + 5: "LINE_BREAK", +} +var TextAnnotation_DetectedBreak_BreakType_value = map[string]int32{ + "UNKNOWN": 0, + "SPACE": 1, + "SURE_SPACE": 2, + "EOL_SURE_SPACE": 3, + "HYPHEN": 4, + "LINE_BREAK": 5, +} + +func (x TextAnnotation_DetectedBreak_BreakType) String() string { + return proto.EnumName(TextAnnotation_DetectedBreak_BreakType_name, int32(x)) +} +func (TextAnnotation_DetectedBreak_BreakType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 1, 0} +} + +// Type of a block (text, image etc) as identified by OCR. 
+type Block_BlockType int32 + +const ( + // Unknown block type. + Block_UNKNOWN Block_BlockType = 0 + // Regular text block. + Block_TEXT Block_BlockType = 1 + // Table block. + Block_TABLE Block_BlockType = 2 + // Image block. + Block_PICTURE Block_BlockType = 3 + // Horizontal/vertical line box. + Block_RULER Block_BlockType = 4 + // Barcode block. + Block_BARCODE Block_BlockType = 5 +) + +var Block_BlockType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "TEXT", + 2: "TABLE", + 3: "PICTURE", + 4: "RULER", + 5: "BARCODE", +} +var Block_BlockType_value = map[string]int32{ + "UNKNOWN": 0, + "TEXT": 1, + "TABLE": 2, + "PICTURE": 3, + "RULER": 4, + "BARCODE": 5, +} + +func (x Block_BlockType) String() string { + return proto.EnumName(Block_BlockType_name, int32(x)) +} +func (Block_BlockType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2, 0} } + +// TextAnnotation contains a structured representation of OCR extracted text. +// The hierarchy of an OCR extracted text structure is like this: +// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol +// Each structural component, starting from Page, may further have their own +// properties. Properties describe detected languages, breaks etc.. Please +// refer to the [google.cloud.vision.v1.TextAnnotation.TextProperty][google.cloud.vision.v1.TextAnnotation.TextProperty] message +// definition below for more detail. +type TextAnnotation struct { + // List of pages detected by OCR. + Pages []*Page `protobuf:"bytes,1,rep,name=pages" json:"pages,omitempty"` + // UTF-8 text detected on the pages. 
+ Text string `protobuf:"bytes,2,opt,name=text" json:"text,omitempty"` +} + +func (m *TextAnnotation) Reset() { *m = TextAnnotation{} } +func (m *TextAnnotation) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation) ProtoMessage() {} +func (*TextAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *TextAnnotation) GetPages() []*Page { + if m != nil { + return m.Pages + } + return nil +} + +func (m *TextAnnotation) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +// Detected language for a structural component. +type TextAnnotation_DetectedLanguage struct { + // The BCP-47 language code, such as "en-US" or "sr-Latn". For more + // information, see + // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. + LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // Confidence of detected language. Range [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *TextAnnotation_DetectedLanguage) Reset() { *m = TextAnnotation_DetectedLanguage{} } +func (m *TextAnnotation_DetectedLanguage) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_DetectedLanguage) ProtoMessage() {} +func (*TextAnnotation_DetectedLanguage) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 0} +} + +func (m *TextAnnotation_DetectedLanguage) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *TextAnnotation_DetectedLanguage) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Detected start or end of a structural component. 
+type TextAnnotation_DetectedBreak struct { + Type TextAnnotation_DetectedBreak_BreakType `protobuf:"varint,1,opt,name=type,enum=google.cloud.vision.v1.TextAnnotation_DetectedBreak_BreakType" json:"type,omitempty"` + // True if break prepends the element. + IsPrefix bool `protobuf:"varint,2,opt,name=is_prefix,json=isPrefix" json:"is_prefix,omitempty"` +} + +func (m *TextAnnotation_DetectedBreak) Reset() { *m = TextAnnotation_DetectedBreak{} } +func (m *TextAnnotation_DetectedBreak) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_DetectedBreak) ProtoMessage() {} +func (*TextAnnotation_DetectedBreak) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 1} } + +func (m *TextAnnotation_DetectedBreak) GetType() TextAnnotation_DetectedBreak_BreakType { + if m != nil { + return m.Type + } + return TextAnnotation_DetectedBreak_UNKNOWN +} + +func (m *TextAnnotation_DetectedBreak) GetIsPrefix() bool { + if m != nil { + return m.IsPrefix + } + return false +} + +// Additional information detected on the structural component. +type TextAnnotation_TextProperty struct { + // A list of detected languages together with confidence. + DetectedLanguages []*TextAnnotation_DetectedLanguage `protobuf:"bytes,1,rep,name=detected_languages,json=detectedLanguages" json:"detected_languages,omitempty"` + // Detected start or end of a text segment. 
+ DetectedBreak *TextAnnotation_DetectedBreak `protobuf:"bytes,2,opt,name=detected_break,json=detectedBreak" json:"detected_break,omitempty"` +} + +func (m *TextAnnotation_TextProperty) Reset() { *m = TextAnnotation_TextProperty{} } +func (m *TextAnnotation_TextProperty) String() string { return proto.CompactTextString(m) } +func (*TextAnnotation_TextProperty) ProtoMessage() {} +func (*TextAnnotation_TextProperty) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 2} } + +func (m *TextAnnotation_TextProperty) GetDetectedLanguages() []*TextAnnotation_DetectedLanguage { + if m != nil { + return m.DetectedLanguages + } + return nil +} + +func (m *TextAnnotation_TextProperty) GetDetectedBreak() *TextAnnotation_DetectedBreak { + if m != nil { + return m.DetectedBreak + } + return nil +} + +// Detected page from OCR. +type Page struct { + // Additional information detected on the page. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // Page width in pixels. + Width int32 `protobuf:"varint,2,opt,name=width" json:"width,omitempty"` + // Page height in pixels. + Height int32 `protobuf:"varint,3,opt,name=height" json:"height,omitempty"` + // List of blocks of text, images etc on this page. 
+ Blocks []*Block `protobuf:"bytes,4,rep,name=blocks" json:"blocks,omitempty"` +} + +func (m *Page) Reset() { *m = Page{} } +func (m *Page) String() string { return proto.CompactTextString(m) } +func (*Page) ProtoMessage() {} +func (*Page) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *Page) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Page) GetWidth() int32 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Page) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Page) GetBlocks() []*Block { + if m != nil { + return m.Blocks + } + return nil +} + +// Logical element on the page. +type Block struct { + // Additional information detected for the block. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the block. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of paragraphs in this block (if this blocks is of type text). + Paragraphs []*Paragraph `protobuf:"bytes,3,rep,name=paragraphs" json:"paragraphs,omitempty"` + // Detected block type (text, image etc) for this block. 
+ BlockType Block_BlockType `protobuf:"varint,4,opt,name=block_type,json=blockType,enum=google.cloud.vision.v1.Block_BlockType" json:"block_type,omitempty"` +} + +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *Block) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Block) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Block) GetParagraphs() []*Paragraph { + if m != nil { + return m.Paragraphs + } + return nil +} + +func (m *Block) GetBlockType() Block_BlockType { + if m != nil { + return m.BlockType + } + return Block_UNKNOWN +} + +// Structural unit of text representing a number of words in certain order. +type Paragraph struct { + // Additional information detected for the paragraph. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the paragraph. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of words in this paragraph. 
+ Words []*Word `protobuf:"bytes,3,rep,name=words" json:"words,omitempty"` +} + +func (m *Paragraph) Reset() { *m = Paragraph{} } +func (m *Paragraph) String() string { return proto.CompactTextString(m) } +func (*Paragraph) ProtoMessage() {} +func (*Paragraph) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *Paragraph) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Paragraph) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Paragraph) GetWords() []*Word { + if m != nil { + return m.Words + } + return nil +} + +// A word representation. +type Word struct { + // Additional information detected for the word. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the word. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // List of symbols in the word. + // The order of the symbols follows the natural reading order. 
+ Symbols []*Symbol `protobuf:"bytes,3,rep,name=symbols" json:"symbols,omitempty"` +} + +func (m *Word) Reset() { *m = Word{} } +func (m *Word) String() string { return proto.CompactTextString(m) } +func (*Word) ProtoMessage() {} +func (*Word) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *Word) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Word) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Word) GetSymbols() []*Symbol { + if m != nil { + return m.Symbols + } + return nil +} + +// A single symbol representation. +type Symbol struct { + // Additional information detected for the symbol. + Property *TextAnnotation_TextProperty `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The bounding box for the symbol. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox *BoundingPoly `protobuf:"bytes,2,opt,name=bounding_box,json=boundingBox" json:"bounding_box,omitempty"` + // The actual UTF-8 representation of the symbol. 
+ Text string `protobuf:"bytes,3,opt,name=text" json:"text,omitempty"` +} + +func (m *Symbol) Reset() { *m = Symbol{} } +func (m *Symbol) String() string { return proto.CompactTextString(m) } +func (*Symbol) ProtoMessage() {} +func (*Symbol) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *Symbol) GetProperty() *TextAnnotation_TextProperty { + if m != nil { + return m.Property + } + return nil +} + +func (m *Symbol) GetBoundingBox() *BoundingPoly { + if m != nil { + return m.BoundingBox + } + return nil +} + +func (m *Symbol) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +func init() { + proto.RegisterType((*TextAnnotation)(nil), "google.cloud.vision.v1.TextAnnotation") + proto.RegisterType((*TextAnnotation_DetectedLanguage)(nil), "google.cloud.vision.v1.TextAnnotation.DetectedLanguage") + proto.RegisterType((*TextAnnotation_DetectedBreak)(nil), "google.cloud.vision.v1.TextAnnotation.DetectedBreak") + proto.RegisterType((*TextAnnotation_TextProperty)(nil), "google.cloud.vision.v1.TextAnnotation.TextProperty") + proto.RegisterType((*Page)(nil), "google.cloud.vision.v1.Page") + proto.RegisterType((*Block)(nil), "google.cloud.vision.v1.Block") + proto.RegisterType((*Paragraph)(nil), "google.cloud.vision.v1.Paragraph") + proto.RegisterType((*Word)(nil), "google.cloud.vision.v1.Word") + proto.RegisterType((*Symbol)(nil), "google.cloud.vision.v1.Symbol") + proto.RegisterEnum("google.cloud.vision.v1.TextAnnotation_DetectedBreak_BreakType", TextAnnotation_DetectedBreak_BreakType_name, TextAnnotation_DetectedBreak_BreakType_value) + proto.RegisterEnum("google.cloud.vision.v1.Block_BlockType", Block_BlockType_name, Block_BlockType_value) +} + +func init() { proto.RegisterFile("google/cloud/vision/v1/text_annotation.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 744 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0x4f, 0x6f, 0xd3, 0x4e, + 
0x10, 0xfd, 0xb9, 0xb1, 0xd3, 0x78, 0xd2, 0x46, 0xfe, 0x2d, 0xa8, 0x8a, 0x42, 0xa9, 0x8a, 0x01, + 0xd1, 0x03, 0x72, 0xd4, 0x14, 0x04, 0x12, 0x08, 0x29, 0x4e, 0x0d, 0xad, 0x1a, 0x25, 0xd6, 0x36, + 0x51, 0xf9, 0x73, 0xb0, 0xfc, 0x67, 0xeb, 0x58, 0x4d, 0xbd, 0x96, 0xed, 0xb6, 0xc9, 0x8d, 0x4f, + 0xc5, 0x89, 0x6f, 0xc1, 0x09, 0xee, 0x9c, 0xb9, 0x72, 0x44, 0x5e, 0xdb, 0x69, 0x52, 0x61, 0x04, + 0x88, 0x43, 0x2f, 0xd6, 0xce, 0xe4, 0xed, 0xdb, 0xf7, 0x66, 0x33, 0x3b, 0xf0, 0xd0, 0xa5, 0xd4, + 0x1d, 0x93, 0xa6, 0x3d, 0xa6, 0x67, 0x4e, 0xf3, 0xdc, 0x8b, 0x3c, 0xea, 0x37, 0xcf, 0xb7, 0x9b, + 0x31, 0x99, 0xc4, 0x86, 0xe9, 0xfb, 0x34, 0x36, 0x63, 0x8f, 0xfa, 0x4a, 0x10, 0xd2, 0x98, 0xa2, + 0xb5, 0x14, 0xad, 0x30, 0xb4, 0x92, 0xa2, 0x95, 0xf3, 0xed, 0xc6, 0x7a, 0xc6, 0x62, 0x06, 0x5e, + 0xf3, 0x72, 0x53, 0x94, 0xee, 0x6a, 0xdc, 0x2f, 0x38, 0xc3, 0x25, 0xf4, 0x94, 0xc4, 0xe1, 0x34, + 0x85, 0xc9, 0xdf, 0x78, 0xa8, 0x0d, 0xc8, 0x24, 0x6e, 0xcf, 0x08, 0x50, 0x0b, 0x84, 0xc0, 0x74, + 0x49, 0x54, 0xe7, 0x36, 0x4b, 0x5b, 0xd5, 0xd6, 0xba, 0xf2, 0xf3, 0xf3, 0x15, 0xdd, 0x74, 0x09, + 0x4e, 0xa1, 0x08, 0x01, 0x9f, 0x88, 0xaf, 0x2f, 0x6d, 0x72, 0x5b, 0x22, 0x66, 0xeb, 0xc6, 0x11, + 0x48, 0xbb, 0x24, 0x26, 0x76, 0x4c, 0x9c, 0xae, 0xe9, 0xbb, 0x67, 0xa6, 0x4b, 0xd0, 0x5d, 0x58, + 0x1d, 0x67, 0x6b, 0xc3, 0xa6, 0x0e, 0xa9, 0x73, 0x6c, 0xc3, 0x4a, 0x9e, 0xec, 0x50, 0x87, 0xa0, + 0x0d, 0x00, 0x9b, 0xfa, 0xc7, 0x9e, 0x43, 0x7c, 0x9b, 0x30, 0xca, 0x25, 0x3c, 0x97, 0x69, 0x7c, + 0xe5, 0x60, 0x35, 0x67, 0x56, 0x43, 0x62, 0x9e, 0x20, 0x0c, 0x7c, 0x3c, 0x0d, 0x52, 0xb6, 0x5a, + 0xeb, 0x45, 0x91, 0xe2, 0x45, 0xa3, 0xca, 0x02, 0x87, 0xc2, 0xbe, 0x83, 0x69, 0x40, 0x30, 0xe3, + 0x42, 0xb7, 0x40, 0xf4, 0x22, 0x23, 0x08, 0xc9, 0xb1, 0x37, 0x61, 0x22, 0x2a, 0xb8, 0xe2, 0x45, + 0x3a, 0x8b, 0x65, 0x1b, 0xc4, 0x19, 0x1e, 0x55, 0x61, 0x79, 0xd8, 0x3b, 0xe8, 0xf5, 0x8f, 0x7a, + 0xd2, 0x7f, 0x48, 0x04, 0xe1, 0x50, 0x6f, 0x77, 0x34, 0x89, 0x43, 0x35, 0x80, 0xc3, 0x21, 0xd6, + 0x8c, 0x34, 0x5e, 0x42, 0x08, 0x6a, 
0x5a, 0xbf, 0x6b, 0xcc, 0xe5, 0x4a, 0x08, 0xa0, 0xbc, 0xf7, + 0x46, 0xdf, 0xd3, 0x7a, 0x12, 0x9f, 0xe0, 0xbb, 0xfb, 0x3d, 0xcd, 0x50, 0xb1, 0xd6, 0x3e, 0x90, + 0x84, 0xc6, 0x27, 0x0e, 0x56, 0x12, 0xc9, 0x7a, 0x48, 0x03, 0x12, 0xc6, 0x53, 0x74, 0x0c, 0xc8, + 0xc9, 0x34, 0x1b, 0x79, 0xc5, 0xf2, 0x6b, 0x7a, 0xf2, 0x87, 0xa6, 0xf3, 0x2b, 0xc1, 0xff, 0x3b, + 0x57, 0x32, 0x11, 0x7a, 0x07, 0xb5, 0xd9, 0x39, 0x56, 0x62, 0x93, 0xf9, 0xaf, 0xb6, 0x1e, 0xfd, + 0x4d, 0x61, 0xf1, 0xaa, 0x33, 0x1f, 0xca, 0x1f, 0x39, 0xe0, 0x93, 0xbf, 0x0e, 0xea, 0x43, 0x25, + 0xc8, 0x9c, 0xb1, 0x8b, 0xab, 0xb6, 0x76, 0x7e, 0x93, 0x7f, 0xbe, 0x28, 0x78, 0x46, 0x82, 0x6e, + 0x82, 0x70, 0xe1, 0x39, 0xf1, 0x88, 0xa9, 0x15, 0x70, 0x1a, 0xa0, 0x35, 0x28, 0x8f, 0x88, 0xe7, + 0x8e, 0xe2, 0x7a, 0x89, 0xa5, 0xb3, 0x08, 0x3d, 0x86, 0xb2, 0x35, 0xa6, 0xf6, 0x49, 0x54, 0xe7, + 0x59, 0x01, 0x6f, 0x17, 0x1d, 0xae, 0x26, 0x28, 0x9c, 0x81, 0xe5, 0xf7, 0x25, 0x10, 0x58, 0xe6, + 0xdf, 0xeb, 0x7f, 0x05, 0x2b, 0x16, 0x3d, 0xf3, 0x1d, 0xcf, 0x77, 0x0d, 0x8b, 0x4e, 0xb2, 0xa2, + 0xdf, 0x2b, 0xd4, 0x95, 0x61, 0x75, 0x3a, 0x9e, 0xe2, 0x6a, 0xbe, 0x53, 0xa5, 0x13, 0xd4, 0x06, + 0x08, 0xcc, 0xd0, 0x74, 0x43, 0x33, 0x18, 0x45, 0xf5, 0x12, 0xb3, 0x77, 0xa7, 0xb8, 0x8d, 0x33, + 0x24, 0x9e, 0xdb, 0x84, 0x5e, 0x02, 0x30, 0xc3, 0x06, 0xeb, 0x2b, 0x9e, 0xf5, 0xd5, 0x83, 0x5f, + 0x56, 0x28, 0xfd, 0xb2, 0x06, 0x12, 0xad, 0x7c, 0x29, 0x63, 0x10, 0x67, 0xf9, 0xc5, 0x46, 0xa9, + 0x00, 0x3f, 0xd0, 0x5e, 0x0f, 0x24, 0x2e, 0x69, 0x99, 0x41, 0x5b, 0xed, 0x26, 0x2d, 0x52, 0x85, + 0x65, 0x7d, 0xbf, 0x33, 0x18, 0xe2, 0xa4, 0x37, 0x44, 0x10, 0xf0, 0xb0, 0xab, 0x61, 0x89, 0x4f, + 0xf2, 0x6a, 0x1b, 0x77, 0xfa, 0xbb, 0x9a, 0x24, 0xc8, 0x9f, 0x39, 0x10, 0x67, 0xaa, 0xaf, 0xf1, + 0x35, 0xb4, 0x40, 0xb8, 0xa0, 0xa1, 0x93, 0xdf, 0x40, 0xe1, 0x43, 0x7a, 0x44, 0x43, 0x07, 0xa7, + 0x50, 0xf9, 0x0b, 0x07, 0x7c, 0x12, 0x5f, 0x63, 0x5b, 0x4f, 0x61, 0x39, 0x9a, 0x9e, 0x5a, 0x74, + 0x9c, 0x1b, 0xdb, 0x28, 0xe2, 0x38, 0x64, 0x30, 0x9c, 0xc3, 0xe5, 0x0f, 
0x1c, 0x94, 0xd3, 0xdc, + 0x35, 0xb6, 0x97, 0x8f, 0xb2, 0xd2, 0xe5, 0x28, 0x53, 0x63, 0x68, 0xd8, 0xf4, 0xb4, 0x80, 0x4b, + 0xbd, 0xb1, 0xa8, 0x50, 0x4f, 0x06, 0xab, 0xce, 0xbd, 0x7d, 0x9e, 0xc1, 0x5d, 0x9a, 0xbc, 0xd5, + 0x0a, 0x0d, 0xdd, 0xa6, 0x4b, 0x7c, 0x36, 0x76, 0x9b, 0xe9, 0x4f, 0x66, 0xe0, 0x45, 0x57, 0x07, + 0xf4, 0xb3, 0x74, 0xf5, 0x9d, 0xe3, 0xac, 0x32, 0xc3, 0xee, 0xfc, 0x08, 0x00, 0x00, 0xff, 0xff, + 0x80, 0x29, 0x2a, 0x3b, 0x2f, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/web_detection.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/web_detection.pb.go new file mode 100644 index 000000000..2f4089cf4 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/vision/v1/web_detection.pb.go @@ -0,0 +1,193 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/vision/v1/web_detection.proto + +package vision + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Relevant information for the image from the Internet. +type WebDetection struct { + // Deduced entities from similar images on the Internet. + WebEntities []*WebDetection_WebEntity `protobuf:"bytes,1,rep,name=web_entities,json=webEntities" json:"web_entities,omitempty"` + // Fully matching images from the Internet. + // They're definite neardups and most often a copy of the query image with + // merely a size change. + FullMatchingImages []*WebDetection_WebImage `protobuf:"bytes,2,rep,name=full_matching_images,json=fullMatchingImages" json:"full_matching_images,omitempty"` + // Partial matching images from the Internet. + // Those images are similar enough to share some key-point features. 
For + // example an original image will likely have partial matching for its crops. + PartialMatchingImages []*WebDetection_WebImage `protobuf:"bytes,3,rep,name=partial_matching_images,json=partialMatchingImages" json:"partial_matching_images,omitempty"` + // Web pages containing the matching images from the Internet. + PagesWithMatchingImages []*WebDetection_WebPage `protobuf:"bytes,4,rep,name=pages_with_matching_images,json=pagesWithMatchingImages" json:"pages_with_matching_images,omitempty"` +} + +func (m *WebDetection) Reset() { *m = WebDetection{} } +func (m *WebDetection) String() string { return proto.CompactTextString(m) } +func (*WebDetection) ProtoMessage() {} +func (*WebDetection) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *WebDetection) GetWebEntities() []*WebDetection_WebEntity { + if m != nil { + return m.WebEntities + } + return nil +} + +func (m *WebDetection) GetFullMatchingImages() []*WebDetection_WebImage { + if m != nil { + return m.FullMatchingImages + } + return nil +} + +func (m *WebDetection) GetPartialMatchingImages() []*WebDetection_WebImage { + if m != nil { + return m.PartialMatchingImages + } + return nil +} + +func (m *WebDetection) GetPagesWithMatchingImages() []*WebDetection_WebPage { + if m != nil { + return m.PagesWithMatchingImages + } + return nil +} + +// Entity deduced from similar images on the Internet. +type WebDetection_WebEntity struct { + // Opaque entity ID. + EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId" json:"entity_id,omitempty"` + // Overall relevancy score for the entity. + // Not normalized and not comparable across different image queries. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` + // Canonical description of the entity, in English. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` +} + +func (m *WebDetection_WebEntity) Reset() { *m = WebDetection_WebEntity{} } +func (m *WebDetection_WebEntity) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebEntity) ProtoMessage() {} +func (*WebDetection_WebEntity) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } + +func (m *WebDetection_WebEntity) GetEntityId() string { + if m != nil { + return m.EntityId + } + return "" +} + +func (m *WebDetection_WebEntity) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *WebDetection_WebEntity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Metadata for online images. +type WebDetection_WebImage struct { + // The result image URL. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // Overall relevancy score for the image. + // Not normalized and not comparable across different image queries. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` +} + +func (m *WebDetection_WebImage) Reset() { *m = WebDetection_WebImage{} } +func (m *WebDetection_WebImage) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebImage) ProtoMessage() {} +func (*WebDetection_WebImage) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 1} } + +func (m *WebDetection_WebImage) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *WebDetection_WebImage) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +// Metadata for web pages. +type WebDetection_WebPage struct { + // The result web page URL. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // Overall relevancy score for the web page. + // Not normalized and not comparable across different image queries. 
+ Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` +} + +func (m *WebDetection_WebPage) Reset() { *m = WebDetection_WebPage{} } +func (m *WebDetection_WebPage) String() string { return proto.CompactTextString(m) } +func (*WebDetection_WebPage) ProtoMessage() {} +func (*WebDetection_WebPage) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 2} } + +func (m *WebDetection_WebPage) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *WebDetection_WebPage) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func init() { + proto.RegisterType((*WebDetection)(nil), "google.cloud.vision.v1.WebDetection") + proto.RegisterType((*WebDetection_WebEntity)(nil), "google.cloud.vision.v1.WebDetection.WebEntity") + proto.RegisterType((*WebDetection_WebImage)(nil), "google.cloud.vision.v1.WebDetection.WebImage") + proto.RegisterType((*WebDetection_WebPage)(nil), "google.cloud.vision.v1.WebDetection.WebPage") +} + +func init() { proto.RegisterFile("google/cloud/vision/v1/web_detection.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 383 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x41, 0x4f, 0xea, 0x40, + 0x14, 0x85, 0x53, 0xca, 0x7b, 0x0f, 0x06, 0x16, 0xcf, 0x09, 0x4a, 0x53, 0x5d, 0x34, 0xae, 0x88, + 0xd1, 0x69, 0xc0, 0xa5, 0xae, 0x88, 0x2e, 0x58, 0x98, 0x60, 0x37, 0x24, 0x6e, 0xea, 0xd0, 0x8e, + 0xc3, 0x4d, 0xca, 0x4c, 0xd3, 0x19, 0x20, 0xfc, 0x58, 0xff, 0x87, 0x4b, 0x33, 0xd3, 0x62, 0x10, + 0x30, 0x21, 0xee, 0xee, 0xdc, 0x9e, 0xf3, 0x9d, 0xf6, 0xf6, 0x0e, 0xba, 0xe2, 0x52, 0xf2, 0x8c, + 0x85, 0x49, 0x26, 0x17, 0x69, 0xb8, 0x04, 0x05, 0x52, 0x84, 0xcb, 0x7e, 0xb8, 0x62, 0xd3, 0x38, + 0x65, 0x9a, 0x25, 0x1a, 0xa4, 0x20, 0x79, 0x21, 0xb5, 0xc4, 0x67, 0xa5, 0x96, 0x58, 0x2d, 0x29, + 0xb5, 0x64, 0xd9, 0xf7, 0x2f, 0x2a, 0x06, 0xcd, 0x21, 0xa4, 0x42, 0x48, 0x4d, 0x8d, 0x49, 0x95, + 0xae, 0xcb, 0xf7, 0x3a, 0x6a, 
0x4f, 0xd8, 0xf4, 0x61, 0x03, 0xc3, 0xcf, 0xa8, 0x6d, 0xe8, 0x4c, + 0x68, 0xd0, 0xc0, 0x94, 0xe7, 0x04, 0x6e, 0xaf, 0x35, 0x20, 0xe4, 0x30, 0x9d, 0x6c, 0x7b, 0xcd, + 0xe1, 0xd1, 0xf8, 0xd6, 0x51, 0x6b, 0x55, 0x95, 0xc0, 0x14, 0x8e, 0x51, 0xe7, 0x6d, 0x91, 0x65, + 0xf1, 0x9c, 0xea, 0x64, 0x06, 0x82, 0xc7, 0x30, 0xa7, 0x9c, 0x29, 0xaf, 0x66, 0xd1, 0x37, 0xc7, + 0xa2, 0x47, 0xc6, 0x15, 0x61, 0x83, 0x7a, 0xaa, 0x48, 0xb6, 0xa5, 0x30, 0x43, 0xdd, 0x9c, 0x16, + 0x1a, 0xe8, 0x7e, 0x86, 0xfb, 0x9b, 0x8c, 0xd3, 0x8a, 0xb6, 0x13, 0x03, 0xc8, 0xcf, 0x4d, 0x11, + 0xaf, 0x40, 0xcf, 0xf6, 0x92, 0xea, 0x36, 0xe9, 0xfa, 0xd8, 0xa4, 0xb1, 0x09, 0xea, 0x5a, 0xde, + 0x04, 0xf4, 0xec, 0x7b, 0x94, 0xff, 0x8a, 0x9a, 0x5f, 0xc3, 0xc4, 0xe7, 0xa8, 0x69, 0x7f, 0xc7, + 0x3a, 0x86, 0xd4, 0x73, 0x02, 0xa7, 0xd7, 0x8c, 0x1a, 0x65, 0x63, 0x94, 0xe2, 0x0e, 0xfa, 0xa3, + 0x12, 0x59, 0x30, 0xaf, 0x16, 0x38, 0xbd, 0x5a, 0x54, 0x1e, 0x70, 0x80, 0x5a, 0x29, 0x53, 0x49, + 0x01, 0xb9, 0xc9, 0xf3, 0x5c, 0x6b, 0xda, 0x6e, 0xf9, 0x03, 0xd4, 0xd8, 0x7c, 0x2f, 0xfe, 0x8f, + 0xdc, 0x45, 0x91, 0x55, 0x68, 0x53, 0x1e, 0xa6, 0xfa, 0x7d, 0xf4, 0xaf, 0x7a, 0xf3, 0x63, 0x2d, + 0xc3, 0x02, 0xf9, 0x89, 0x9c, 0xff, 0x30, 0x94, 0xe1, 0xc9, 0xf6, 0x54, 0xc6, 0x66, 0x21, 0xc7, + 0xce, 0xcb, 0x7d, 0x25, 0xe6, 0x32, 0xa3, 0x82, 0x13, 0x59, 0xf0, 0x90, 0x33, 0x61, 0xd7, 0x35, + 0x2c, 0x1f, 0xd1, 0x1c, 0xd4, 0xee, 0x9d, 0xb8, 0x2b, 0xab, 0x0f, 0xc7, 0x99, 0xfe, 0xb5, 0xda, + 0xdb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xd9, 0xde, 0x3f, 0x3e, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go b/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go new file mode 100644 index 000000000..a5b58321d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go @@ -0,0 +1,4704 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/container/v1/cluster_service.proto + +/* +Package container is a generated protocol buffer package. + +It is generated from these files: + google/container/v1/cluster_service.proto + +It has these top-level messages: + NodeConfig + MasterAuth + ClientCertificateConfig + AddonsConfig + HttpLoadBalancing + HorizontalPodAutoscaling + KubernetesDashboard + MasterAuthorizedNetworksConfig + LegacyAbac + NetworkPolicy + IPAllocationPolicy + Cluster + ClusterUpdate + Operation + CreateClusterRequest + GetClusterRequest + UpdateClusterRequest + UpdateNodePoolRequest + SetNodePoolAutoscalingRequest + SetLoggingServiceRequest + SetMonitoringServiceRequest + SetAddonsConfigRequest + SetLocationsRequest + UpdateMasterRequest + SetMasterAuthRequest + DeleteClusterRequest + ListClustersRequest + ListClustersResponse + GetOperationRequest + ListOperationsRequest + CancelOperationRequest + ListOperationsResponse + GetServerConfigRequest + ServerConfig + CreateNodePoolRequest + DeleteNodePoolRequest + ListNodePoolsRequest + GetNodePoolRequest + NodePool + NodeManagement + AutoUpgradeOptions + SetNodePoolManagementRequest + SetNodePoolSizeRequest + RollbackNodePoolUpgradeRequest + ListNodePoolsResponse + NodePoolAutoscaling + SetLabelsRequest + SetLegacyAbacRequest + StartIPRotationRequest + CompleteIPRotationRequest + AcceleratorConfig + SetNetworkPolicyRequest +*/ +package container + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Allowed Network Policy providers. +type NetworkPolicy_Provider int32 + +const ( + // Not set + NetworkPolicy_PROVIDER_UNSPECIFIED NetworkPolicy_Provider = 0 + // Tigera (Calico Felix). + NetworkPolicy_CALICO NetworkPolicy_Provider = 1 +) + +var NetworkPolicy_Provider_name = map[int32]string{ + 0: "PROVIDER_UNSPECIFIED", + 1: "CALICO", +} +var NetworkPolicy_Provider_value = map[string]int32{ + "PROVIDER_UNSPECIFIED": 0, + "CALICO": 1, +} + +func (x NetworkPolicy_Provider) String() string { + return proto.EnumName(NetworkPolicy_Provider_name, int32(x)) +} +func (NetworkPolicy_Provider) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } + +// The current status of the cluster. +type Cluster_Status int32 + +const ( + // Not set. + Cluster_STATUS_UNSPECIFIED Cluster_Status = 0 + // The PROVISIONING state indicates the cluster is being created. + Cluster_PROVISIONING Cluster_Status = 1 + // The RUNNING state indicates the cluster has been created and is fully + // usable. + Cluster_RUNNING Cluster_Status = 2 + // The RECONCILING state indicates that some work is actively being done on + // the cluster, such as upgrading the master or node software. Details can + // be found in the `statusMessage` field. + Cluster_RECONCILING Cluster_Status = 3 + // The STOPPING state indicates the cluster is being deleted. + Cluster_STOPPING Cluster_Status = 4 + // The ERROR state indicates the cluster may be unusable. Details + // can be found in the `statusMessage` field. 
+ Cluster_ERROR Cluster_Status = 5 +) + +var Cluster_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "PROVISIONING", + 2: "RUNNING", + 3: "RECONCILING", + 4: "STOPPING", + 5: "ERROR", +} +var Cluster_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "PROVISIONING": 1, + "RUNNING": 2, + "RECONCILING": 3, + "STOPPING": 4, + "ERROR": 5, +} + +func (x Cluster_Status) String() string { + return proto.EnumName(Cluster_Status_name, int32(x)) +} +func (Cluster_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } + +// Current status of the operation. +type Operation_Status int32 + +const ( + // Not set. + Operation_STATUS_UNSPECIFIED Operation_Status = 0 + // The operation has been created. + Operation_PENDING Operation_Status = 1 + // The operation is currently running. + Operation_RUNNING Operation_Status = 2 + // The operation is done, either cancelled or completed. + Operation_DONE Operation_Status = 3 + // The operation is aborting. + Operation_ABORTING Operation_Status = 4 +) + +var Operation_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "PENDING", + 2: "RUNNING", + 3: "DONE", + 4: "ABORTING", +} +var Operation_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "PENDING": 1, + "RUNNING": 2, + "DONE": 3, + "ABORTING": 4, +} + +func (x Operation_Status) String() string { + return proto.EnumName(Operation_Status_name, int32(x)) +} +func (Operation_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } + +// Operation type. +type Operation_Type int32 + +const ( + // Not set. + Operation_TYPE_UNSPECIFIED Operation_Type = 0 + // Cluster create. + Operation_CREATE_CLUSTER Operation_Type = 1 + // Cluster delete. + Operation_DELETE_CLUSTER Operation_Type = 2 + // A master upgrade. + Operation_UPGRADE_MASTER Operation_Type = 3 + // A node upgrade. + Operation_UPGRADE_NODES Operation_Type = 4 + // Cluster repair. 
+ Operation_REPAIR_CLUSTER Operation_Type = 5 + // Cluster update. + Operation_UPDATE_CLUSTER Operation_Type = 6 + // Node pool create. + Operation_CREATE_NODE_POOL Operation_Type = 7 + // Node pool delete. + Operation_DELETE_NODE_POOL Operation_Type = 8 + // Set node pool management. + Operation_SET_NODE_POOL_MANAGEMENT Operation_Type = 9 + // Automatic node pool repair. + Operation_AUTO_REPAIR_NODES Operation_Type = 10 + // Automatic node upgrade. + Operation_AUTO_UPGRADE_NODES Operation_Type = 11 + // Set labels. + Operation_SET_LABELS Operation_Type = 12 + // Set/generate master auth materials + Operation_SET_MASTER_AUTH Operation_Type = 13 + // Set node pool size. + Operation_SET_NODE_POOL_SIZE Operation_Type = 14 + // Updates network policy for a cluster. + Operation_SET_NETWORK_POLICY Operation_Type = 15 +) + +var Operation_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CREATE_CLUSTER", + 2: "DELETE_CLUSTER", + 3: "UPGRADE_MASTER", + 4: "UPGRADE_NODES", + 5: "REPAIR_CLUSTER", + 6: "UPDATE_CLUSTER", + 7: "CREATE_NODE_POOL", + 8: "DELETE_NODE_POOL", + 9: "SET_NODE_POOL_MANAGEMENT", + 10: "AUTO_REPAIR_NODES", + 11: "AUTO_UPGRADE_NODES", + 12: "SET_LABELS", + 13: "SET_MASTER_AUTH", + 14: "SET_NODE_POOL_SIZE", + 15: "SET_NETWORK_POLICY", +} +var Operation_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CREATE_CLUSTER": 1, + "DELETE_CLUSTER": 2, + "UPGRADE_MASTER": 3, + "UPGRADE_NODES": 4, + "REPAIR_CLUSTER": 5, + "UPDATE_CLUSTER": 6, + "CREATE_NODE_POOL": 7, + "DELETE_NODE_POOL": 8, + "SET_NODE_POOL_MANAGEMENT": 9, + "AUTO_REPAIR_NODES": 10, + "AUTO_UPGRADE_NODES": 11, + "SET_LABELS": 12, + "SET_MASTER_AUTH": 13, + "SET_NODE_POOL_SIZE": 14, + "SET_NETWORK_POLICY": 15, +} + +func (x Operation_Type) String() string { + return proto.EnumName(Operation_Type_name, int32(x)) +} +func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 1} } + +// operation type - what type of key rotation are we performing +type 
SetMasterAuthRequest_Action int32 + +const ( + // Operation is unknown and will error out + SetMasterAuthRequest_UNKNOWN SetMasterAuthRequest_Action = 0 + // Set the password to a user generated value. + SetMasterAuthRequest_SET_PASSWORD SetMasterAuthRequest_Action = 1 + // Generate a new password and set it to that. + SetMasterAuthRequest_GENERATE_PASSWORD SetMasterAuthRequest_Action = 2 +) + +var SetMasterAuthRequest_Action_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SET_PASSWORD", + 2: "GENERATE_PASSWORD", +} +var SetMasterAuthRequest_Action_value = map[string]int32{ + "UNKNOWN": 0, + "SET_PASSWORD": 1, + "GENERATE_PASSWORD": 2, +} + +func (x SetMasterAuthRequest_Action) String() string { + return proto.EnumName(SetMasterAuthRequest_Action_name, int32(x)) +} +func (SetMasterAuthRequest_Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{24, 0} +} + +// The current status of the node pool instance. +type NodePool_Status int32 + +const ( + // Not set. + NodePool_STATUS_UNSPECIFIED NodePool_Status = 0 + // The PROVISIONING state indicates the node pool is being created. + NodePool_PROVISIONING NodePool_Status = 1 + // The RUNNING state indicates the node pool has been created + // and is fully usable. + NodePool_RUNNING NodePool_Status = 2 + // The RUNNING_WITH_ERROR state indicates the node pool has been created + // and is partially usable. Some error state has occurred and some + // functionality may be impaired. Customer may need to reissue a request + // or trigger a new update. + NodePool_RUNNING_WITH_ERROR NodePool_Status = 3 + // The RECONCILING state indicates that some work is actively being done on + // the node pool, such as upgrading node software. Details can + // be found in the `statusMessage` field. + NodePool_RECONCILING NodePool_Status = 4 + // The STOPPING state indicates the node pool is being deleted. + NodePool_STOPPING NodePool_Status = 5 + // The ERROR state indicates the node pool may be unusable. 
Details + // can be found in the `statusMessage` field. + NodePool_ERROR NodePool_Status = 6 +) + +var NodePool_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "PROVISIONING", + 2: "RUNNING", + 3: "RUNNING_WITH_ERROR", + 4: "RECONCILING", + 5: "STOPPING", + 6: "ERROR", +} +var NodePool_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "PROVISIONING": 1, + "RUNNING": 2, + "RUNNING_WITH_ERROR": 3, + "RECONCILING": 4, + "STOPPING": 5, + "ERROR": 6, +} + +func (x NodePool_Status) String() string { + return proto.EnumName(NodePool_Status_name, int32(x)) +} +func (NodePool_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 0} } + +// Parameters that describe the nodes in a cluster. +type NodeConfig struct { + // The name of a Google Compute Engine [machine + // type](/compute/docs/machine-types) (e.g. + // `n1-standard-1`). + // + // If unspecified, the default machine type is + // `n1-standard-1`. + MachineType string `protobuf:"bytes,1,opt,name=machine_type,json=machineType" json:"machine_type,omitempty"` + // Size of the disk attached to each node, specified in GB. + // The smallest allowed disk size is 10GB. + // + // If unspecified, the default disk size is 100GB. + DiskSizeGb int32 `protobuf:"varint,2,opt,name=disk_size_gb,json=diskSizeGb" json:"disk_size_gb,omitempty"` + // The set of Google API scopes to be made available on all of the + // node VMs under the "default" service account. + // + // The following scopes are recommended, but not required, and by default are + // not included: + // + // * `https://www.googleapis.com/auth/compute` is required for mounting + // persistent storage on your nodes. + // * `https://www.googleapis.com/auth/devstorage.read_only` is required for + // communicating with **gcr.io** + // (the [Google Container Registry](/container-registry/)). 
+ // + // If unspecified, no scopes are added, unless Cloud Logging or Cloud + // Monitoring are enabled, in which case their required scopes will be added. + OauthScopes []string `protobuf:"bytes,3,rep,name=oauth_scopes,json=oauthScopes" json:"oauth_scopes,omitempty"` + // The Google Cloud Platform Service Account to be used by the node VMs. If + // no Service Account is specified, the "default" service account is used. + ServiceAccount string `protobuf:"bytes,9,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"` + // The metadata key/value pairs assigned to instances in the cluster. + // + // Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + // in length. These are reflected as part of a URL in the metadata server. + // Additionally, to avoid ambiguity, keys must not conflict with any other + // metadata keys for the project or be one of the four reserved keys: + // "instance-template", "kube-env", "startup-script", and "user-data" + // + // Values are free-form strings, and only have meaning as interpreted by + // the image running in the instance. The only restriction placed on them is + // that each value's size must be less than or equal to 32 KB. + // + // The total size of all keys and values must be less than 512 KB. + Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The image type to use for this node. Note that for a given image type, + // the latest version of it will be used. + ImageType string `protobuf:"bytes,5,opt,name=image_type,json=imageType" json:"image_type,omitempty"` + // The map of Kubernetes labels (key/value pairs) to be applied to each node. + // These will added in addition to any default label(s) that + // Kubernetes may apply to the node. 
+ // In case of conflict in label keys, the applied set may differ depending on + // the Kubernetes version -- it's best to assume the behavior is undefined + // and conflicts should be avoided. + // For more information, including usage and the valid values, see: + // http://kubernetes.io/v1.1/docs/user-guide/labels.html + Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The number of local SSD disks to be attached to the node. + // + // The limit for this value is dependant upon the maximum number of + // disks available on a machine per zone. See: + // https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits + // for more information. + LocalSsdCount int32 `protobuf:"varint,7,opt,name=local_ssd_count,json=localSsdCount" json:"local_ssd_count,omitempty"` + // The list of instance tags applied to all nodes. Tags are used to identify + // valid sources or targets for network firewalls and are specified by + // the client during cluster or node pool creation. Each tag within the list + // must comply with RFC1035. + Tags []string `protobuf:"bytes,8,rep,name=tags" json:"tags,omitempty"` + // Whether the nodes are created as preemptible VM instances. See: + // https://cloud.google.com/compute/docs/instances/preemptible for more + // information about preemptible VM instances. + Preemptible bool `protobuf:"varint,10,opt,name=preemptible" json:"preemptible,omitempty"` + // A list of hardware accelerators to be attached to each node. + // See https://cloud.google.com/compute/docs/gpus for more information about + // support for GPUs. 
+ Accelerators []*AcceleratorConfig `protobuf:"bytes,11,rep,name=accelerators" json:"accelerators,omitempty"` +} + +func (m *NodeConfig) Reset() { *m = NodeConfig{} } +func (m *NodeConfig) String() string { return proto.CompactTextString(m) } +func (*NodeConfig) ProtoMessage() {} +func (*NodeConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *NodeConfig) GetMachineType() string { + if m != nil { + return m.MachineType + } + return "" +} + +func (m *NodeConfig) GetDiskSizeGb() int32 { + if m != nil { + return m.DiskSizeGb + } + return 0 +} + +func (m *NodeConfig) GetOauthScopes() []string { + if m != nil { + return m.OauthScopes + } + return nil +} + +func (m *NodeConfig) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + +func (m *NodeConfig) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *NodeConfig) GetImageType() string { + if m != nil { + return m.ImageType + } + return "" +} + +func (m *NodeConfig) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NodeConfig) GetLocalSsdCount() int32 { + if m != nil { + return m.LocalSsdCount + } + return 0 +} + +func (m *NodeConfig) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *NodeConfig) GetPreemptible() bool { + if m != nil { + return m.Preemptible + } + return false +} + +func (m *NodeConfig) GetAccelerators() []*AcceleratorConfig { + if m != nil { + return m.Accelerators + } + return nil +} + +// The authentication information for accessing the master endpoint. +// Authentication can be done using HTTP basic auth or using client +// certificates. +type MasterAuth struct { + // The username to use for HTTP basic authentication to the master endpoint. + // For clusters v1.6.0 and later, you can disable basic authentication by + // providing an empty username. 
+ Username string `protobuf:"bytes,1,opt,name=username" json:"username,omitempty"` + // The password to use for HTTP basic authentication to the master endpoint. + // Because the master endpoint is open to the Internet, you should create a + // strong password. If a password is provided for cluster creation, username + // must be non-empty. + Password string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` + // Configuration for client certificate authentication on the cluster. If no + // configuration is specified, a client certificate is issued. + ClientCertificateConfig *ClientCertificateConfig `protobuf:"bytes,3,opt,name=client_certificate_config,json=clientCertificateConfig" json:"client_certificate_config,omitempty"` + // [Output only] Base64-encoded public certificate that is the root of + // trust for the cluster. + ClusterCaCertificate string `protobuf:"bytes,100,opt,name=cluster_ca_certificate,json=clusterCaCertificate" json:"cluster_ca_certificate,omitempty"` + // [Output only] Base64-encoded public certificate used by clients to + // authenticate to the cluster endpoint. + ClientCertificate string `protobuf:"bytes,101,opt,name=client_certificate,json=clientCertificate" json:"client_certificate,omitempty"` + // [Output only] Base64-encoded private key used by clients to authenticate + // to the cluster endpoint. 
+ ClientKey string `protobuf:"bytes,102,opt,name=client_key,json=clientKey" json:"client_key,omitempty"` +} + +func (m *MasterAuth) Reset() { *m = MasterAuth{} } +func (m *MasterAuth) String() string { return proto.CompactTextString(m) } +func (*MasterAuth) ProtoMessage() {} +func (*MasterAuth) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *MasterAuth) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *MasterAuth) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *MasterAuth) GetClientCertificateConfig() *ClientCertificateConfig { + if m != nil { + return m.ClientCertificateConfig + } + return nil +} + +func (m *MasterAuth) GetClusterCaCertificate() string { + if m != nil { + return m.ClusterCaCertificate + } + return "" +} + +func (m *MasterAuth) GetClientCertificate() string { + if m != nil { + return m.ClientCertificate + } + return "" +} + +func (m *MasterAuth) GetClientKey() string { + if m != nil { + return m.ClientKey + } + return "" +} + +// Configuration for client certificates on the cluster. +type ClientCertificateConfig struct { + // Issue a client certificate. + IssueClientCertificate bool `protobuf:"varint,1,opt,name=issue_client_certificate,json=issueClientCertificate" json:"issue_client_certificate,omitempty"` +} + +func (m *ClientCertificateConfig) Reset() { *m = ClientCertificateConfig{} } +func (m *ClientCertificateConfig) String() string { return proto.CompactTextString(m) } +func (*ClientCertificateConfig) ProtoMessage() {} +func (*ClientCertificateConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ClientCertificateConfig) GetIssueClientCertificate() bool { + if m != nil { + return m.IssueClientCertificate + } + return false +} + +// Configuration for the addons that can be automatically spun up in the +// cluster, enabling additional functionality. 
+type AddonsConfig struct { + // Configuration for the HTTP (L7) load balancing controller addon, which + // makes it easy to set up HTTP load balancers for services in a cluster. + HttpLoadBalancing *HttpLoadBalancing `protobuf:"bytes,1,opt,name=http_load_balancing,json=httpLoadBalancing" json:"http_load_balancing,omitempty"` + // Configuration for the horizontal pod autoscaling feature, which + // increases or decreases the number of replica pods a replication controller + // has based on the resource usage of the existing pods. + HorizontalPodAutoscaling *HorizontalPodAutoscaling `protobuf:"bytes,2,opt,name=horizontal_pod_autoscaling,json=horizontalPodAutoscaling" json:"horizontal_pod_autoscaling,omitempty"` + // Configuration for the Kubernetes Dashboard. + KubernetesDashboard *KubernetesDashboard `protobuf:"bytes,3,opt,name=kubernetes_dashboard,json=kubernetesDashboard" json:"kubernetes_dashboard,omitempty"` +} + +func (m *AddonsConfig) Reset() { *m = AddonsConfig{} } +func (m *AddonsConfig) String() string { return proto.CompactTextString(m) } +func (*AddonsConfig) ProtoMessage() {} +func (*AddonsConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *AddonsConfig) GetHttpLoadBalancing() *HttpLoadBalancing { + if m != nil { + return m.HttpLoadBalancing + } + return nil +} + +func (m *AddonsConfig) GetHorizontalPodAutoscaling() *HorizontalPodAutoscaling { + if m != nil { + return m.HorizontalPodAutoscaling + } + return nil +} + +func (m *AddonsConfig) GetKubernetesDashboard() *KubernetesDashboard { + if m != nil { + return m.KubernetesDashboard + } + return nil +} + +// Configuration options for the HTTP (L7) load balancing controller addon, +// which makes it easy to set up HTTP load balancers for services in a cluster. +type HttpLoadBalancing struct { + // Whether the HTTP Load Balancing controller is enabled in the cluster. + // When enabled, it runs a small pod in the cluster that manages the load + // balancers. 
+ Disabled bool `protobuf:"varint,1,opt,name=disabled" json:"disabled,omitempty"` +} + +func (m *HttpLoadBalancing) Reset() { *m = HttpLoadBalancing{} } +func (m *HttpLoadBalancing) String() string { return proto.CompactTextString(m) } +func (*HttpLoadBalancing) ProtoMessage() {} +func (*HttpLoadBalancing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *HttpLoadBalancing) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// Configuration options for the horizontal pod autoscaling feature, which +// increases or decreases the number of replica pods a replication controller +// has based on the resource usage of the existing pods. +type HorizontalPodAutoscaling struct { + // Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. + // When enabled, it ensures that a Heapster pod is running in the cluster, + // which is also used by the Cloud Monitoring service. + Disabled bool `protobuf:"varint,1,opt,name=disabled" json:"disabled,omitempty"` +} + +func (m *HorizontalPodAutoscaling) Reset() { *m = HorizontalPodAutoscaling{} } +func (m *HorizontalPodAutoscaling) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscaling) ProtoMessage() {} +func (*HorizontalPodAutoscaling) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *HorizontalPodAutoscaling) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// Configuration for the Kubernetes Dashboard. +type KubernetesDashboard struct { + // Whether the Kubernetes Dashboard is enabled for this cluster. 
+ Disabled bool `protobuf:"varint,1,opt,name=disabled" json:"disabled,omitempty"` +} + +func (m *KubernetesDashboard) Reset() { *m = KubernetesDashboard{} } +func (m *KubernetesDashboard) String() string { return proto.CompactTextString(m) } +func (*KubernetesDashboard) ProtoMessage() {} +func (*KubernetesDashboard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *KubernetesDashboard) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// Master authorized networks is a Beta feature. +// Configuration options for the master authorized networks feature. Enabled +// master authorized networks will disallow all external traffic to access +// Kubernetes master through HTTPS except traffic from the given CIDR blocks, +// Google Compute Engine Public IPs and Google Prod IPs. +type MasterAuthorizedNetworksConfig struct { + // Whether or not master authorized networks is enabled. + Enabled bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"` + // cidr_blocks define up to 10 external networks that could access + // Kubernetes master through HTTPS. + CidrBlocks []*MasterAuthorizedNetworksConfig_CidrBlock `protobuf:"bytes,2,rep,name=cidr_blocks,json=cidrBlocks" json:"cidr_blocks,omitempty"` +} + +func (m *MasterAuthorizedNetworksConfig) Reset() { *m = MasterAuthorizedNetworksConfig{} } +func (m *MasterAuthorizedNetworksConfig) String() string { return proto.CompactTextString(m) } +func (*MasterAuthorizedNetworksConfig) ProtoMessage() {} +func (*MasterAuthorizedNetworksConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *MasterAuthorizedNetworksConfig) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *MasterAuthorizedNetworksConfig) GetCidrBlocks() []*MasterAuthorizedNetworksConfig_CidrBlock { + if m != nil { + return m.CidrBlocks + } + return nil +} + +// CidrBlock contains an optional name and one CIDR block. 
+type MasterAuthorizedNetworksConfig_CidrBlock struct { + // display_name is an optional field for users to identify CIDR blocks. + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // cidr_block must be specified in CIDR notation. + CidrBlock string `protobuf:"bytes,2,opt,name=cidr_block,json=cidrBlock" json:"cidr_block,omitempty"` +} + +func (m *MasterAuthorizedNetworksConfig_CidrBlock) Reset() { + *m = MasterAuthorizedNetworksConfig_CidrBlock{} +} +func (m *MasterAuthorizedNetworksConfig_CidrBlock) String() string { return proto.CompactTextString(m) } +func (*MasterAuthorizedNetworksConfig_CidrBlock) ProtoMessage() {} +func (*MasterAuthorizedNetworksConfig_CidrBlock) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{7, 0} +} + +func (m *MasterAuthorizedNetworksConfig_CidrBlock) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *MasterAuthorizedNetworksConfig_CidrBlock) GetCidrBlock() string { + if m != nil { + return m.CidrBlock + } + return "" +} + +// Configuration for the legacy Attribute Based Access Control authorization +// mode. +type LegacyAbac struct { + // Whether the ABAC authorizer is enabled for this cluster. When enabled, + // identities in the system, including service accounts, nodes, and + // controllers, will have statically granted permissions beyond those + // provided by the RBAC configuration or IAM. + Enabled bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"` +} + +func (m *LegacyAbac) Reset() { *m = LegacyAbac{} } +func (m *LegacyAbac) String() string { return proto.CompactTextString(m) } +func (*LegacyAbac) ProtoMessage() {} +func (*LegacyAbac) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *LegacyAbac) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +// Configuration options for the NetworkPolicy feature. 
+// https://kubernetes.io/docs/concepts/services-networking/networkpolicies/ +type NetworkPolicy struct { + // The selected network policy provider. + Provider NetworkPolicy_Provider `protobuf:"varint,1,opt,name=provider,enum=google.container.v1.NetworkPolicy_Provider" json:"provider,omitempty"` + // Whether network policy is enabled on the cluster. + Enabled bool `protobuf:"varint,2,opt,name=enabled" json:"enabled,omitempty"` +} + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (m *NetworkPolicy) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *NetworkPolicy) GetProvider() NetworkPolicy_Provider { + if m != nil { + return m.Provider + } + return NetworkPolicy_PROVIDER_UNSPECIFIED +} + +func (m *NetworkPolicy) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +// Configuration for controlling how IPs are allocated in the cluster. +type IPAllocationPolicy struct { + // Whether alias IPs will be used for pod IPs in the cluster. + UseIpAliases bool `protobuf:"varint,1,opt,name=use_ip_aliases,json=useIpAliases" json:"use_ip_aliases,omitempty"` + // Whether a new subnetwork will be created automatically for the cluster. + // + // This field is only applicable when `use_ip_aliases` is true. + CreateSubnetwork bool `protobuf:"varint,2,opt,name=create_subnetwork,json=createSubnetwork" json:"create_subnetwork,omitempty"` + // A custom subnetwork name to be used if `create_subnetwork` is true. If + // this field is empty, then an automatic name will be chosen for the new + // subnetwork. + SubnetworkName string `protobuf:"bytes,3,opt,name=subnetwork_name,json=subnetworkName" json:"subnetwork_name,omitempty"` + // The IP address range for the cluster pod IPs. If this field is set, then + // `cluster.cluster_ipv4_cidr` must be left blank. 
+ // + // This field is only applicable when `use_ip_aliases` is true. + // + // Set to blank to have a range will be chosen with the default size. + // + // Set to /netmask (e.g. `/14`) to have a range be chosen with a specific + // netmask. + // + // Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. + // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range + // to use. + ClusterIpv4Cidr string `protobuf:"bytes,4,opt,name=cluster_ipv4_cidr,json=clusterIpv4Cidr" json:"cluster_ipv4_cidr,omitempty"` + // The IP address range of the instance IPs in this cluster. + // + // This is applicable only if `create_subnetwork` is true. + // + // Set to blank to have a range will be chosen with the default size. + // + // Set to /netmask (e.g. `/14`) to have a range be chosen with a specific + // netmask. + // + // Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. + // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range + // to use. + NodeIpv4Cidr string `protobuf:"bytes,5,opt,name=node_ipv4_cidr,json=nodeIpv4Cidr" json:"node_ipv4_cidr,omitempty"` + // The IP address range of the services IPs in this cluster. If blank, a range + // will be automatically chosen with the default size. + // + // This field is only applicable when `use_ip_aliases` is true. + // + // Set to blank to have a range will be chosen with the default size. + // + // Set to /netmask (e.g. `/14`) to have a range be chosen with a specific + // netmask. + // + // Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. + // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range + // to use. 
+ ServicesIpv4Cidr string `protobuf:"bytes,6,opt,name=services_ipv4_cidr,json=servicesIpv4Cidr" json:"services_ipv4_cidr,omitempty"` +} + +func (m *IPAllocationPolicy) Reset() { *m = IPAllocationPolicy{} } +func (m *IPAllocationPolicy) String() string { return proto.CompactTextString(m) } +func (*IPAllocationPolicy) ProtoMessage() {} +func (*IPAllocationPolicy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *IPAllocationPolicy) GetUseIpAliases() bool { + if m != nil { + return m.UseIpAliases + } + return false +} + +func (m *IPAllocationPolicy) GetCreateSubnetwork() bool { + if m != nil { + return m.CreateSubnetwork + } + return false +} + +func (m *IPAllocationPolicy) GetSubnetworkName() string { + if m != nil { + return m.SubnetworkName + } + return "" +} + +func (m *IPAllocationPolicy) GetClusterIpv4Cidr() string { + if m != nil { + return m.ClusterIpv4Cidr + } + return "" +} + +func (m *IPAllocationPolicy) GetNodeIpv4Cidr() string { + if m != nil { + return m.NodeIpv4Cidr + } + return "" +} + +func (m *IPAllocationPolicy) GetServicesIpv4Cidr() string { + if m != nil { + return m.ServicesIpv4Cidr + } + return "" +} + +// A Google Container Engine cluster. +type Cluster struct { + // The name of this cluster. The name must be unique within this project + // and zone, and can be up to 40 characters with the following restrictions: + // + // * Lowercase letters, numbers, and hyphens only. + // * Must start with a letter. + // * Must end with a number or a letter. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // An optional description of this cluster. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // The number of nodes to create in this cluster. You must ensure that your + // Compute Engine resource quota + // is sufficient for this number of instances. You must also have available + // firewall and routes quota. 
+ // For requests, this field should only be used in lieu of a + // "node_pool" object, since this configuration (along with the + // "node_config") will be used to create a "NodePool" object with an + // auto-generated name. Do not use this and a node_pool at the same time. + InitialNodeCount int32 `protobuf:"varint,3,opt,name=initial_node_count,json=initialNodeCount" json:"initial_node_count,omitempty"` + // Parameters used in creating the cluster's nodes. + // See `nodeConfig` for the description of its properties. + // For requests, this field should only be used in lieu of a + // "node_pool" object, since this configuration (along with the + // "initial_node_count") will be used to create a "NodePool" object with an + // auto-generated name. Do not use this and a node_pool at the same time. + // For responses, this field will be populated with the node configuration of + // the first node pool. + // + // If unspecified, the defaults are used. + NodeConfig *NodeConfig `protobuf:"bytes,4,opt,name=node_config,json=nodeConfig" json:"node_config,omitempty"` + // The authentication information for accessing the master endpoint. + MasterAuth *MasterAuth `protobuf:"bytes,5,opt,name=master_auth,json=masterAuth" json:"master_auth,omitempty"` + // The logging service the cluster should use to write logs. + // Currently available options: + // + // * `logging.googleapis.com` - the Google Cloud Logging service. + // * `none` - no logs will be exported from the cluster. + // * if left as an empty string,`logging.googleapis.com` will be used. + LoggingService string `protobuf:"bytes,6,opt,name=logging_service,json=loggingService" json:"logging_service,omitempty"` + // The monitoring service the cluster should use to write metrics. + // Currently available options: + // + // * `monitoring.googleapis.com` - the Google Cloud Monitoring service. + // * `none` - no metrics will be exported from the cluster. 
+ // * if left as an empty string, `monitoring.googleapis.com` will be used. + MonitoringService string `protobuf:"bytes,7,opt,name=monitoring_service,json=monitoringService" json:"monitoring_service,omitempty"` + // The name of the Google Compute Engine + // [network](/compute/docs/networks-and-firewalls#networks) to which the + // cluster is connected. If left unspecified, the `default` network + // will be used. + Network string `protobuf:"bytes,8,opt,name=network" json:"network,omitempty"` + // The IP address range of the container pods in this cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`). Leave blank to have + // one automatically chosen or specify a `/14` block in `10.0.0.0/8`. + ClusterIpv4Cidr string `protobuf:"bytes,9,opt,name=cluster_ipv4_cidr,json=clusterIpv4Cidr" json:"cluster_ipv4_cidr,omitempty"` + // Configurations for the various addons available to run in the cluster. + AddonsConfig *AddonsConfig `protobuf:"bytes,10,opt,name=addons_config,json=addonsConfig" json:"addons_config,omitempty"` + // The name of the Google Compute Engine + // [subnetwork](/compute/docs/subnetworks) to which the + // cluster is connected. + Subnetwork string `protobuf:"bytes,11,opt,name=subnetwork" json:"subnetwork,omitempty"` + // The node pools associated with this cluster. + // This field should not be set if "node_config" or "initial_node_count" are + // specified. + NodePools []*NodePool `protobuf:"bytes,12,rep,name=node_pools,json=nodePools" json:"node_pools,omitempty"` + // The list of Google Compute Engine + // [locations](/compute/docs/zones#available) in which the cluster's nodes + // should be located. + Locations []string `protobuf:"bytes,13,rep,name=locations" json:"locations,omitempty"` + // Kubernetes alpha features are enabled on this cluster. This includes alpha + // API groups (e.g. 
v1alpha1) and features that may not be production ready in + // the kubernetes version of the master and nodes. + // The cluster has no SLA for uptime and master/node upgrades are disabled. + // Alpha enabled clusters are automatically deleted thirty days after + // creation. + EnableKubernetesAlpha bool `protobuf:"varint,14,opt,name=enable_kubernetes_alpha,json=enableKubernetesAlpha" json:"enable_kubernetes_alpha,omitempty"` + // The resource labels for the cluster to use to annotate any related + // Google Compute Engine resources. + ResourceLabels map[string]string `protobuf:"bytes,15,rep,name=resource_labels,json=resourceLabels" json:"resource_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The fingerprint of the set of labels for this cluster. + LabelFingerprint string `protobuf:"bytes,16,opt,name=label_fingerprint,json=labelFingerprint" json:"label_fingerprint,omitempty"` + // Configuration for the legacy ABAC authorization mode. + LegacyAbac *LegacyAbac `protobuf:"bytes,18,opt,name=legacy_abac,json=legacyAbac" json:"legacy_abac,omitempty"` + // Configuration options for the NetworkPolicy feature. + NetworkPolicy *NetworkPolicy `protobuf:"bytes,19,opt,name=network_policy,json=networkPolicy" json:"network_policy,omitempty"` + // Configuration for cluster IP allocation. + IpAllocationPolicy *IPAllocationPolicy `protobuf:"bytes,20,opt,name=ip_allocation_policy,json=ipAllocationPolicy" json:"ip_allocation_policy,omitempty"` + // Master authorized networks is a Beta feature. + // The configuration options for master authorized networks feature. + MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `protobuf:"bytes,22,opt,name=master_authorized_networks_config,json=masterAuthorizedNetworksConfig" json:"master_authorized_networks_config,omitempty"` + // [Output only] Server-defined URL for the resource. 
+ SelfLink string `protobuf:"bytes,100,opt,name=self_link,json=selfLink" json:"self_link,omitempty"` + // [Output only] The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,101,opt,name=zone" json:"zone,omitempty"` + // [Output only] The IP address of this cluster's master endpoint. + // The endpoint can be accessed from the internet at + // `https://username:password@endpoint/`. + // + // See the `masterAuth` property of this resource for username and + // password information. + Endpoint string `protobuf:"bytes,102,opt,name=endpoint" json:"endpoint,omitempty"` + // The initial Kubernetes version for this cluster. Valid versions are those + // found in validMasterVersions returned by getServerConfig. The version can + // be upgraded over time; such upgrades are reflected in + // currentMasterVersion and currentNodeVersion. + InitialClusterVersion string `protobuf:"bytes,103,opt,name=initial_cluster_version,json=initialClusterVersion" json:"initial_cluster_version,omitempty"` + // [Output only] The current software version of the master endpoint. + CurrentMasterVersion string `protobuf:"bytes,104,opt,name=current_master_version,json=currentMasterVersion" json:"current_master_version,omitempty"` + // [Output only] The current version of the node software components. + // If they are currently at multiple versions because they're in the process + // of being upgraded, this reflects the minimum version of all nodes. + CurrentNodeVersion string `protobuf:"bytes,105,opt,name=current_node_version,json=currentNodeVersion" json:"current_node_version,omitempty"` + // [Output only] The time the cluster was created, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreateTime string `protobuf:"bytes,106,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // [Output only] The current status of this cluster. 
+ Status Cluster_Status `protobuf:"varint,107,opt,name=status,enum=google.container.v1.Cluster_Status" json:"status,omitempty"` + // [Output only] Additional information about the current status of this + // cluster, if available. + StatusMessage string `protobuf:"bytes,108,opt,name=status_message,json=statusMessage" json:"status_message,omitempty"` + // [Output only] The size of the address space on each node for hosting + // containers. This is provisioned from within the `container_ipv4_cidr` + // range. + NodeIpv4CidrSize int32 `protobuf:"varint,109,opt,name=node_ipv4_cidr_size,json=nodeIpv4CidrSize" json:"node_ipv4_cidr_size,omitempty"` + // [Output only] The IP address range of the Kubernetes services in + // this cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `1.2.3.4/29`). Service addresses are + // typically put in the last `/16` from the container CIDR. + ServicesIpv4Cidr string `protobuf:"bytes,110,opt,name=services_ipv4_cidr,json=servicesIpv4Cidr" json:"services_ipv4_cidr,omitempty"` + // [Output only] The resource URLs of [instance + // groups](/compute/docs/instance-groups/) associated with this + // cluster. + InstanceGroupUrls []string `protobuf:"bytes,111,rep,name=instance_group_urls,json=instanceGroupUrls" json:"instance_group_urls,omitempty"` + // [Output only] The number of nodes currently in the cluster. + CurrentNodeCount int32 `protobuf:"varint,112,opt,name=current_node_count,json=currentNodeCount" json:"current_node_count,omitempty"` + // [Output only] The time the cluster will be automatically + // deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. 
+ ExpireTime string `protobuf:"bytes,113,opt,name=expire_time,json=expireTime" json:"expire_time,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Cluster) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cluster) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Cluster) GetInitialNodeCount() int32 { + if m != nil { + return m.InitialNodeCount + } + return 0 +} + +func (m *Cluster) GetNodeConfig() *NodeConfig { + if m != nil { + return m.NodeConfig + } + return nil +} + +func (m *Cluster) GetMasterAuth() *MasterAuth { + if m != nil { + return m.MasterAuth + } + return nil +} + +func (m *Cluster) GetLoggingService() string { + if m != nil { + return m.LoggingService + } + return "" +} + +func (m *Cluster) GetMonitoringService() string { + if m != nil { + return m.MonitoringService + } + return "" +} + +func (m *Cluster) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *Cluster) GetClusterIpv4Cidr() string { + if m != nil { + return m.ClusterIpv4Cidr + } + return "" +} + +func (m *Cluster) GetAddonsConfig() *AddonsConfig { + if m != nil { + return m.AddonsConfig + } + return nil +} + +func (m *Cluster) GetSubnetwork() string { + if m != nil { + return m.Subnetwork + } + return "" +} + +func (m *Cluster) GetNodePools() []*NodePool { + if m != nil { + return m.NodePools + } + return nil +} + +func (m *Cluster) GetLocations() []string { + if m != nil { + return m.Locations + } + return nil +} + +func (m *Cluster) GetEnableKubernetesAlpha() bool { + if m != nil { + return m.EnableKubernetesAlpha + } + return false +} + +func (m *Cluster) GetResourceLabels() map[string]string { + if m != nil { + return m.ResourceLabels + } + return nil +} + 
+func (m *Cluster) GetLabelFingerprint() string { + if m != nil { + return m.LabelFingerprint + } + return "" +} + +func (m *Cluster) GetLegacyAbac() *LegacyAbac { + if m != nil { + return m.LegacyAbac + } + return nil +} + +func (m *Cluster) GetNetworkPolicy() *NetworkPolicy { + if m != nil { + return m.NetworkPolicy + } + return nil +} + +func (m *Cluster) GetIpAllocationPolicy() *IPAllocationPolicy { + if m != nil { + return m.IpAllocationPolicy + } + return nil +} + +func (m *Cluster) GetMasterAuthorizedNetworksConfig() *MasterAuthorizedNetworksConfig { + if m != nil { + return m.MasterAuthorizedNetworksConfig + } + return nil +} + +func (m *Cluster) GetSelfLink() string { + if m != nil { + return m.SelfLink + } + return "" +} + +func (m *Cluster) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *Cluster) GetEndpoint() string { + if m != nil { + return m.Endpoint + } + return "" +} + +func (m *Cluster) GetInitialClusterVersion() string { + if m != nil { + return m.InitialClusterVersion + } + return "" +} + +func (m *Cluster) GetCurrentMasterVersion() string { + if m != nil { + return m.CurrentMasterVersion + } + return "" +} + +func (m *Cluster) GetCurrentNodeVersion() string { + if m != nil { + return m.CurrentNodeVersion + } + return "" +} + +func (m *Cluster) GetCreateTime() string { + if m != nil { + return m.CreateTime + } + return "" +} + +func (m *Cluster) GetStatus() Cluster_Status { + if m != nil { + return m.Status + } + return Cluster_STATUS_UNSPECIFIED +} + +func (m *Cluster) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Cluster) GetNodeIpv4CidrSize() int32 { + if m != nil { + return m.NodeIpv4CidrSize + } + return 0 +} + +func (m *Cluster) GetServicesIpv4Cidr() string { + if m != nil { + return m.ServicesIpv4Cidr + } + return "" +} + +func (m *Cluster) GetInstanceGroupUrls() []string { + if m != nil { + return m.InstanceGroupUrls + } + return nil +} + +func (m 
*Cluster) GetCurrentNodeCount() int32 { + if m != nil { + return m.CurrentNodeCount + } + return 0 +} + +func (m *Cluster) GetExpireTime() string { + if m != nil { + return m.ExpireTime + } + return "" +} + +// ClusterUpdate describes an update to the cluster. Exactly one update can +// be applied to a cluster with each request, so at most one field can be +// provided. +type ClusterUpdate struct { + // The Kubernetes version to change the nodes to (typically an + // upgrade). Use `-` to upgrade to the latest version supported by + // the server. + DesiredNodeVersion string `protobuf:"bytes,4,opt,name=desired_node_version,json=desiredNodeVersion" json:"desired_node_version,omitempty"` + // The monitoring service the cluster should use to write metrics. + // Currently available options: + // + // * "monitoring.googleapis.com" - the Google Cloud Monitoring service + // * "none" - no metrics will be exported from the cluster + DesiredMonitoringService string `protobuf:"bytes,5,opt,name=desired_monitoring_service,json=desiredMonitoringService" json:"desired_monitoring_service,omitempty"` + // Configurations for the various addons available to run in the cluster. + DesiredAddonsConfig *AddonsConfig `protobuf:"bytes,6,opt,name=desired_addons_config,json=desiredAddonsConfig" json:"desired_addons_config,omitempty"` + // The node pool to be upgraded. This field is mandatory if + // "desired_node_version", "desired_image_family" or + // "desired_node_pool_autoscaling" is specified and there is more than one + // node pool on the cluster. + DesiredNodePoolId string `protobuf:"bytes,7,opt,name=desired_node_pool_id,json=desiredNodePoolId" json:"desired_node_pool_id,omitempty"` + // The desired image type for the node pool. + // NOTE: Set the "desired_node_pool" field as well. 
+ DesiredImageType string `protobuf:"bytes,8,opt,name=desired_image_type,json=desiredImageType" json:"desired_image_type,omitempty"` + // Autoscaler configuration for the node pool specified in + // desired_node_pool_id. If there is only one pool in the + // cluster and desired_node_pool_id is not provided then + // the change applies to that single node pool. + DesiredNodePoolAutoscaling *NodePoolAutoscaling `protobuf:"bytes,9,opt,name=desired_node_pool_autoscaling,json=desiredNodePoolAutoscaling" json:"desired_node_pool_autoscaling,omitempty"` + // The desired list of Google Compute Engine + // [locations](/compute/docs/zones#available) in which the cluster's nodes + // should be located. Changing the locations a cluster is in will result + // in nodes being either created or removed from the cluster, depending on + // whether locations are being added or removed. + // + // This list must always include the cluster's primary zone. + DesiredLocations []string `protobuf:"bytes,10,rep,name=desired_locations,json=desiredLocations" json:"desired_locations,omitempty"` + // Master authorized networks is a Beta feature. + // The desired configuration options for master authorized networks feature. + DesiredMasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `protobuf:"bytes,12,opt,name=desired_master_authorized_networks_config,json=desiredMasterAuthorizedNetworksConfig" json:"desired_master_authorized_networks_config,omitempty"` + // The Kubernetes version to change the master to. The only valid value is the + // latest supported version. Use "-" to have the server automatically select + // the latest version. 
+ DesiredMasterVersion string `protobuf:"bytes,100,opt,name=desired_master_version,json=desiredMasterVersion" json:"desired_master_version,omitempty"` +} + +func (m *ClusterUpdate) Reset() { *m = ClusterUpdate{} } +func (m *ClusterUpdate) String() string { return proto.CompactTextString(m) } +func (*ClusterUpdate) ProtoMessage() {} +func (*ClusterUpdate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *ClusterUpdate) GetDesiredNodeVersion() string { + if m != nil { + return m.DesiredNodeVersion + } + return "" +} + +func (m *ClusterUpdate) GetDesiredMonitoringService() string { + if m != nil { + return m.DesiredMonitoringService + } + return "" +} + +func (m *ClusterUpdate) GetDesiredAddonsConfig() *AddonsConfig { + if m != nil { + return m.DesiredAddonsConfig + } + return nil +} + +func (m *ClusterUpdate) GetDesiredNodePoolId() string { + if m != nil { + return m.DesiredNodePoolId + } + return "" +} + +func (m *ClusterUpdate) GetDesiredImageType() string { + if m != nil { + return m.DesiredImageType + } + return "" +} + +func (m *ClusterUpdate) GetDesiredNodePoolAutoscaling() *NodePoolAutoscaling { + if m != nil { + return m.DesiredNodePoolAutoscaling + } + return nil +} + +func (m *ClusterUpdate) GetDesiredLocations() []string { + if m != nil { + return m.DesiredLocations + } + return nil +} + +func (m *ClusterUpdate) GetDesiredMasterAuthorizedNetworksConfig() *MasterAuthorizedNetworksConfig { + if m != nil { + return m.DesiredMasterAuthorizedNetworksConfig + } + return nil +} + +func (m *ClusterUpdate) GetDesiredMasterVersion() string { + if m != nil { + return m.DesiredMasterVersion + } + return "" +} + +// This operation resource represents operations that may have happened or are +// happening on the cluster. All fields are output only. +type Operation struct { + // The server-assigned ID for the operation. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the operation + // is taking place. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The operation type. + OperationType Operation_Type `protobuf:"varint,3,opt,name=operation_type,json=operationType,enum=google.container.v1.Operation_Type" json:"operation_type,omitempty"` + // The current status of the operation. + Status Operation_Status `protobuf:"varint,4,opt,name=status,enum=google.container.v1.Operation_Status" json:"status,omitempty"` + // Detailed operation progress, if available. + Detail string `protobuf:"bytes,8,opt,name=detail" json:"detail,omitempty"` + // If an error has occurred, a textual description of the error. + StatusMessage string `protobuf:"bytes,5,opt,name=status_message,json=statusMessage" json:"status_message,omitempty"` + // Server-defined URL for the resource. + SelfLink string `protobuf:"bytes,6,opt,name=self_link,json=selfLink" json:"self_link,omitempty"` + // Server-defined URL for the target of the operation. + TargetLink string `protobuf:"bytes,7,opt,name=target_link,json=targetLink" json:"target_link,omitempty"` + // [Output only] The time the operation started, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + StartTime string `protobuf:"bytes,10,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // [Output only] The time the operation completed, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. 
+ EndTime string `protobuf:"bytes,11,opt,name=end_time,json=endTime" json:"end_time,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Operation) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Operation) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *Operation) GetOperationType() Operation_Type { + if m != nil { + return m.OperationType + } + return Operation_TYPE_UNSPECIFIED +} + +func (m *Operation) GetStatus() Operation_Status { + if m != nil { + return m.Status + } + return Operation_STATUS_UNSPECIFIED +} + +func (m *Operation) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *Operation) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Operation) GetSelfLink() string { + if m != nil { + return m.SelfLink + } + return "" +} + +func (m *Operation) GetTargetLink() string { + if m != nil { + return m.TargetLink + } + return "" +} + +func (m *Operation) GetStartTime() string { + if m != nil { + return m.StartTime + } + return "" +} + +func (m *Operation) GetEndTime() string { + if m != nil { + return m.EndTime + } + return "" +} + +// CreateClusterRequest creates a cluster. +type CreateClusterRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. 
+ Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // A [cluster + // resource](/container-engine/reference/rest/v1/projects.zones.clusters) + Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *CreateClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CreateClusterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *CreateClusterRequest) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +// GetClusterRequest gets the settings of a cluster. +type GetClusterRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to retrieve. 
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *GetClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetClusterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *GetClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// UpdateClusterRequest updates the settings of a cluster. +type UpdateClusterRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // A description of the update. 
+ Update *ClusterUpdate `protobuf:"bytes,4,opt,name=update" json:"update,omitempty"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *UpdateClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateClusterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *UpdateClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterRequest) GetUpdate() *ClusterUpdate { + if m != nil { + return m.Update + } + return nil +} + +// UpdateNodePoolRequests update a node pool's image and/or version. +type UpdateNodePoolRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The name of the node pool to upgrade. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId" json:"node_pool_id,omitempty"` + // The Kubernetes version to change the nodes to (typically an + // upgrade). Use `-` to upgrade to the latest version supported by + // the server. + NodeVersion string `protobuf:"bytes,5,opt,name=node_version,json=nodeVersion" json:"node_version,omitempty"` + // The desired image type for the node pool. 
+ ImageType string `protobuf:"bytes,6,opt,name=image_type,json=imageType" json:"image_type,omitempty"` +} + +func (m *UpdateNodePoolRequest) Reset() { *m = UpdateNodePoolRequest{} } +func (m *UpdateNodePoolRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateNodePoolRequest) ProtoMessage() {} +func (*UpdateNodePoolRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *UpdateNodePoolRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateNodePoolRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *UpdateNodePoolRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateNodePoolRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *UpdateNodePoolRequest) GetNodeVersion() string { + if m != nil { + return m.NodeVersion + } + return "" +} + +func (m *UpdateNodePoolRequest) GetImageType() string { + if m != nil { + return m.ImageType + } + return "" +} + +// SetNodePoolAutoscalingRequest sets the autoscaler settings of a node pool. +type SetNodePoolAutoscalingRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The name of the node pool to upgrade. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId" json:"node_pool_id,omitempty"` + // Autoscaling configuration for the node pool. 
+ Autoscaling *NodePoolAutoscaling `protobuf:"bytes,5,opt,name=autoscaling" json:"autoscaling,omitempty"` +} + +func (m *SetNodePoolAutoscalingRequest) Reset() { *m = SetNodePoolAutoscalingRequest{} } +func (m *SetNodePoolAutoscalingRequest) String() string { return proto.CompactTextString(m) } +func (*SetNodePoolAutoscalingRequest) ProtoMessage() {} +func (*SetNodePoolAutoscalingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *SetNodePoolAutoscalingRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetNodePoolAutoscalingRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetNodePoolAutoscalingRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetNodePoolAutoscalingRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *SetNodePoolAutoscalingRequest) GetAutoscaling() *NodePoolAutoscaling { + if m != nil { + return m.Autoscaling + } + return nil +} + +// SetLoggingServiceRequest sets the logging service of a cluster. +type SetLoggingServiceRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The logging service the cluster should use to write metrics. 
+ // Currently available options: + // + // * "logging.googleapis.com" - the Google Cloud Logging service + // * "none" - no metrics will be exported from the cluster + LoggingService string `protobuf:"bytes,4,opt,name=logging_service,json=loggingService" json:"logging_service,omitempty"` +} + +func (m *SetLoggingServiceRequest) Reset() { *m = SetLoggingServiceRequest{} } +func (m *SetLoggingServiceRequest) String() string { return proto.CompactTextString(m) } +func (*SetLoggingServiceRequest) ProtoMessage() {} +func (*SetLoggingServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *SetLoggingServiceRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetLoggingServiceRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetLoggingServiceRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetLoggingServiceRequest) GetLoggingService() string { + if m != nil { + return m.LoggingService + } + return "" +} + +// SetMonitoringServiceRequest sets the monitoring service of a cluster. +type SetMonitoringServiceRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The monitoring service the cluster should use to write metrics. 
+ // Currently available options: + // + // * "monitoring.googleapis.com" - the Google Cloud Monitoring service + // * "none" - no metrics will be exported from the cluster + MonitoringService string `protobuf:"bytes,4,opt,name=monitoring_service,json=monitoringService" json:"monitoring_service,omitempty"` +} + +func (m *SetMonitoringServiceRequest) Reset() { *m = SetMonitoringServiceRequest{} } +func (m *SetMonitoringServiceRequest) String() string { return proto.CompactTextString(m) } +func (*SetMonitoringServiceRequest) ProtoMessage() {} +func (*SetMonitoringServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *SetMonitoringServiceRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetMonitoringServiceRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetMonitoringServiceRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetMonitoringServiceRequest) GetMonitoringService() string { + if m != nil { + return m.MonitoringService + } + return "" +} + +// SetAddonsConfigRequest sets the addons associated with the cluster. +type SetAddonsConfigRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The desired configurations for the various addons available to run in the + // cluster. 
+ AddonsConfig *AddonsConfig `protobuf:"bytes,4,opt,name=addons_config,json=addonsConfig" json:"addons_config,omitempty"` +} + +func (m *SetAddonsConfigRequest) Reset() { *m = SetAddonsConfigRequest{} } +func (m *SetAddonsConfigRequest) String() string { return proto.CompactTextString(m) } +func (*SetAddonsConfigRequest) ProtoMessage() {} +func (*SetAddonsConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *SetAddonsConfigRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetAddonsConfigRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetAddonsConfigRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetAddonsConfigRequest) GetAddonsConfig() *AddonsConfig { + if m != nil { + return m.AddonsConfig + } + return nil +} + +// SetLocationsRequest sets the locations of the cluster. +type SetLocationsRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The desired list of Google Compute Engine + // [locations](/compute/docs/zones#available) in which the cluster's nodes + // should be located. Changing the locations a cluster is in will result + // in nodes being either created or removed from the cluster, depending on + // whether locations are being added or removed. + // + // This list must always include the cluster's primary zone. 
+ Locations []string `protobuf:"bytes,4,rep,name=locations" json:"locations,omitempty"` +} + +func (m *SetLocationsRequest) Reset() { *m = SetLocationsRequest{} } +func (m *SetLocationsRequest) String() string { return proto.CompactTextString(m) } +func (*SetLocationsRequest) ProtoMessage() {} +func (*SetLocationsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *SetLocationsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetLocationsRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetLocationsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetLocationsRequest) GetLocations() []string { + if m != nil { + return m.Locations + } + return nil +} + +// UpdateMasterRequest updates the master of the cluster. +type UpdateMasterRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The Kubernetes version to change the master to. The only valid value is the + // latest supported version. Use "-" to have the server automatically select + // the latest version. 
+ MasterVersion string `protobuf:"bytes,4,opt,name=master_version,json=masterVersion" json:"master_version,omitempty"` +} + +func (m *UpdateMasterRequest) Reset() { *m = UpdateMasterRequest{} } +func (m *UpdateMasterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateMasterRequest) ProtoMessage() {} +func (*UpdateMasterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *UpdateMasterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateMasterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *UpdateMasterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateMasterRequest) GetMasterVersion() string { + if m != nil { + return m.MasterVersion + } + return "" +} + +// SetMasterAuthRequest updates the admin password of a cluster. +type SetMasterAuthRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to upgrade. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The exact form of action to be taken on the master auth + Action SetMasterAuthRequest_Action `protobuf:"varint,4,opt,name=action,enum=google.container.v1.SetMasterAuthRequest_Action" json:"action,omitempty"` + // A description of the update. 
+ Update *MasterAuth `protobuf:"bytes,5,opt,name=update" json:"update,omitempty"` +} + +func (m *SetMasterAuthRequest) Reset() { *m = SetMasterAuthRequest{} } +func (m *SetMasterAuthRequest) String() string { return proto.CompactTextString(m) } +func (*SetMasterAuthRequest) ProtoMessage() {} +func (*SetMasterAuthRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *SetMasterAuthRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetMasterAuthRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetMasterAuthRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetMasterAuthRequest) GetAction() SetMasterAuthRequest_Action { + if m != nil { + return m.Action + } + return SetMasterAuthRequest_UNKNOWN +} + +func (m *SetMasterAuthRequest) GetUpdate() *MasterAuth { + if m != nil { + return m.Update + } + return nil +} + +// DeleteClusterRequest deletes a cluster. +type DeleteClusterRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to delete. 
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *DeleteClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DeleteClusterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *DeleteClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// ListClustersRequest lists clusters. +type ListClustersRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides, or "-" for all zones. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *ListClustersRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListClustersRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// ListClustersResponse is the result of ListClustersRequest. +type ListClustersResponse struct { + // A list of clusters in the project in the specified zone, or + // across all ones. 
+ Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` + // If any zones are listed here, the list of clusters returned + // may be missing those zones. + MissingZones []string `protobuf:"bytes,2,rep,name=missing_zones,json=missingZones" json:"missing_zones,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetMissingZones() []string { + if m != nil { + return m.MissingZones + } + return nil +} + +// GetOperationRequest gets a single operation. +type GetOperationRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The server-assigned `name` of the operation. 
+ OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` +} + +func (m *GetOperationRequest) Reset() { *m = GetOperationRequest{} } +func (m *GetOperationRequest) String() string { return proto.CompactTextString(m) } +func (*GetOperationRequest) ProtoMessage() {} +func (*GetOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *GetOperationRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetOperationRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *GetOperationRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +// ListOperationsRequest lists operations. +type ListOperationsRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine [zone](/compute/docs/zones#available) + // to return operations for, or `-` for all zones. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` +} + +func (m *ListOperationsRequest) Reset() { *m = ListOperationsRequest{} } +func (m *ListOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListOperationsRequest) ProtoMessage() {} +func (*ListOperationsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *ListOperationsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListOperationsRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// CancelOperationRequest cancels a single operation. 
+type CancelOperationRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the operation resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The server-assigned `name` of the operation. + OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` +} + +func (m *CancelOperationRequest) Reset() { *m = CancelOperationRequest{} } +func (m *CancelOperationRequest) String() string { return proto.CompactTextString(m) } +func (*CancelOperationRequest) ProtoMessage() {} +func (*CancelOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +func (m *CancelOperationRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CancelOperationRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *CancelOperationRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +// ListOperationsResponse is the result of ListOperationsRequest. +type ListOperationsResponse struct { + // A list of operations in the project in the specified zone. + Operations []*Operation `protobuf:"bytes,1,rep,name=operations" json:"operations,omitempty"` + // If any zones are listed here, the list of operations returned + // may be missing the operations from those zones. 
+ MissingZones []string `protobuf:"bytes,2,rep,name=missing_zones,json=missingZones" json:"missing_zones,omitempty"` +} + +func (m *ListOperationsResponse) Reset() { *m = ListOperationsResponse{} } +func (m *ListOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListOperationsResponse) ProtoMessage() {} +func (*ListOperationsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *ListOperationsResponse) GetOperations() []*Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListOperationsResponse) GetMissingZones() []string { + if m != nil { + return m.MissingZones + } + return nil +} + +// Gets the current Container Engine service configuration. +type GetServerConfigRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine [zone](/compute/docs/zones#available) + // to return operations for. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` +} + +func (m *GetServerConfigRequest) Reset() { *m = GetServerConfigRequest{} } +func (m *GetServerConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetServerConfigRequest) ProtoMessage() {} +func (*GetServerConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *GetServerConfigRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetServerConfigRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Container Engine service configuration. +type ServerConfig struct { + // Version of Kubernetes the service deploys by default. 
+ DefaultClusterVersion string `protobuf:"bytes,1,opt,name=default_cluster_version,json=defaultClusterVersion" json:"default_cluster_version,omitempty"` + // List of valid node upgrade target versions. + ValidNodeVersions []string `protobuf:"bytes,3,rep,name=valid_node_versions,json=validNodeVersions" json:"valid_node_versions,omitempty"` + // Default image type. + DefaultImageType string `protobuf:"bytes,4,opt,name=default_image_type,json=defaultImageType" json:"default_image_type,omitempty"` + // List of valid image types. + ValidImageTypes []string `protobuf:"bytes,5,rep,name=valid_image_types,json=validImageTypes" json:"valid_image_types,omitempty"` + // List of valid master versions. + ValidMasterVersions []string `protobuf:"bytes,6,rep,name=valid_master_versions,json=validMasterVersions" json:"valid_master_versions,omitempty"` +} + +func (m *ServerConfig) Reset() { *m = ServerConfig{} } +func (m *ServerConfig) String() string { return proto.CompactTextString(m) } +func (*ServerConfig) ProtoMessage() {} +func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *ServerConfig) GetDefaultClusterVersion() string { + if m != nil { + return m.DefaultClusterVersion + } + return "" +} + +func (m *ServerConfig) GetValidNodeVersions() []string { + if m != nil { + return m.ValidNodeVersions + } + return nil +} + +func (m *ServerConfig) GetDefaultImageType() string { + if m != nil { + return m.DefaultImageType + } + return "" +} + +func (m *ServerConfig) GetValidImageTypes() []string { + if m != nil { + return m.ValidImageTypes + } + return nil +} + +func (m *ServerConfig) GetValidMasterVersions() []string { + if m != nil { + return m.ValidMasterVersions + } + return nil +} + +// CreateNodePoolRequest creates a node pool for a cluster. +type CreateNodePoolRequest struct { + // The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). 
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The node pool to create. + NodePool *NodePool `protobuf:"bytes,4,opt,name=node_pool,json=nodePool" json:"node_pool,omitempty"` +} + +func (m *CreateNodePoolRequest) Reset() { *m = CreateNodePoolRequest{} } +func (m *CreateNodePoolRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNodePoolRequest) ProtoMessage() {} +func (*CreateNodePoolRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *CreateNodePoolRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CreateNodePoolRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *CreateNodePoolRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateNodePoolRequest) GetNodePool() *NodePool { + if m != nil { + return m.NodePool + } + return nil +} + +// DeleteNodePoolRequest deletes a node pool for a cluster. +type DeleteNodePoolRequest struct { + // The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster. 
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The name of the node pool to delete. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId" json:"node_pool_id,omitempty"` +} + +func (m *DeleteNodePoolRequest) Reset() { *m = DeleteNodePoolRequest{} } +func (m *DeleteNodePoolRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNodePoolRequest) ProtoMessage() {} +func (*DeleteNodePoolRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *DeleteNodePoolRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DeleteNodePoolRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *DeleteNodePoolRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteNodePoolRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +// ListNodePoolsRequest lists the node pool(s) for a cluster. +type ListNodePoolsRequest struct { + // The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster. 
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` +} + +func (m *ListNodePoolsRequest) Reset() { *m = ListNodePoolsRequest{} } +func (m *ListNodePoolsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNodePoolsRequest) ProtoMessage() {} +func (*ListNodePoolsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *ListNodePoolsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListNodePoolsRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *ListNodePoolsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// GetNodePoolRequest retrieves a node pool for a cluster. +type GetNodePoolRequest struct { + // The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The name of the node pool. 
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId" json:"node_pool_id,omitempty"` +} + +func (m *GetNodePoolRequest) Reset() { *m = GetNodePoolRequest{} } +func (m *GetNodePoolRequest) String() string { return proto.CompactTextString(m) } +func (*GetNodePoolRequest) ProtoMessage() {} +func (*GetNodePoolRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +func (m *GetNodePoolRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetNodePoolRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *GetNodePoolRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetNodePoolRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +// NodePool contains the name and configuration for a cluster's node pool. +// Node pools are a set of nodes (i.e. VM's), with a common configuration and +// specification, under the control of the cluster master. They may have a set +// of Kubernetes labels applied to them, which may be used to reference them +// during pod scheduling. They may also be resized up or down, to accommodate +// the workload. +type NodePool struct { + // The name of the node pool. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The node configuration of the pool. + Config *NodeConfig `protobuf:"bytes,2,opt,name=config" json:"config,omitempty"` + // The initial node count for the pool. You must ensure that your + // Compute Engine resource quota + // is sufficient for this number of instances. You must also have available + // firewall and routes quota. + InitialNodeCount int32 `protobuf:"varint,3,opt,name=initial_node_count,json=initialNodeCount" json:"initial_node_count,omitempty"` + // [Output only] Server-defined URL for the resource. 
+ SelfLink string `protobuf:"bytes,100,opt,name=self_link,json=selfLink" json:"self_link,omitempty"` + // [Output only] The version of the Kubernetes of this node. + Version string `protobuf:"bytes,101,opt,name=version" json:"version,omitempty"` + // [Output only] The resource URLs of [instance + // groups](/compute/docs/instance-groups/) associated with this + // node pool. + InstanceGroupUrls []string `protobuf:"bytes,102,rep,name=instance_group_urls,json=instanceGroupUrls" json:"instance_group_urls,omitempty"` + // [Output only] The status of the nodes in this pool instance. + Status NodePool_Status `protobuf:"varint,103,opt,name=status,enum=google.container.v1.NodePool_Status" json:"status,omitempty"` + // [Output only] Additional information about the current status of this + // node pool instance, if available. + StatusMessage string `protobuf:"bytes,104,opt,name=status_message,json=statusMessage" json:"status_message,omitempty"` + // Autoscaler configuration for this NodePool. Autoscaler is enabled + // only if a valid configuration is present. + Autoscaling *NodePoolAutoscaling `protobuf:"bytes,4,opt,name=autoscaling" json:"autoscaling,omitempty"` + // NodeManagement configuration for this NodePool. 
+ Management *NodeManagement `protobuf:"bytes,5,opt,name=management" json:"management,omitempty"` +} + +func (m *NodePool) Reset() { *m = NodePool{} } +func (m *NodePool) String() string { return proto.CompactTextString(m) } +func (*NodePool) ProtoMessage() {} +func (*NodePool) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *NodePool) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NodePool) GetConfig() *NodeConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *NodePool) GetInitialNodeCount() int32 { + if m != nil { + return m.InitialNodeCount + } + return 0 +} + +func (m *NodePool) GetSelfLink() string { + if m != nil { + return m.SelfLink + } + return "" +} + +func (m *NodePool) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *NodePool) GetInstanceGroupUrls() []string { + if m != nil { + return m.InstanceGroupUrls + } + return nil +} + +func (m *NodePool) GetStatus() NodePool_Status { + if m != nil { + return m.Status + } + return NodePool_STATUS_UNSPECIFIED +} + +func (m *NodePool) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *NodePool) GetAutoscaling() *NodePoolAutoscaling { + if m != nil { + return m.Autoscaling + } + return nil +} + +func (m *NodePool) GetManagement() *NodeManagement { + if m != nil { + return m.Management + } + return nil +} + +// NodeManagement defines the set of node management services turned on for the +// node pool. +type NodeManagement struct { + // A flag that specifies whether node auto-upgrade is enabled for the node + // pool. If enabled, node auto-upgrade helps keep the nodes in your node pool + // up to date with the latest release version of Kubernetes. + AutoUpgrade bool `protobuf:"varint,1,opt,name=auto_upgrade,json=autoUpgrade" json:"auto_upgrade,omitempty"` + // A flag that specifies whether the node auto-repair is enabled for the node + // pool. 
If enabled, the nodes in this node pool will be monitored and, if + // they fail health checks too many times, an automatic repair action will be + // triggered. + AutoRepair bool `protobuf:"varint,2,opt,name=auto_repair,json=autoRepair" json:"auto_repair,omitempty"` + // Specifies the Auto Upgrade knobs for the node pool. + UpgradeOptions *AutoUpgradeOptions `protobuf:"bytes,10,opt,name=upgrade_options,json=upgradeOptions" json:"upgrade_options,omitempty"` +} + +func (m *NodeManagement) Reset() { *m = NodeManagement{} } +func (m *NodeManagement) String() string { return proto.CompactTextString(m) } +func (*NodeManagement) ProtoMessage() {} +func (*NodeManagement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +func (m *NodeManagement) GetAutoUpgrade() bool { + if m != nil { + return m.AutoUpgrade + } + return false +} + +func (m *NodeManagement) GetAutoRepair() bool { + if m != nil { + return m.AutoRepair + } + return false +} + +func (m *NodeManagement) GetUpgradeOptions() *AutoUpgradeOptions { + if m != nil { + return m.UpgradeOptions + } + return nil +} + +// AutoUpgradeOptions defines the set of options for the user to control how +// the Auto Upgrades will proceed. +type AutoUpgradeOptions struct { + // [Output only] This field is set when upgrades are about to commence + // with the approximate start time for the upgrades, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + AutoUpgradeStartTime string `protobuf:"bytes,1,opt,name=auto_upgrade_start_time,json=autoUpgradeStartTime" json:"auto_upgrade_start_time,omitempty"` + // [Output only] This field is set when upgrades are about to commence + // with the description of the upgrade. 
+ Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *AutoUpgradeOptions) Reset() { *m = AutoUpgradeOptions{} } +func (m *AutoUpgradeOptions) String() string { return proto.CompactTextString(m) } +func (*AutoUpgradeOptions) ProtoMessage() {} +func (*AutoUpgradeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *AutoUpgradeOptions) GetAutoUpgradeStartTime() string { + if m != nil { + return m.AutoUpgradeStartTime + } + return "" +} + +func (m *AutoUpgradeOptions) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// SetNodePoolManagementRequest sets the node management properties of a node +// pool. +type SetNodePoolManagementRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to update. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The name of the node pool to update. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId" json:"node_pool_id,omitempty"` + // NodeManagement configuration for the node pool. 
+ Management *NodeManagement `protobuf:"bytes,5,opt,name=management" json:"management,omitempty"` +} + +func (m *SetNodePoolManagementRequest) Reset() { *m = SetNodePoolManagementRequest{} } +func (m *SetNodePoolManagementRequest) String() string { return proto.CompactTextString(m) } +func (*SetNodePoolManagementRequest) ProtoMessage() {} +func (*SetNodePoolManagementRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *SetNodePoolManagementRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetNodePoolManagementRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetNodePoolManagementRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetNodePoolManagementRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *SetNodePoolManagementRequest) GetManagement() *NodeManagement { + if m != nil { + return m.Management + } + return nil +} + +// SetNodePoolSizeRequest sets the size a node +// pool. +type SetNodePoolSizeRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to update. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The name of the node pool to update. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId" json:"node_pool_id,omitempty"` + // The desired node count for the pool. 
+ NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount" json:"node_count,omitempty"` +} + +func (m *SetNodePoolSizeRequest) Reset() { *m = SetNodePoolSizeRequest{} } +func (m *SetNodePoolSizeRequest) String() string { return proto.CompactTextString(m) } +func (*SetNodePoolSizeRequest) ProtoMessage() {} +func (*SetNodePoolSizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *SetNodePoolSizeRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetNodePoolSizeRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetNodePoolSizeRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetNodePoolSizeRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *SetNodePoolSizeRequest) GetNodeCount() int32 { + if m != nil { + return m.NodeCount + } + return 0 +} + +// RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed +// NodePool upgrade. This will be an no-op if the last upgrade successfully +// completed. +type RollbackNodePoolUpgradeRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to rollback. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The name of the node pool to rollback. 
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId" json:"node_pool_id,omitempty"` +} + +func (m *RollbackNodePoolUpgradeRequest) Reset() { *m = RollbackNodePoolUpgradeRequest{} } +func (m *RollbackNodePoolUpgradeRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackNodePoolUpgradeRequest) ProtoMessage() {} +func (*RollbackNodePoolUpgradeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *RollbackNodePoolUpgradeRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *RollbackNodePoolUpgradeRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *RollbackNodePoolUpgradeRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *RollbackNodePoolUpgradeRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +// ListNodePoolsResponse is the result of ListNodePoolsRequest. +type ListNodePoolsResponse struct { + // A list of node pools for a cluster. + NodePools []*NodePool `protobuf:"bytes,1,rep,name=node_pools,json=nodePools" json:"node_pools,omitempty"` +} + +func (m *ListNodePoolsResponse) Reset() { *m = ListNodePoolsResponse{} } +func (m *ListNodePoolsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNodePoolsResponse) ProtoMessage() {} +func (*ListNodePoolsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *ListNodePoolsResponse) GetNodePools() []*NodePool { + if m != nil { + return m.NodePools + } + return nil +} + +// NodePoolAutoscaling contains information required by cluster autoscaler to +// adjust the size of the node pool to the current cluster usage. +type NodePoolAutoscaling struct { + // Is autoscaling enabled for this node pool. + Enabled bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"` + // Minimum number of nodes in the NodePool. 
Must be >= 1 and <= + // max_node_count. + MinNodeCount int32 `protobuf:"varint,2,opt,name=min_node_count,json=minNodeCount" json:"min_node_count,omitempty"` + // Maximum number of nodes in the NodePool. Must be >= min_node_count. There + // has to enough quota to scale up the cluster. + MaxNodeCount int32 `protobuf:"varint,3,opt,name=max_node_count,json=maxNodeCount" json:"max_node_count,omitempty"` +} + +func (m *NodePoolAutoscaling) Reset() { *m = NodePoolAutoscaling{} } +func (m *NodePoolAutoscaling) String() string { return proto.CompactTextString(m) } +func (*NodePoolAutoscaling) ProtoMessage() {} +func (*NodePoolAutoscaling) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *NodePoolAutoscaling) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *NodePoolAutoscaling) GetMinNodeCount() int32 { + if m != nil { + return m.MinNodeCount + } + return 0 +} + +func (m *NodePoolAutoscaling) GetMaxNodeCount() int32 { + if m != nil { + return m.MaxNodeCount + } + return 0 +} + +// SetLabelsRequest sets the Google Cloud Platform labels on a Google Container +// Engine cluster, which will in turn set them for Google Compute Engine +// resources used by that cluster +type SetLabelsRequest struct { + // The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // The labels to set for that cluster. 
+ ResourceLabels map[string]string `protobuf:"bytes,4,rep,name=resource_labels,json=resourceLabels" json:"resource_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The fingerprint of the previous set of labels for this resource, + // used to detect conflicts. The fingerprint is initially generated by + // Container Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash when + // updating or changing labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `protobuf:"bytes,5,opt,name=label_fingerprint,json=labelFingerprint" json:"label_fingerprint,omitempty"` +} + +func (m *SetLabelsRequest) Reset() { *m = SetLabelsRequest{} } +func (m *SetLabelsRequest) String() string { return proto.CompactTextString(m) } +func (*SetLabelsRequest) ProtoMessage() {} +func (*SetLabelsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *SetLabelsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetLabelsRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetLabelsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetLabelsRequest) GetResourceLabels() map[string]string { + if m != nil { + return m.ResourceLabels + } + return nil +} + +func (m *SetLabelsRequest) GetLabelFingerprint() string { + if m != nil { + return m.LabelFingerprint + } + return "" +} + +// SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for +// a cluster. +type SetLegacyAbacRequest struct { + // The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). 
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster to update. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // Whether ABAC authorization will be enabled in the cluster. + Enabled bool `protobuf:"varint,4,opt,name=enabled" json:"enabled,omitempty"` +} + +func (m *SetLegacyAbacRequest) Reset() { *m = SetLegacyAbacRequest{} } +func (m *SetLegacyAbacRequest) String() string { return proto.CompactTextString(m) } +func (*SetLegacyAbacRequest) ProtoMessage() {} +func (*SetLegacyAbacRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +func (m *SetLegacyAbacRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetLegacyAbacRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetLegacyAbacRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetLegacyAbacRequest) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +// StartIPRotationRequest creates a new IP for the cluster and then performs +// a node upgrade on each node pool to point to the new IP. +type StartIPRotationRequest struct { + // The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster. 
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` +} + +func (m *StartIPRotationRequest) Reset() { *m = StartIPRotationRequest{} } +func (m *StartIPRotationRequest) String() string { return proto.CompactTextString(m) } +func (*StartIPRotationRequest) ProtoMessage() {} +func (*StartIPRotationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +func (m *StartIPRotationRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *StartIPRotationRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *StartIPRotationRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// CompleteIPRotationRequest moves the cluster master back into single-IP mode. +type CompleteIPRotationRequest struct { + // The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster. 
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` +} + +func (m *CompleteIPRotationRequest) Reset() { *m = CompleteIPRotationRequest{} } +func (m *CompleteIPRotationRequest) String() string { return proto.CompactTextString(m) } +func (*CompleteIPRotationRequest) ProtoMessage() {} +func (*CompleteIPRotationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *CompleteIPRotationRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CompleteIPRotationRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *CompleteIPRotationRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// AcceleratorConfig represents a Hardware Accelerator request. +type AcceleratorConfig struct { + // The number of the accelerator cards exposed to an instance. + AcceleratorCount int64 `protobuf:"varint,1,opt,name=accelerator_count,json=acceleratorCount" json:"accelerator_count,omitempty"` + // The accelerator type resource name. List of supported accelerators + // [here](/compute/docs/gpus/#Introduction) + AcceleratorType string `protobuf:"bytes,2,opt,name=accelerator_type,json=acceleratorType" json:"accelerator_type,omitempty"` +} + +func (m *AcceleratorConfig) Reset() { *m = AcceleratorConfig{} } +func (m *AcceleratorConfig) String() string { return proto.CompactTextString(m) } +func (*AcceleratorConfig) ProtoMessage() {} +func (*AcceleratorConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *AcceleratorConfig) GetAcceleratorCount() int64 { + if m != nil { + return m.AcceleratorCount + } + return 0 +} + +func (m *AcceleratorConfig) GetAcceleratorType() string { + if m != nil { + return m.AcceleratorType + } + return "" +} + +// SetNetworkPolicyRequest enables/disables network policy for a cluster. 
+type SetNetworkPolicyRequest struct { + // The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The name of the cluster. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"` + // Configuration options for the NetworkPolicy feature. + NetworkPolicy *NetworkPolicy `protobuf:"bytes,4,opt,name=network_policy,json=networkPolicy" json:"network_policy,omitempty"` +} + +func (m *SetNetworkPolicyRequest) Reset() { *m = SetNetworkPolicyRequest{} } +func (m *SetNetworkPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*SetNetworkPolicyRequest) ProtoMessage() {} +func (*SetNetworkPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +func (m *SetNetworkPolicyRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetNetworkPolicyRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetNetworkPolicyRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetNetworkPolicyRequest) GetNetworkPolicy() *NetworkPolicy { + if m != nil { + return m.NetworkPolicy + } + return nil +} + +func init() { + proto.RegisterType((*NodeConfig)(nil), "google.container.v1.NodeConfig") + proto.RegisterType((*MasterAuth)(nil), "google.container.v1.MasterAuth") + proto.RegisterType((*ClientCertificateConfig)(nil), "google.container.v1.ClientCertificateConfig") + proto.RegisterType((*AddonsConfig)(nil), "google.container.v1.AddonsConfig") + proto.RegisterType((*HttpLoadBalancing)(nil), 
"google.container.v1.HttpLoadBalancing") + proto.RegisterType((*HorizontalPodAutoscaling)(nil), "google.container.v1.HorizontalPodAutoscaling") + proto.RegisterType((*KubernetesDashboard)(nil), "google.container.v1.KubernetesDashboard") + proto.RegisterType((*MasterAuthorizedNetworksConfig)(nil), "google.container.v1.MasterAuthorizedNetworksConfig") + proto.RegisterType((*MasterAuthorizedNetworksConfig_CidrBlock)(nil), "google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock") + proto.RegisterType((*LegacyAbac)(nil), "google.container.v1.LegacyAbac") + proto.RegisterType((*NetworkPolicy)(nil), "google.container.v1.NetworkPolicy") + proto.RegisterType((*IPAllocationPolicy)(nil), "google.container.v1.IPAllocationPolicy") + proto.RegisterType((*Cluster)(nil), "google.container.v1.Cluster") + proto.RegisterType((*ClusterUpdate)(nil), "google.container.v1.ClusterUpdate") + proto.RegisterType((*Operation)(nil), "google.container.v1.Operation") + proto.RegisterType((*CreateClusterRequest)(nil), "google.container.v1.CreateClusterRequest") + proto.RegisterType((*GetClusterRequest)(nil), "google.container.v1.GetClusterRequest") + proto.RegisterType((*UpdateClusterRequest)(nil), "google.container.v1.UpdateClusterRequest") + proto.RegisterType((*UpdateNodePoolRequest)(nil), "google.container.v1.UpdateNodePoolRequest") + proto.RegisterType((*SetNodePoolAutoscalingRequest)(nil), "google.container.v1.SetNodePoolAutoscalingRequest") + proto.RegisterType((*SetLoggingServiceRequest)(nil), "google.container.v1.SetLoggingServiceRequest") + proto.RegisterType((*SetMonitoringServiceRequest)(nil), "google.container.v1.SetMonitoringServiceRequest") + proto.RegisterType((*SetAddonsConfigRequest)(nil), "google.container.v1.SetAddonsConfigRequest") + proto.RegisterType((*SetLocationsRequest)(nil), "google.container.v1.SetLocationsRequest") + proto.RegisterType((*UpdateMasterRequest)(nil), "google.container.v1.UpdateMasterRequest") + proto.RegisterType((*SetMasterAuthRequest)(nil), 
"google.container.v1.SetMasterAuthRequest") + proto.RegisterType((*DeleteClusterRequest)(nil), "google.container.v1.DeleteClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "google.container.v1.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "google.container.v1.ListClustersResponse") + proto.RegisterType((*GetOperationRequest)(nil), "google.container.v1.GetOperationRequest") + proto.RegisterType((*ListOperationsRequest)(nil), "google.container.v1.ListOperationsRequest") + proto.RegisterType((*CancelOperationRequest)(nil), "google.container.v1.CancelOperationRequest") + proto.RegisterType((*ListOperationsResponse)(nil), "google.container.v1.ListOperationsResponse") + proto.RegisterType((*GetServerConfigRequest)(nil), "google.container.v1.GetServerConfigRequest") + proto.RegisterType((*ServerConfig)(nil), "google.container.v1.ServerConfig") + proto.RegisterType((*CreateNodePoolRequest)(nil), "google.container.v1.CreateNodePoolRequest") + proto.RegisterType((*DeleteNodePoolRequest)(nil), "google.container.v1.DeleteNodePoolRequest") + proto.RegisterType((*ListNodePoolsRequest)(nil), "google.container.v1.ListNodePoolsRequest") + proto.RegisterType((*GetNodePoolRequest)(nil), "google.container.v1.GetNodePoolRequest") + proto.RegisterType((*NodePool)(nil), "google.container.v1.NodePool") + proto.RegisterType((*NodeManagement)(nil), "google.container.v1.NodeManagement") + proto.RegisterType((*AutoUpgradeOptions)(nil), "google.container.v1.AutoUpgradeOptions") + proto.RegisterType((*SetNodePoolManagementRequest)(nil), "google.container.v1.SetNodePoolManagementRequest") + proto.RegisterType((*SetNodePoolSizeRequest)(nil), "google.container.v1.SetNodePoolSizeRequest") + proto.RegisterType((*RollbackNodePoolUpgradeRequest)(nil), "google.container.v1.RollbackNodePoolUpgradeRequest") + proto.RegisterType((*ListNodePoolsResponse)(nil), "google.container.v1.ListNodePoolsResponse") + proto.RegisterType((*NodePoolAutoscaling)(nil), 
"google.container.v1.NodePoolAutoscaling") + proto.RegisterType((*SetLabelsRequest)(nil), "google.container.v1.SetLabelsRequest") + proto.RegisterType((*SetLegacyAbacRequest)(nil), "google.container.v1.SetLegacyAbacRequest") + proto.RegisterType((*StartIPRotationRequest)(nil), "google.container.v1.StartIPRotationRequest") + proto.RegisterType((*CompleteIPRotationRequest)(nil), "google.container.v1.CompleteIPRotationRequest") + proto.RegisterType((*AcceleratorConfig)(nil), "google.container.v1.AcceleratorConfig") + proto.RegisterType((*SetNetworkPolicyRequest)(nil), "google.container.v1.SetNetworkPolicyRequest") + proto.RegisterEnum("google.container.v1.NetworkPolicy_Provider", NetworkPolicy_Provider_name, NetworkPolicy_Provider_value) + proto.RegisterEnum("google.container.v1.Cluster_Status", Cluster_Status_name, Cluster_Status_value) + proto.RegisterEnum("google.container.v1.Operation_Status", Operation_Status_name, Operation_Status_value) + proto.RegisterEnum("google.container.v1.Operation_Type", Operation_Type_name, Operation_Type_value) + proto.RegisterEnum("google.container.v1.SetMasterAuthRequest_Action", SetMasterAuthRequest_Action_name, SetMasterAuthRequest_Action_value) + proto.RegisterEnum("google.container.v1.NodePool_Status", NodePool_Status_name, NodePool_Status_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ClusterManager service + +type ClusterManagerClient interface { + // Lists all clusters owned by a project in either the specified zone or all + // zones. + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Gets the details of a specific cluster. 
+ GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Creates a cluster, consisting of the specified number and type of Google + // Compute Engine instances. + // + // By default, the cluster is created in the project's + // [default network](/compute/docs/networks-and-firewalls#networks). + // + // One firewall is added for the cluster. After cluster creation, + // the cluster creates routes for each node to allow the containers + // on that node to communicate with all other instances in the + // cluster. + // + // Finally, an entry is added to the project's global metadata indicating + // which CIDR range is being used by the cluster. + CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Operation, error) + // Updates the settings of a specific cluster. + UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*Operation, error) + // Updates the version and/or image type of a specific node pool. + UpdateNodePool(ctx context.Context, in *UpdateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the autoscaling settings of a specific node pool. + SetNodePoolAutoscaling(ctx context.Context, in *SetNodePoolAutoscalingRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the logging service of a specific cluster. + SetLoggingService(ctx context.Context, in *SetLoggingServiceRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the monitoring service of a specific cluster. + SetMonitoringService(ctx context.Context, in *SetMonitoringServiceRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the addons of a specific cluster. + SetAddonsConfig(ctx context.Context, in *SetAddonsConfigRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the locations of a specific cluster. 
+ SetLocations(ctx context.Context, in *SetLocationsRequest, opts ...grpc.CallOption) (*Operation, error) + // Updates the master of a specific cluster. + UpdateMaster(ctx context.Context, in *UpdateMasterRequest, opts ...grpc.CallOption) (*Operation, error) + // Used to set master auth materials. Currently supports :- + // Changing the admin password of a specific cluster. + // This can be either via password generation or explicitly set the password. + SetMasterAuth(ctx context.Context, in *SetMasterAuthRequest, opts ...grpc.CallOption) (*Operation, error) + // Deletes the cluster, including the Kubernetes endpoint and all worker + // nodes. + // + // Firewalls and routes that were configured during cluster creation + // are also deleted. + // + // Other Google Compute Engine resources that might be in use by the cluster + // (e.g. load balancer resources) will not be deleted if they weren't present + // at the initial create time. + DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*Operation, error) + // Lists all operations in a project in a specific zone or all zones. + ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) + // Gets the specified operation. + GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) + // Cancels the specified operation. + CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Returns configuration info about the Container Engine service. + GetServerConfig(ctx context.Context, in *GetServerConfigRequest, opts ...grpc.CallOption) (*ServerConfig, error) + // Lists the node pools for a cluster. + ListNodePools(ctx context.Context, in *ListNodePoolsRequest, opts ...grpc.CallOption) (*ListNodePoolsResponse, error) + // Retrieves the node pool requested. 
+ GetNodePool(ctx context.Context, in *GetNodePoolRequest, opts ...grpc.CallOption) (*NodePool, error) + // Creates a node pool for a cluster. + CreateNodePool(ctx context.Context, in *CreateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) + // Deletes a node pool from a cluster. + DeleteNodePool(ctx context.Context, in *DeleteNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) + // Roll back the previously Aborted or Failed NodePool upgrade. + // This will be an no-op if the last upgrade successfully completed. + RollbackNodePoolUpgrade(ctx context.Context, in *RollbackNodePoolUpgradeRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the NodeManagement options for a node pool. + SetNodePoolManagement(ctx context.Context, in *SetNodePoolManagementRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets labels on a cluster. + SetLabels(ctx context.Context, in *SetLabelsRequest, opts ...grpc.CallOption) (*Operation, error) + // Enables or disables the ABAC authorization mechanism on a cluster. + SetLegacyAbac(ctx context.Context, in *SetLegacyAbacRequest, opts ...grpc.CallOption) (*Operation, error) + // Start master IP rotation. + StartIPRotation(ctx context.Context, in *StartIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) + // Completes master IP rotation. + CompleteIPRotation(ctx context.Context, in *CompleteIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the size of a specific node pool. + SetNodePoolSize(ctx context.Context, in *SetNodePoolSizeRequest, opts ...grpc.CallOption) (*Operation, error) + // Enables/Disables Network Policy for a cluster. 
+ SetNetworkPolicy(ctx context.Context, in *SetNetworkPolicyRequest, opts ...grpc.CallOption) (*Operation, error) +} + +type clusterManagerClient struct { + cc *grpc.ClientConn +} + +func NewClusterManagerClient(cc *grpc.ClientConn) ClusterManagerClient { + return &clusterManagerClient{cc} +} + +func (c *clusterManagerClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/GetCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/CreateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) UpdateNodePool(ctx context.Context, in *UpdateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateNodePool", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, in *SetNodePoolAutoscalingRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetLoggingService(ctx context.Context, in *SetLoggingServiceRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLoggingService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetMonitoringService(ctx context.Context, in *SetMonitoringServiceRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMonitoringService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetAddonsConfig(ctx context.Context, in *SetAddonsConfigRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetAddonsConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetLocations(ctx context.Context, in *SetLocationsRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLocations", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) UpdateMaster(ctx context.Context, in *UpdateMasterRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateMaster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetMasterAuth(ctx context.Context, in *SetMasterAuthRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMasterAuth", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/DeleteCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) { + out := new(ListOperationsResponse) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/ListOperations", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/GetOperation", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/CancelOperation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) GetServerConfig(ctx context.Context, in *GetServerConfigRequest, opts ...grpc.CallOption) (*ServerConfig, error) { + out := new(ServerConfig) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/GetServerConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) ListNodePools(ctx context.Context, in *ListNodePoolsRequest, opts ...grpc.CallOption) (*ListNodePoolsResponse, error) { + out := new(ListNodePoolsResponse) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/ListNodePools", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) GetNodePool(ctx context.Context, in *GetNodePoolRequest, opts ...grpc.CallOption) (*NodePool, error) { + out := new(NodePool) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/GetNodePool", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) CreateNodePool(ctx context.Context, in *CreateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/CreateNodePool", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) DeleteNodePool(ctx context.Context, in *DeleteNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/DeleteNodePool", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, in *RollbackNodePoolUpgradeRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetNodePoolManagement(ctx context.Context, in *SetNodePoolManagementRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolManagement", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetLabels(ctx context.Context, in *SetLabelsRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLabels", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetLegacyAbac(ctx context.Context, in *SetLegacyAbacRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLegacyAbac", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) StartIPRotation(ctx context.Context, in *StartIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/StartIPRotation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) CompleteIPRotation(ctx context.Context, in *CompleteIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/CompleteIPRotation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetNodePoolSize(ctx context.Context, in *SetNodePoolSizeRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolSize", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetNetworkPolicy(ctx context.Context, in *SetNetworkPolicyRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNetworkPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ClusterManager service + +type ClusterManagerServer interface { + // Lists all clusters owned by a project in either the specified zone or all + // zones. + ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Gets the details of a specific cluster. + GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) + // Creates a cluster, consisting of the specified number and type of Google + // Compute Engine instances. 
+ // + // By default, the cluster is created in the project's + // [default network](/compute/docs/networks-and-firewalls#networks). + // + // One firewall is added for the cluster. After cluster creation, + // the cluster creates routes for each node to allow the containers + // on that node to communicate with all other instances in the + // cluster. + // + // Finally, an entry is added to the project's global metadata indicating + // which CIDR range is being used by the cluster. + CreateCluster(context.Context, *CreateClusterRequest) (*Operation, error) + // Updates the settings of a specific cluster. + UpdateCluster(context.Context, *UpdateClusterRequest) (*Operation, error) + // Updates the version and/or image type of a specific node pool. + UpdateNodePool(context.Context, *UpdateNodePoolRequest) (*Operation, error) + // Sets the autoscaling settings of a specific node pool. + SetNodePoolAutoscaling(context.Context, *SetNodePoolAutoscalingRequest) (*Operation, error) + // Sets the logging service of a specific cluster. + SetLoggingService(context.Context, *SetLoggingServiceRequest) (*Operation, error) + // Sets the monitoring service of a specific cluster. + SetMonitoringService(context.Context, *SetMonitoringServiceRequest) (*Operation, error) + // Sets the addons of a specific cluster. + SetAddonsConfig(context.Context, *SetAddonsConfigRequest) (*Operation, error) + // Sets the locations of a specific cluster. + SetLocations(context.Context, *SetLocationsRequest) (*Operation, error) + // Updates the master of a specific cluster. + UpdateMaster(context.Context, *UpdateMasterRequest) (*Operation, error) + // Used to set master auth materials. Currently supports :- + // Changing the admin password of a specific cluster. + // This can be either via password generation or explicitly set the password. + SetMasterAuth(context.Context, *SetMasterAuthRequest) (*Operation, error) + // Deletes the cluster, including the Kubernetes endpoint and all worker + // nodes. 
+ // + // Firewalls and routes that were configured during cluster creation + // are also deleted. + // + // Other Google Compute Engine resources that might be in use by the cluster + // (e.g. load balancer resources) will not be deleted if they weren't present + // at the initial create time. + DeleteCluster(context.Context, *DeleteClusterRequest) (*Operation, error) + // Lists all operations in a project in a specific zone or all zones. + ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) + // Gets the specified operation. + GetOperation(context.Context, *GetOperationRequest) (*Operation, error) + // Cancels the specified operation. + CancelOperation(context.Context, *CancelOperationRequest) (*google_protobuf1.Empty, error) + // Returns configuration info about the Container Engine service. + GetServerConfig(context.Context, *GetServerConfigRequest) (*ServerConfig, error) + // Lists the node pools for a cluster. + ListNodePools(context.Context, *ListNodePoolsRequest) (*ListNodePoolsResponse, error) + // Retrieves the node pool requested. + GetNodePool(context.Context, *GetNodePoolRequest) (*NodePool, error) + // Creates a node pool for a cluster. + CreateNodePool(context.Context, *CreateNodePoolRequest) (*Operation, error) + // Deletes a node pool from a cluster. + DeleteNodePool(context.Context, *DeleteNodePoolRequest) (*Operation, error) + // Roll back the previously Aborted or Failed NodePool upgrade. + // This will be an no-op if the last upgrade successfully completed. + RollbackNodePoolUpgrade(context.Context, *RollbackNodePoolUpgradeRequest) (*Operation, error) + // Sets the NodeManagement options for a node pool. + SetNodePoolManagement(context.Context, *SetNodePoolManagementRequest) (*Operation, error) + // Sets labels on a cluster. + SetLabels(context.Context, *SetLabelsRequest) (*Operation, error) + // Enables or disables the ABAC authorization mechanism on a cluster. 
+ SetLegacyAbac(context.Context, *SetLegacyAbacRequest) (*Operation, error) + // Start master IP rotation. + StartIPRotation(context.Context, *StartIPRotationRequest) (*Operation, error) + // Completes master IP rotation. + CompleteIPRotation(context.Context, *CompleteIPRotationRequest) (*Operation, error) + // Sets the size of a specific node pool. + SetNodePoolSize(context.Context, *SetNodePoolSizeRequest) (*Operation, error) + // Enables/Disables Network Policy for a cluster. + SetNetworkPolicy(context.Context, *SetNetworkPolicyRequest) (*Operation, error) +} + +func RegisterClusterManagerServer(s *grpc.Server, srv ClusterManagerServer) { + s.RegisterService(&_ClusterManager_serviceDesc, srv) +} + +func _ClusterManager_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).GetCluster(ctx, 
req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).CreateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/CreateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).CreateCluster(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_UpdateNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNodePoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).UpdateNodePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/UpdateNodePool", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(ClusterManagerServer).UpdateNodePool(ctx, req.(*UpdateNodePoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetNodePoolAutoscaling_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNodePoolAutoscalingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetNodePoolAutoscaling(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetNodePoolAutoscaling(ctx, req.(*SetNodePoolAutoscalingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetLoggingService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLoggingServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetLoggingService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetLoggingService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetLoggingService(ctx, req.(*SetLoggingServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetMonitoringService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetMonitoringServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetMonitoringService(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetMonitoringService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetMonitoringService(ctx, req.(*SetMonitoringServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetAddonsConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetAddonsConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetAddonsConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetAddonsConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetAddonsConfig(ctx, req.(*SetAddonsConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetLocations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLocationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetLocations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetLocations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetLocations(ctx, req.(*SetLocationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_UpdateMaster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateMasterRequest) + if err := dec(in); err != nil { + return nil, err 
+ } + if interceptor == nil { + return srv.(ClusterManagerServer).UpdateMaster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/UpdateMaster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).UpdateMaster(ctx, req.(*UpdateMasterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetMasterAuth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetMasterAuthRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetMasterAuth(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetMasterAuth", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetMasterAuth(ctx, req.(*SetMasterAuthRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).DeleteCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/DeleteCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).DeleteCluster(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(ListOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).ListOperations(ctx, req.(*ListOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).GetOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/GetOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).GetOperation(ctx, req.(*GetOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_CancelOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).CancelOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/CancelOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).CancelOperation(ctx, req.(*CancelOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_GetServerConfig_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServerConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).GetServerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/GetServerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).GetServerConfig(ctx, req.(*GetServerConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_ListNodePools_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNodePoolsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).ListNodePools(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/ListNodePools", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).ListNodePools(ctx, req.(*ListNodePoolsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_GetNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodePoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).GetNodePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/GetNodePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).GetNodePool(ctx, req.(*GetNodePoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ClusterManager_CreateNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNodePoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).CreateNodePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/CreateNodePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).CreateNodePool(ctx, req.(*CreateNodePoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_DeleteNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNodePoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).DeleteNodePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/DeleteNodePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).DeleteNodePool(ctx, req.(*DeleteNodePoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_RollbackNodePoolUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackNodePoolUpgradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).RollbackNodePoolUpgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ClusterManagerServer).RollbackNodePoolUpgrade(ctx, req.(*RollbackNodePoolUpgradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetNodePoolManagement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNodePoolManagementRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetNodePoolManagement(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetNodePoolManagement", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetNodePoolManagement(ctx, req.(*SetNodePoolManagementRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetLabels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLabelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetLabels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetLabels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetLabels(ctx, req.(*SetLabelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetLegacyAbac_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLegacyAbacRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetLegacyAbac(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.container.v1.ClusterManager/SetLegacyAbac", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetLegacyAbac(ctx, req.(*SetLegacyAbacRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_StartIPRotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartIPRotationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).StartIPRotation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/StartIPRotation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).StartIPRotation(ctx, req.(*StartIPRotationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_CompleteIPRotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompleteIPRotationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).CompleteIPRotation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/CompleteIPRotation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).CompleteIPRotation(ctx, req.(*CompleteIPRotationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetNodePoolSize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNodePoolSizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ClusterManagerServer).SetNodePoolSize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetNodePoolSize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetNodePoolSize(ctx, req.(*SetNodePoolSizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetNetworkPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNetworkPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetNetworkPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetNetworkPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetNetworkPolicy(ctx, req.(*SetNetworkPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterManager_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.container.v1.ClusterManager", + HandlerType: (*ClusterManagerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListClusters", + Handler: _ClusterManager_ListClusters_Handler, + }, + { + MethodName: "GetCluster", + Handler: _ClusterManager_GetCluster_Handler, + }, + { + MethodName: "CreateCluster", + Handler: _ClusterManager_CreateCluster_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _ClusterManager_UpdateCluster_Handler, + }, + { + MethodName: "UpdateNodePool", + Handler: _ClusterManager_UpdateNodePool_Handler, + }, + { + MethodName: "SetNodePoolAutoscaling", + Handler: _ClusterManager_SetNodePoolAutoscaling_Handler, + }, + { + MethodName: "SetLoggingService", + Handler: _ClusterManager_SetLoggingService_Handler, + }, + { + MethodName: "SetMonitoringService", + Handler: 
_ClusterManager_SetMonitoringService_Handler, + }, + { + MethodName: "SetAddonsConfig", + Handler: _ClusterManager_SetAddonsConfig_Handler, + }, + { + MethodName: "SetLocations", + Handler: _ClusterManager_SetLocations_Handler, + }, + { + MethodName: "UpdateMaster", + Handler: _ClusterManager_UpdateMaster_Handler, + }, + { + MethodName: "SetMasterAuth", + Handler: _ClusterManager_SetMasterAuth_Handler, + }, + { + MethodName: "DeleteCluster", + Handler: _ClusterManager_DeleteCluster_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ClusterManager_ListOperations_Handler, + }, + { + MethodName: "GetOperation", + Handler: _ClusterManager_GetOperation_Handler, + }, + { + MethodName: "CancelOperation", + Handler: _ClusterManager_CancelOperation_Handler, + }, + { + MethodName: "GetServerConfig", + Handler: _ClusterManager_GetServerConfig_Handler, + }, + { + MethodName: "ListNodePools", + Handler: _ClusterManager_ListNodePools_Handler, + }, + { + MethodName: "GetNodePool", + Handler: _ClusterManager_GetNodePool_Handler, + }, + { + MethodName: "CreateNodePool", + Handler: _ClusterManager_CreateNodePool_Handler, + }, + { + MethodName: "DeleteNodePool", + Handler: _ClusterManager_DeleteNodePool_Handler, + }, + { + MethodName: "RollbackNodePoolUpgrade", + Handler: _ClusterManager_RollbackNodePoolUpgrade_Handler, + }, + { + MethodName: "SetNodePoolManagement", + Handler: _ClusterManager_SetNodePoolManagement_Handler, + }, + { + MethodName: "SetLabels", + Handler: _ClusterManager_SetLabels_Handler, + }, + { + MethodName: "SetLegacyAbac", + Handler: _ClusterManager_SetLegacyAbac_Handler, + }, + { + MethodName: "StartIPRotation", + Handler: _ClusterManager_StartIPRotation_Handler, + }, + { + MethodName: "CompleteIPRotation", + Handler: _ClusterManager_CompleteIPRotation_Handler, + }, + { + MethodName: "SetNodePoolSize", + Handler: _ClusterManager_SetNodePoolSize_Handler, + }, + { + MethodName: "SetNetworkPolicy", + Handler: _ClusterManager_SetNetworkPolicy_Handler, + 
}, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/container/v1/cluster_service.proto", +} + +func init() { proto.RegisterFile("google/container/v1/cluster_service.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 4238 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5c, 0x5b, 0x8c, 0xdc, 0x58, + 0x5a, 0xc6, 0xdd, 0xd5, 0x97, 0xfa, 0xeb, 0xd2, 0xd5, 0xa7, 0x2f, 0xf1, 0xf6, 0xe4, 0xea, 0x99, + 0xcc, 0xe4, 0x32, 0xd3, 0x35, 0xc9, 0x64, 0xe7, 0x92, 0xc9, 0x8e, 0xa6, 0x52, 0x5d, 0xe9, 0x54, + 0xd2, 0xdd, 0x55, 0xeb, 0xea, 0x9e, 0x30, 0x03, 0xbb, 0x96, 0xdb, 0x3e, 0xa9, 0xf6, 0xb4, 0xcb, + 0xf6, 0xda, 0xae, 0xde, 0xe9, 0x44, 0x41, 0x62, 0x05, 0x12, 0x12, 0x02, 0x21, 0xc1, 0x22, 0x84, + 0xd0, 0x0a, 0xc1, 0x82, 0xd0, 0x8e, 0xb8, 0x2d, 0x68, 0x41, 0xc0, 0x03, 0x2f, 0x48, 0xbc, 0xf0, + 0xc4, 0x03, 0x82, 0x17, 0x84, 0x84, 0x90, 0x78, 0xe1, 0x95, 0x57, 0x74, 0x2e, 0x76, 0xd9, 0x55, + 0x76, 0x55, 0x77, 0x6a, 0x2a, 0xe1, 0x29, 0xe5, 0xff, 0xfc, 0xe7, 0x9c, 0xef, 0xff, 0x7d, 0xce, + 0x7f, 0x75, 0x07, 0xae, 0xb6, 0x6d, 0xbb, 0x6d, 0xe2, 0xb2, 0x66, 0x5b, 0xbe, 0x6a, 0x58, 0xd8, + 0x2d, 0x1f, 0xdd, 0x28, 0x6b, 0x66, 0xd7, 0xf3, 0xb1, 0xab, 0x78, 0xd8, 0x3d, 0x32, 0x34, 0xbc, + 0xee, 0xb8, 0xb6, 0x6f, 0xa3, 0x25, 0xc6, 0xba, 0x1e, 0xb2, 0xae, 0x1f, 0xdd, 0x58, 0x3b, 0xcb, + 0xe7, 0xab, 0x8e, 0x51, 0x56, 0x2d, 0xcb, 0xf6, 0x55, 0xdf, 0xb0, 0x2d, 0x8f, 0x4d, 0x59, 0x7b, + 0x85, 0x8f, 0xd2, 0xa7, 0xfd, 0xee, 0xe3, 0x32, 0xee, 0x38, 0xfe, 0x31, 0x1b, 0x94, 0xfe, 0x27, + 0x03, 0xb0, 0x63, 0xeb, 0xb8, 0x6a, 0x5b, 0x8f, 0x8d, 0x36, 0xba, 0x04, 0xf9, 0x8e, 0xaa, 0x1d, + 0x18, 0x16, 0x56, 0xfc, 0x63, 0x07, 0x8b, 0xc2, 0x45, 0xe1, 0x4a, 0x56, 0xce, 0x71, 0xda, 0xee, + 0xb1, 0x83, 0xd1, 0x45, 0xc8, 0xeb, 0x86, 0x77, 0xa8, 0x78, 0xc6, 0x13, 0xac, 0xb4, 0xf7, 0xc5, + 0xa9, 0x8b, 0xc2, 0x95, 0x19, 0x19, 0x08, 0xad, 0x65, 0x3c, 0xc1, 0x9b, 0xfb, 0x64, 0x11, 0x5b, + 0xed, 0xfa, 0x07, 0x8a, 0xa7, 0xd9, 0x0e, 0xf6, 
0xc4, 0xe9, 0x8b, 0xd3, 0x64, 0x11, 0x4a, 0x6b, + 0x51, 0x12, 0x7a, 0x03, 0x16, 0xb8, 0x5c, 0x8a, 0xaa, 0x69, 0x76, 0xd7, 0xf2, 0xc5, 0x2c, 0xdd, + 0xaa, 0xc8, 0xc9, 0x15, 0x46, 0x45, 0x75, 0x98, 0xef, 0x60, 0x5f, 0xd5, 0x55, 0x5f, 0x15, 0x33, + 0x17, 0xa7, 0xaf, 0xe4, 0x6e, 0xbe, 0xb5, 0x9e, 0xa0, 0x82, 0xf5, 0x9e, 0x0c, 0xeb, 0xdb, 0x9c, + 0xbf, 0x66, 0xf9, 0xee, 0xb1, 0x1c, 0x4e, 0x47, 0xe7, 0x00, 0x8c, 0x8e, 0xda, 0xe6, 0x92, 0xcd, + 0xd0, 0xed, 0xb2, 0x94, 0x42, 0xe5, 0xaa, 0xc2, 0xac, 0xa9, 0xee, 0x63, 0xd3, 0x13, 0x67, 0xe9, + 0x3e, 0xd7, 0x47, 0xed, 0xb3, 0x45, 0xb9, 0xd9, 0x2e, 0x7c, 0x2a, 0x7a, 0x1d, 0x16, 0x4c, 0x5b, + 0x53, 0x4d, 0xc5, 0xf3, 0x74, 0x85, 0xc9, 0x35, 0x47, 0xf5, 0x53, 0xa0, 0xe4, 0x96, 0xa7, 0x57, + 0xa9, 0x58, 0x08, 0x32, 0xbe, 0xda, 0xf6, 0xc4, 0x79, 0xaa, 0x1a, 0xfa, 0x1b, 0x5d, 0x84, 0x9c, + 0xe3, 0x62, 0xf2, 0x72, 0x8c, 0x7d, 0x13, 0x8b, 0x70, 0x51, 0xb8, 0x32, 0x2f, 0x47, 0x49, 0xe8, + 0x01, 0xe4, 0x55, 0x4d, 0xc3, 0x26, 0x76, 0x55, 0xdf, 0x76, 0x3d, 0x31, 0x47, 0x81, 0xbe, 0x9e, + 0x08, 0xb4, 0xd2, 0x63, 0x64, 0x78, 0xe5, 0xd8, 0xdc, 0xb5, 0x0f, 0xa1, 0x10, 0x53, 0x14, 0x2a, + 0xc1, 0xf4, 0x21, 0x3e, 0xe6, 0x6f, 0x9c, 0xfc, 0x44, 0xcb, 0x30, 0x73, 0xa4, 0x9a, 0x5d, 0x4c, + 0x5f, 0x71, 0x56, 0x66, 0x0f, 0xb7, 0xa7, 0xde, 0x17, 0xd6, 0x3e, 0x80, 0x5c, 0x44, 0xfa, 0xd3, + 0x4c, 0x95, 0x7e, 0x3c, 0x05, 0xb0, 0xad, 0x92, 0x93, 0x5d, 0xe9, 0xfa, 0x07, 0x68, 0x0d, 0xe6, + 0xbb, 0x1e, 0x76, 0x2d, 0xb5, 0x13, 0x1c, 0xb6, 0xf0, 0x99, 0x8c, 0x39, 0xaa, 0xe7, 0x7d, 0xd7, + 0x76, 0x75, 0xbe, 0x4e, 0xf8, 0x8c, 0x0e, 0xe0, 0x6b, 0x9a, 0x69, 0x60, 0xcb, 0x57, 0x34, 0xec, + 0xfa, 0xc6, 0x63, 0x43, 0x53, 0x7d, 0xac, 0x68, 0x54, 0x52, 0x71, 0xfa, 0xa2, 0x70, 0x25, 0x77, + 0xf3, 0xcd, 0x44, 0xbd, 0x54, 0xe9, 0xac, 0x6a, 0x6f, 0x12, 0xd7, 0xce, 0x19, 0x2d, 0x79, 0x00, + 0xdd, 0x82, 0xd5, 0xe0, 0x2a, 0x6a, 0x6a, 0x74, 0x37, 0x51, 0xa7, 0x98, 0x96, 0xf9, 0x68, 0x55, + 0x8d, 0xcc, 0x45, 0x6f, 0x01, 0x1a, 0xc4, 0x27, 0x62, 0x3a, 0x63, 0x71, 0x60, 0x2b, 
0x72, 0x36, + 0x39, 0x3b, 0x51, 0xe4, 0x63, 0x76, 0x36, 0x19, 0xe5, 0x21, 0x3e, 0x96, 0x5a, 0x70, 0x26, 0x05, + 0x37, 0x7a, 0x1f, 0x44, 0xc3, 0xf3, 0xba, 0x58, 0x49, 0xd8, 0x4e, 0xa0, 0x47, 0x68, 0x95, 0x8e, + 0x0f, 0xcc, 0x97, 0xfe, 0x6c, 0x0a, 0xf2, 0x15, 0x5d, 0xb7, 0x2d, 0x8f, 0x2f, 0xf5, 0x09, 0x2c, + 0x1d, 0xf8, 0xbe, 0xa3, 0x98, 0xb6, 0xaa, 0x2b, 0xfb, 0xaa, 0xa9, 0x5a, 0x9a, 0x61, 0xb5, 0xe9, + 0x2a, 0x69, 0xa7, 0xec, 0xbe, 0xef, 0x3b, 0x5b, 0xb6, 0xaa, 0xdf, 0x0d, 0xb8, 0xe5, 0xc5, 0x83, + 0x7e, 0x12, 0x3a, 0x84, 0xb5, 0x03, 0xdb, 0x35, 0x9e, 0x90, 0x89, 0xa6, 0xe2, 0xd8, 0xba, 0xa2, + 0x76, 0x7d, 0xdb, 0xd3, 0x54, 0x93, 0x2c, 0x3f, 0x45, 0x97, 0x4f, 0xbe, 0xd5, 0xf7, 0xc3, 0x69, + 0x4d, 0x5b, 0xaf, 0xf4, 0x26, 0xc9, 0xe2, 0x41, 0xca, 0x08, 0xfa, 0x19, 0x58, 0x3e, 0xec, 0xee, + 0x63, 0xd7, 0xc2, 0x3e, 0xf6, 0x14, 0x5d, 0xf5, 0x0e, 0xf6, 0x6d, 0xd5, 0xd5, 0xf9, 0x99, 0xb8, + 0x92, 0xb8, 0xcd, 0xc3, 0x70, 0xc2, 0x46, 0xc0, 0x2f, 0x2f, 0x1d, 0x0e, 0x12, 0xa5, 0x32, 0x2c, + 0x0e, 0x48, 0x4c, 0x8e, 0xa9, 0x6e, 0x78, 0xea, 0xbe, 0x89, 0x75, 0xae, 0xf1, 0xf0, 0x59, 0x7a, + 0x17, 0xc4, 0x34, 0x19, 0x86, 0xce, 0xbb, 0x01, 0x4b, 0x09, 0xa0, 0x86, 0x4e, 0xf9, 0x6f, 0x01, + 0xce, 0xf7, 0x2e, 0x16, 0xd9, 0x14, 0xeb, 0x3b, 0xd8, 0xff, 0xae, 0xed, 0x1e, 0x06, 0x2f, 0x58, + 0x84, 0x39, 0x6c, 0x45, 0x67, 0x07, 0x8f, 0xe8, 0xdb, 0x90, 0xd3, 0x0c, 0xdd, 0x55, 0xf6, 0x4d, + 0x5b, 0x3b, 0xf4, 0xc4, 0x29, 0x6a, 0x58, 0xbe, 0x91, 0xa8, 0xac, 0xe1, 0x7b, 0xac, 0x57, 0x0d, + 0xdd, 0xbd, 0x4b, 0x56, 0x91, 0x41, 0x0b, 0x7e, 0x7a, 0x6b, 0xdb, 0x90, 0x0d, 0x07, 0x88, 0x7f, + 0xd0, 0x0d, 0xcf, 0x31, 0xd5, 0x63, 0x25, 0x72, 0xef, 0x73, 0x9c, 0xb6, 0x43, 0xae, 0x3e, 0xb9, + 0x0f, 0x21, 0x1e, 0x7e, 0xf9, 0xb3, 0xe1, 0x7a, 0xd2, 0xeb, 0x00, 0x5b, 0xb8, 0xad, 0x6a, 0xc7, + 0x95, 0x7d, 0x55, 0x4b, 0x17, 0x4b, 0xfa, 0xa1, 0x00, 0x05, 0x8e, 0xaf, 0x69, 0x9b, 0x86, 0x76, + 0x8c, 0x36, 0x61, 0xde, 0x71, 0xed, 0x23, 0x43, 0xc7, 0x2e, 0x65, 0x2e, 0xa6, 0xd9, 0xf9, 0xe8, + 0xac, 0xf5, 0x26, 0x9f, 
0x22, 0x87, 0x93, 0xa3, 0x9b, 0x4e, 0xc5, 0x37, 0x7d, 0x1b, 0xe6, 0x9b, + 0x3d, 0xae, 0xe5, 0xa6, 0xdc, 0xf8, 0xa4, 0xbe, 0x51, 0x93, 0x95, 0xbd, 0x9d, 0x56, 0xb3, 0x56, + 0xad, 0xdf, 0xab, 0xd7, 0x36, 0x4a, 0x3f, 0x85, 0x00, 0x66, 0xab, 0x95, 0xad, 0x7a, 0xb5, 0x51, + 0x12, 0xa4, 0x5f, 0x9b, 0x02, 0x54, 0x6f, 0x56, 0x4c, 0xe2, 0x23, 0x88, 0xe7, 0xe6, 0x58, 0x5f, + 0x83, 0x62, 0xd7, 0xc3, 0x8a, 0xe1, 0x28, 0xaa, 0x69, 0xa8, 0x1e, 0xf6, 0xb8, 0x78, 0xf9, 0xae, + 0x87, 0xeb, 0x4e, 0x85, 0xd1, 0xd0, 0x75, 0x58, 0xd4, 0x5c, 0x4c, 0xac, 0x9f, 0xd7, 0xdd, 0xb7, + 0x18, 0x6c, 0x0e, 0xa9, 0xc4, 0x06, 0x5a, 0x21, 0x9d, 0xfa, 0xdd, 0xf0, 0x89, 0x69, 0x7f, 0x9a, + 0xfb, 0xdd, 0x90, 0x4c, 0x5f, 0xc0, 0x35, 0x58, 0x0c, 0xac, 0x9e, 0xe1, 0x1c, 0xdd, 0x52, 0x88, + 0xee, 0xc5, 0x0c, 0x65, 0x5d, 0xe0, 0x03, 0x75, 0xe7, 0xe8, 0x16, 0x79, 0xa9, 0x04, 0xa7, 0x65, + 0xeb, 0x38, 0xc2, 0xc8, 0x9c, 0x6b, 0x9e, 0x50, 0x43, 0xae, 0x37, 0x01, 0x71, 0xdf, 0xee, 0x45, + 0x38, 0x67, 0x29, 0x67, 0x29, 0x18, 0x09, 0xb8, 0xa5, 0xff, 0x2d, 0xc2, 0x5c, 0x95, 0xed, 0x43, + 0x9c, 0x65, 0xe4, 0x9c, 0xd0, 0xdf, 0xc4, 0x59, 0xea, 0xd8, 0xd3, 0x5c, 0xc3, 0x21, 0x0a, 0xe3, + 0x27, 0x24, 0x4a, 0x22, 0xfb, 0x19, 0x96, 0xe1, 0x1b, 0xaa, 0xa9, 0x50, 0x74, 0xcc, 0x1b, 0x4f, + 0x53, 0x6f, 0x5c, 0xe2, 0x23, 0xcc, 0x9b, 0x13, 0x87, 0xfc, 0x31, 0xe4, 0x38, 0x17, 0xf5, 0x20, + 0x19, 0x6a, 0x2d, 0x2e, 0x8c, 0x08, 0x01, 0x64, 0xb0, 0x7a, 0xa1, 0xd3, 0xc7, 0x90, 0xeb, 0xd0, + 0xab, 0x41, 0xac, 0xdb, 0x01, 0x55, 0x41, 0xda, 0x0a, 0xbd, 0x2b, 0x24, 0x43, 0xa7, 0xe7, 0x0b, + 0xdf, 0x20, 0xc1, 0x43, 0xbb, 0x6d, 0x58, 0xed, 0x20, 0xe8, 0xe3, 0xea, 0x29, 0x72, 0x72, 0x8b, + 0x51, 0x89, 0x73, 0xe9, 0xd8, 0x96, 0xe1, 0xdb, 0x6e, 0x94, 0x77, 0x8e, 0x39, 0x97, 0xde, 0x48, + 0xc0, 0x2e, 0xc2, 0x5c, 0x70, 0x2e, 0xe6, 0x29, 0x4f, 0xf0, 0x98, 0xfc, 0x96, 0xb3, 0xc9, 0x6f, + 0xf9, 0x1e, 0x14, 0x54, 0xea, 0x2d, 0x02, 0x1d, 0x01, 0x95, 0xf0, 0x52, 0x72, 0xf4, 0x11, 0xf1, + 0x2b, 0x72, 0x5e, 0x8d, 0x7a, 0x99, 0xf3, 0x00, 0x91, 0x83, 
0x9a, 0xa3, 0x9b, 0x45, 0x28, 0xe8, + 0x0e, 0x50, 0xad, 0x2a, 0x8e, 0x6d, 0x9b, 0x9e, 0x98, 0xa7, 0x96, 0xe8, 0x5c, 0xea, 0x8b, 0x68, + 0xda, 0xb6, 0x29, 0x67, 0x2d, 0xfe, 0xcb, 0x43, 0x67, 0x21, 0x1b, 0xdc, 0x22, 0x4f, 0x2c, 0xd0, + 0xe8, 0xaa, 0x47, 0x40, 0xef, 0xc2, 0x19, 0x76, 0x4b, 0x95, 0x88, 0x8f, 0x50, 0x4d, 0xe7, 0x40, + 0x15, 0x8b, 0xf4, 0xc6, 0xac, 0xb0, 0xe1, 0x9e, 0xed, 0xad, 0x90, 0x41, 0xf4, 0x29, 0x2c, 0xb8, + 0xd8, 0xb3, 0xbb, 0xae, 0x86, 0x15, 0x1e, 0x24, 0x2e, 0x50, 0x60, 0x6f, 0xa7, 0xc4, 0x18, 0x54, + 0x75, 0xeb, 0x32, 0x9f, 0x13, 0x8d, 0x14, 0x8b, 0x6e, 0x8c, 0x48, 0xae, 0x2f, 0x5d, 0x51, 0x79, + 0x6c, 0x58, 0x6d, 0xec, 0x3a, 0xae, 0x61, 0xf9, 0x62, 0x89, 0xdd, 0x0a, 0x3a, 0x70, 0xaf, 0x47, + 0x27, 0x67, 0xcc, 0xa4, 0x76, 0x4f, 0x51, 0xf7, 0x55, 0x4d, 0x44, 0x43, 0xce, 0x58, 0xcf, 0x3e, + 0xca, 0x60, 0xf6, 0x6c, 0x65, 0x1d, 0x8a, 0xc1, 0xed, 0x77, 0xa8, 0x95, 0x11, 0x97, 0xe8, 0x22, + 0xd2, 0x68, 0x2b, 0x28, 0x17, 0xac, 0x98, 0x29, 0xfd, 0x14, 0x96, 0xa9, 0x69, 0x0a, 0xd4, 0x1b, + 0x2c, 0xb8, 0x4c, 0x17, 0x7c, 0x23, 0x71, 0xc1, 0x41, 0x2b, 0x27, 0x23, 0xc3, 0x19, 0xb0, 0x7c, + 0x3f, 0x07, 0x97, 0x22, 0x77, 0x89, 0xf9, 0x19, 0x85, 0xef, 0x1e, 0x9e, 0xbf, 0x55, 0xba, 0xcf, + 0x3b, 0xcf, 0xe1, 0xa4, 0xe4, 0xf3, 0x9d, 0xe1, 0x8e, 0xf2, 0x15, 0xc8, 0x7a, 0xd8, 0x7c, 0xac, + 0x98, 0x86, 0x75, 0xc8, 0xc3, 0xbc, 0x79, 0x42, 0xd8, 0x32, 0xac, 0x43, 0x62, 0x8e, 0x9e, 0xd8, + 0x56, 0x10, 0xcc, 0xd1, 0xdf, 0xc4, 0x31, 0x63, 0x4b, 0x77, 0x6c, 0xf2, 0xf2, 0x58, 0xf4, 0x16, + 0x3e, 0x93, 0x43, 0x17, 0x18, 0xa2, 0xe0, 0xb2, 0x1d, 0x61, 0xd7, 0x23, 0x66, 0xab, 0x4d, 0x59, + 0x57, 0xf8, 0x30, 0x3f, 0x36, 0x9f, 0xb0, 0x41, 0x1a, 0x78, 0x76, 0x5d, 0x97, 0x04, 0x75, 0x5c, + 0x19, 0xc1, 0xb4, 0x03, 0x1e, 0x78, 0xb2, 0x51, 0x26, 0x6b, 0x30, 0xeb, 0x6d, 0x08, 0xe8, 0xcc, + 0xec, 0x05, 0x73, 0x0c, 0x3a, 0x07, 0xf1, 0x31, 0x72, 0x75, 0x82, 0x19, 0x17, 0x20, 0xc7, 0x1d, + 0x88, 0x6f, 0x74, 0xb0, 0xf8, 0x39, 0xbb, 0x91, 0x8c, 0xb4, 0x6b, 0x74, 0x30, 0xfa, 0x10, 0x66, + 
0x3d, 0x5f, 0xf5, 0xbb, 0x9e, 0x78, 0x48, 0x3d, 0xe6, 0xab, 0x43, 0x0f, 0x7d, 0x8b, 0xb2, 0xca, + 0x7c, 0x0a, 0xba, 0x0c, 0x45, 0xf6, 0x4b, 0xe9, 0x60, 0xcf, 0x53, 0xdb, 0x58, 0x34, 0xe9, 0x06, + 0x05, 0x46, 0xdd, 0x66, 0x44, 0xf4, 0x16, 0x2c, 0xc5, 0x7d, 0x08, 0xcd, 0x2f, 0xc5, 0x0e, 0x33, + 0xd7, 0x51, 0x47, 0x42, 0x92, 0xcc, 0x14, 0x67, 0x62, 0x25, 0x3b, 0x13, 0xb4, 0x0e, 0x4b, 0x86, + 0xe5, 0xf9, 0xaa, 0xa5, 0x61, 0xa5, 0xed, 0xda, 0x5d, 0x47, 0xe9, 0xba, 0xa6, 0x27, 0xda, 0xd4, + 0x3c, 0x2c, 0x06, 0x43, 0x9b, 0x64, 0x64, 0xcf, 0x35, 0x3d, 0xb2, 0x7a, 0x4c, 0x87, 0xcc, 0x75, + 0x38, 0x0c, 0x4b, 0x44, 0x83, 0xcc, 0x75, 0x5c, 0x80, 0x1c, 0xfe, 0xc2, 0x31, 0x5c, 0xae, 0xbf, + 0xef, 0x30, 0xfd, 0x31, 0x12, 0xd1, 0xdf, 0x5a, 0x05, 0x96, 0x12, 0x2c, 0xc1, 0xa9, 0xb2, 0x26, + 0x03, 0x66, 0x99, 0x5e, 0xd1, 0x2a, 0xa0, 0xd6, 0x6e, 0x65, 0x77, 0xaf, 0xd5, 0x17, 0x4f, 0x94, + 0x20, 0x4f, 0x23, 0x8d, 0x56, 0xbd, 0xb1, 0x53, 0xdf, 0xd9, 0x2c, 0x09, 0x28, 0x07, 0x73, 0xf2, + 0xde, 0x0e, 0x7d, 0x98, 0x42, 0x0b, 0x90, 0x93, 0x6b, 0xd5, 0xc6, 0x4e, 0xb5, 0xbe, 0x45, 0x08, + 0xd3, 0x28, 0x0f, 0xf3, 0xad, 0xdd, 0x46, 0xb3, 0x49, 0x9e, 0x32, 0x28, 0x0b, 0x33, 0x35, 0x59, + 0x6e, 0xc8, 0xa5, 0x19, 0xe9, 0xfb, 0x33, 0x50, 0xe0, 0xef, 0x72, 0xcf, 0xd1, 0x49, 0x72, 0xf2, + 0x36, 0x2c, 0xeb, 0xd8, 0x33, 0x5c, 0x72, 0x07, 0xa3, 0x47, 0x8a, 0x85, 0x03, 0x88, 0x8f, 0x45, + 0x8f, 0xd4, 0x1d, 0x58, 0x0b, 0x66, 0x24, 0x38, 0x2a, 0x16, 0x1d, 0x88, 0x9c, 0x63, 0x7b, 0xc0, + 0x5f, 0xed, 0xc1, 0x4a, 0x30, 0x3b, 0xee, 0x71, 0x66, 0x4f, 0xea, 0x71, 0x96, 0xf8, 0xfc, 0x58, + 0x7a, 0x53, 0xee, 0x13, 0x83, 0x38, 0x18, 0xc5, 0xd0, 0x03, 0xbf, 0x19, 0x11, 0x83, 0xb8, 0x92, + 0xba, 0x4e, 0x8e, 0x41, 0x30, 0x21, 0x52, 0x38, 0x60, 0x2e, 0xb4, 0xc4, 0x47, 0xea, 0x61, 0xfd, + 0xe0, 0x10, 0xce, 0x0d, 0x2e, 0x1f, 0x4d, 0x74, 0xb2, 0x43, 0x32, 0x90, 0x60, 0xd7, 0x68, 0x8e, + 0xb3, 0xd6, 0x87, 0x28, 0x9a, 0x3b, 0x5c, 0x87, 0x00, 0xaf, 0xd2, 0x73, 0x77, 0x40, 0xcf, 0x73, + 0x80, 0x6c, 0x2b, 0xf4, 0x7a, 0xbf, 
0x22, 0xc0, 0xd5, 0xf0, 0x75, 0x8c, 0x34, 0xab, 0xf9, 0xe7, + 0x37, 0xab, 0x97, 0x83, 0x57, 0x3a, 0xdc, 0xba, 0xde, 0x82, 0xd5, 0x3e, 0x38, 0xc1, 0x89, 0xe2, + 0x19, 0x75, 0x6c, 0x19, 0x7e, 0xa6, 0xa4, 0x3f, 0x9e, 0x85, 0x6c, 0xc3, 0xc1, 0x2e, 0x15, 0x2a, + 0x31, 0x26, 0x0c, 0x0c, 0xf3, 0x54, 0xc4, 0x30, 0x3f, 0x80, 0xa2, 0x1d, 0x4c, 0x62, 0xef, 0x6f, + 0x7a, 0x88, 0x0d, 0x0b, 0xd7, 0x5f, 0x27, 0xaf, 0x54, 0x2e, 0x84, 0x53, 0xe9, 0x1b, 0xfe, 0x46, + 0x68, 0x07, 0x33, 0x74, 0x8d, 0xcb, 0x23, 0xd6, 0xe8, 0xb3, 0x84, 0xab, 0x30, 0xab, 0x63, 0x5f, + 0x35, 0x4c, 0x7e, 0x84, 0xf8, 0x53, 0x82, 0x85, 0x9c, 0x49, 0xb2, 0x90, 0x31, 0x9f, 0x34, 0xdb, + 0xe7, 0x93, 0x2e, 0x40, 0xce, 0x57, 0xdd, 0x36, 0xf6, 0xd9, 0x30, 0x3b, 0xd2, 0xc0, 0x48, 0x94, + 0xe1, 0x1c, 0x80, 0xe7, 0xab, 0xae, 0xcf, 0x6c, 0x14, 0xb0, 0x84, 0x8a, 0x52, 0xa8, 0x89, 0xff, + 0x1a, 0xf5, 0x5f, 0x6c, 0x90, 0x85, 0x64, 0x73, 0xd8, 0xd2, 0xc9, 0x90, 0x24, 0x8f, 0x34, 0x3d, + 0x39, 0x98, 0x6b, 0xd6, 0x76, 0x36, 0x12, 0xac, 0xce, 0x3c, 0x64, 0x36, 0x1a, 0x3b, 0x35, 0x66, + 0x6e, 0x2a, 0x77, 0x1b, 0xf2, 0x2e, 0x35, 0x37, 0xd2, 0x7f, 0x4c, 0x41, 0x86, 0xaa, 0x74, 0x19, + 0x4a, 0xbb, 0x9f, 0x36, 0x6b, 0x7d, 0x0b, 0x22, 0x28, 0x56, 0xe5, 0x5a, 0x65, 0xb7, 0xa6, 0x54, + 0xb7, 0xf6, 0x5a, 0xbb, 0x35, 0xb9, 0x24, 0x10, 0xda, 0x46, 0x6d, 0xab, 0x16, 0xa1, 0x4d, 0x11, + 0xda, 0x5e, 0x73, 0x53, 0xae, 0x6c, 0xd4, 0x94, 0xed, 0x0a, 0xa5, 0x4d, 0xa3, 0x45, 0x28, 0x04, + 0xb4, 0x9d, 0xc6, 0x46, 0xad, 0x55, 0xca, 0x10, 0x36, 0xb9, 0xd6, 0xac, 0xd4, 0xe5, 0x70, 0xea, + 0x0c, 0x9b, 0xba, 0x11, 0xdd, 0x62, 0x96, 0x80, 0xe1, 0xdb, 0x92, 0x99, 0x4a, 0xb3, 0xd1, 0xd8, + 0x2a, 0xcd, 0x11, 0x2a, 0xdf, 0xb8, 0x47, 0x9d, 0x47, 0x67, 0x41, 0x6c, 0xd5, 0x76, 0x7b, 0x24, + 0x65, 0xbb, 0xb2, 0x53, 0xd9, 0xac, 0x6d, 0xd7, 0x76, 0x76, 0x4b, 0x59, 0xb4, 0x02, 0x8b, 0x95, + 0xbd, 0xdd, 0x86, 0xc2, 0xb7, 0x65, 0x40, 0x80, 0x28, 0x90, 0x92, 0xe3, 0x00, 0x73, 0xa8, 0x08, + 0x40, 0x16, 0xdb, 0xaa, 0xdc, 0xad, 0x6d, 0xb5, 0x4a, 0x79, 0xb4, 0x04, 
0x0b, 0xe4, 0x99, 0xc9, + 0xa4, 0x54, 0xf6, 0x76, 0xef, 0x97, 0x0a, 0x54, 0xfb, 0xb1, 0x1d, 0x5b, 0xf5, 0xcf, 0x6a, 0xa5, + 0x62, 0x48, 0xaf, 0xed, 0x3e, 0x6a, 0xc8, 0x0f, 0x95, 0x66, 0x63, 0xab, 0x5e, 0xfd, 0xb4, 0xb4, + 0x20, 0xfd, 0xbc, 0x00, 0xcb, 0x55, 0xea, 0xc4, 0xb9, 0x35, 0x97, 0xf1, 0x77, 0xba, 0xd8, 0xf3, + 0xc9, 0x51, 0x70, 0x5c, 0xfb, 0x73, 0xac, 0xf9, 0xc4, 0xfa, 0xb1, 0x0b, 0x94, 0xe5, 0x94, 0xba, + 0x9e, 0x78, 0x8b, 0xde, 0x85, 0x39, 0x1e, 0xba, 0xf0, 0x3a, 0xca, 0xd9, 0x61, 0x21, 0x80, 0x1c, + 0x30, 0x4b, 0x18, 0x16, 0x37, 0xb1, 0x3f, 0xfe, 0xfe, 0xb4, 0x3c, 0xc6, 0xf3, 0x14, 0x9d, 0x67, + 0xac, 0xd9, 0x20, 0x41, 0xa1, 0x69, 0xfe, 0x32, 0xf3, 0x55, 0x93, 0xde, 0x0a, 0xdd, 0x86, 0xd9, + 0x2e, 0xdd, 0x89, 0xa7, 0x88, 0xd2, 0x30, 0x45, 0x30, 0x4c, 0x32, 0x9f, 0x21, 0xfd, 0x93, 0x00, + 0x2b, 0x8c, 0x14, 0x66, 0x2e, 0x13, 0xc3, 0x79, 0x11, 0xf2, 0x31, 0x27, 0xc7, 0x7c, 0x35, 0x58, + 0x3d, 0xef, 0x76, 0x89, 0x73, 0x04, 0xb6, 0x97, 0x19, 0x1d, 0x9a, 0x05, 0x07, 0x6e, 0x3c, 0x5e, + 0x31, 0x9f, 0xed, 0xab, 0x98, 0x4b, 0xff, 0x2e, 0xc0, 0xb9, 0x16, 0xf6, 0x93, 0x7c, 0xd7, 0x4b, + 0x94, 0xeb, 0x01, 0xe4, 0xa2, 0x5e, 0x77, 0xe6, 0x94, 0x5e, 0x37, 0x3a, 0x59, 0xfa, 0xbe, 0x00, + 0x62, 0x0b, 0xfb, 0x5b, 0xb1, 0xf4, 0x7b, 0x72, 0xc2, 0x25, 0x14, 0x00, 0x32, 0x49, 0x05, 0x00, + 0xe9, 0x07, 0x02, 0xbc, 0xd2, 0xc2, 0xfe, 0x40, 0xe8, 0x34, 0x39, 0x68, 0xc9, 0x25, 0x87, 0x4c, + 0x4a, 0xc9, 0x41, 0xfa, 0xb1, 0x00, 0xab, 0x2d, 0xec, 0xc7, 0x82, 0xb2, 0x89, 0x61, 0x1b, 0xa8, + 0x4c, 0x64, 0x9e, 0xab, 0x32, 0x21, 0xfd, 0xa2, 0x00, 0x4b, 0xf4, 0x6d, 0xf3, 0xc0, 0x69, 0x72, + 0x88, 0x63, 0x55, 0x8a, 0x4c, 0x5f, 0x95, 0x42, 0xfa, 0x55, 0x01, 0x96, 0x98, 0x9d, 0x60, 0x11, + 0xd0, 0xe4, 0x70, 0x5c, 0x86, 0x62, 0x5f, 0x04, 0xc6, 0xde, 0x68, 0xa1, 0x13, 0x0b, 0xbd, 0xbe, + 0x9c, 0x82, 0x65, 0x72, 0xdc, 0x7a, 0x65, 0xab, 0x89, 0x21, 0xba, 0x0f, 0xb3, 0xaa, 0xe6, 0x07, + 0x48, 0x8a, 0x29, 0x05, 0x96, 0x24, 0x30, 0xeb, 0x15, 0x3a, 0x4f, 0xe6, 0xf3, 0xd1, 0x7b, 0xa1, + 0xa5, 0x3e, 
0x61, 0x29, 0x2e, 0x30, 0xd3, 0x1f, 0xc1, 0x2c, 0x5b, 0x8a, 0xc4, 0x32, 0x7b, 0x3b, + 0x0f, 0x77, 0x1a, 0x8f, 0x76, 0x58, 0x82, 0x45, 0xfc, 0x6c, 0xb3, 0xd2, 0x6a, 0x3d, 0x6a, 0xc8, + 0x1b, 0x25, 0x81, 0x78, 0xf9, 0xcd, 0xda, 0x4e, 0x4d, 0x26, 0x11, 0x43, 0x48, 0x9e, 0x92, 0x0e, + 0x60, 0x79, 0x03, 0x9b, 0x78, 0xf2, 0xce, 0x48, 0xba, 0x0f, 0x4b, 0x5b, 0x86, 0x17, 0xf8, 0xd7, + 0x31, 0xce, 0xab, 0xd4, 0x85, 0xe5, 0xf8, 0x4a, 0x9e, 0x63, 0x5b, 0x1e, 0x46, 0xef, 0xc3, 0x3c, + 0xdf, 0xce, 0x13, 0x05, 0x5a, 0xf1, 0x1a, 0xee, 0xf9, 0x43, 0x6e, 0xf4, 0x2a, 0x14, 0x3a, 0x86, + 0xe7, 0x11, 0x6b, 0x41, 0x76, 0x60, 0x3d, 0x85, 0xac, 0x9c, 0xe7, 0xc4, 0xcf, 0x08, 0x4d, 0x3a, + 0x84, 0xa5, 0x4d, 0xec, 0x87, 0x11, 0xf3, 0x18, 0x9a, 0xba, 0x04, 0xf9, 0x5e, 0x9c, 0x1f, 0xea, + 0x2a, 0x17, 0xd2, 0xea, 0xba, 0xf4, 0x00, 0x56, 0x88, 0x8c, 0xe1, 0x6e, 0xe3, 0xe8, 0xcb, 0x82, + 0xd5, 0xaa, 0x6a, 0x69, 0xd8, 0x7c, 0x41, 0xd8, 0x9f, 0xc1, 0x6a, 0x3f, 0x76, 0xfe, 0x86, 0x3e, + 0x02, 0x08, 0x19, 0x83, 0x77, 0x74, 0x7e, 0x78, 0x62, 0x22, 0x47, 0x66, 0x9c, 0xec, 0x3d, 0x3d, + 0x84, 0xd5, 0x4d, 0xec, 0x13, 0xe3, 0x8e, 0xdd, 0x71, 0xad, 0xb9, 0xf4, 0x0b, 0x53, 0x90, 0x8f, + 0x2e, 0x85, 0xde, 0x85, 0x33, 0x3a, 0x7e, 0xac, 0x76, 0x4d, 0x7f, 0xa0, 0x40, 0xc6, 0x16, 0x5c, + 0xe1, 0xc3, 0x7d, 0x05, 0xb2, 0x75, 0x58, 0x3a, 0x52, 0x4d, 0x23, 0x5e, 0x95, 0x08, 0x3e, 0x37, + 0x58, 0xa4, 0x43, 0x91, 0xa2, 0x84, 0xc7, 0xf2, 0x79, 0xb6, 0x4f, 0x24, 0xac, 0xc9, 0x04, 0xf9, + 0x3c, 0x1d, 0xe9, 0xe5, 0xf3, 0xd7, 0x80, 0x2d, 0x11, 0xe1, 0xf5, 0xc4, 0x19, 0xba, 0xf6, 0x02, + 0x1d, 0x08, 0x59, 0x3d, 0x74, 0x13, 0x56, 0x18, 0x6f, 0xdc, 0x9a, 0xb2, 0x4f, 0x09, 0xb2, 0x32, + 0x83, 0x19, 0x4b, 0x67, 0x3d, 0xe9, 0x0f, 0x04, 0x58, 0x61, 0xf1, 0xf9, 0xe4, 0xa3, 0xc1, 0xdb, + 0x90, 0x0d, 0xa3, 0x26, 0xee, 0x1d, 0x47, 0x94, 0xd4, 0xe7, 0x83, 0x88, 0x4a, 0xfa, 0x65, 0x01, + 0x56, 0x98, 0x3d, 0xfb, 0x7f, 0x10, 0xb5, 0x12, 0xe3, 0x4a, 0x2e, 0x42, 0x00, 0x65, 0x72, 0x3e, + 0x5a, 0xfa, 0x25, 0x01, 0xd0, 0x66, 0x2f, 0xba, 
0x7d, 0x99, 0x42, 0xff, 0x57, 0x06, 0xe6, 0x03, + 0x1c, 0x89, 0x95, 0x8f, 0xf7, 0x60, 0x96, 0x87, 0x3e, 0x53, 0x27, 0x6b, 0x5c, 0x71, 0xf6, 0x53, + 0x36, 0xc9, 0x86, 0x96, 0xc5, 0x45, 0x98, 0x0b, 0x6e, 0x2d, 0xab, 0x8c, 0x07, 0x8f, 0x69, 0xe5, + 0xd7, 0xc7, 0x69, 0xe5, 0xd7, 0x3b, 0x61, 0x9d, 0xa5, 0x4d, 0x63, 0x80, 0xd7, 0x86, 0x1e, 0xd5, + 0xd1, 0x05, 0xe7, 0x83, 0xa4, 0x72, 0x4a, 0x5f, 0x9a, 0x90, 0x19, 0x23, 0x4d, 0x40, 0x55, 0x80, + 0x8e, 0x6a, 0xa9, 0x6d, 0xdc, 0xc1, 0x96, 0xcf, 0xc3, 0x8d, 0x57, 0x53, 0x97, 0xda, 0x0e, 0x59, + 0xe5, 0xc8, 0x34, 0x92, 0xaf, 0x8f, 0x59, 0xe3, 0x5d, 0x05, 0xc4, 0x1f, 0x94, 0x47, 0xf5, 0xdd, + 0xfb, 0x0a, 0xab, 0xe8, 0x4e, 0xf7, 0xd7, 0x7e, 0x33, 0xb1, 0xda, 0xef, 0x4c, 0xaf, 0xf6, 0x3b, + 0x2b, 0xfd, 0xa1, 0x00, 0xc5, 0x38, 0x44, 0xe2, 0x9c, 0x88, 0xa8, 0x4a, 0xd7, 0x69, 0xbb, 0xaa, + 0x1e, 0x7c, 0x53, 0x42, 0xc5, 0xdf, 0x63, 0x24, 0x74, 0x81, 0xa9, 0x52, 0x71, 0xb1, 0xa3, 0x1a, + 0x2e, 0xef, 0x3d, 0x03, 0x21, 0xc9, 0x94, 0x82, 0x9a, 0xb0, 0xc0, 0xa7, 0x2b, 0xb6, 0x13, 0xd4, + 0x2a, 0xd3, 0x9b, 0x44, 0x95, 0xde, 0xda, 0x0d, 0xc6, 0x2e, 0x17, 0xbb, 0xb1, 0x67, 0xa9, 0x03, + 0x68, 0x90, 0x0b, 0x7d, 0x1d, 0xce, 0x44, 0xb1, 0x2a, 0x91, 0x8a, 0x17, 0xbb, 0x2d, 0xcb, 0x11, + 0xd8, 0xad, 0xb0, 0xf8, 0x35, 0xb2, 0x97, 0x2c, 0xfd, 0x8b, 0x00, 0x67, 0x23, 0x99, 0x6e, 0xe4, + 0x0d, 0xbe, 0xc4, 0x44, 0xf7, 0x2b, 0x39, 0x75, 0x5f, 0xb2, 0x44, 0x2d, 0x90, 0xac, 0x65, 0x3c, + 0xc1, 0x2f, 0x53, 0xa6, 0x73, 0xbc, 0xf9, 0xcb, 0xec, 0xd0, 0x0c, 0xb5, 0x43, 0x59, 0x2b, 0x30, + 0x40, 0xd2, 0x6f, 0x08, 0x70, 0x5e, 0xb6, 0x4d, 0x73, 0x5f, 0xd5, 0x0e, 0x03, 0xc8, 0xfc, 0x75, + 0xbe, 0x4c, 0xfb, 0xbc, 0xc7, 0x22, 0xcb, 0x88, 0x53, 0xe2, 0xc1, 0x59, 0xbc, 0x97, 0x2d, 0x9c, + 0xae, 0x97, 0x2d, 0x3d, 0x85, 0xa5, 0xa4, 0xda, 0x7f, 0xfa, 0x57, 0x3c, 0xaf, 0x41, 0xb1, 0x63, + 0x58, 0x51, 0x4b, 0xce, 0x3e, 0xce, 0xcc, 0x77, 0x0c, 0xab, 0x67, 0xc5, 0x09, 0x97, 0xfa, 0xc5, + 0xa0, 0xbd, 0xcf, 0x77, 0xd4, 0x2f, 0x42, 0x2e, 0xe9, 0xaf, 0xa7, 0xa0, 0x44, 0x92, 
0x61, 0xda, + 0xb0, 0x9a, 0x9c, 0x72, 0xf7, 0x07, 0x3b, 0xeb, 0xec, 0x33, 0xcf, 0x0f, 0xd2, 0x12, 0xbf, 0x18, + 0xa2, 0xe7, 0x6f, 0xb1, 0xcf, 0x24, 0xb7, 0xd8, 0xbf, 0x8a, 0x66, 0xdd, 0xf7, 0x04, 0x9a, 0x2e, + 0x47, 0x3a, 0xf0, 0x13, 0x53, 0x5f, 0xe4, 0x2c, 0x64, 0xe2, 0x5f, 0x21, 0x7d, 0x0e, 0xab, 0xd4, + 0xc2, 0xd5, 0x9b, 0x32, 0xff, 0x1e, 0x78, 0x72, 0xa1, 0x52, 0x07, 0xbe, 0x56, 0xb5, 0x3b, 0x0e, + 0x89, 0x11, 0x5f, 0xc4, 0x76, 0x87, 0xb0, 0x38, 0xf0, 0x75, 0x2b, 0x79, 0xc9, 0x91, 0xef, 0x5b, + 0xf9, 0xc1, 0x26, 0xbb, 0x4d, 0xcb, 0x25, 0x35, 0xca, 0x4d, 0xae, 0xc0, 0x55, 0x88, 0xd2, 0x58, + 0x1e, 0xc0, 0x00, 0x2c, 0x44, 0xe8, 0xb4, 0xc8, 0xf9, 0x13, 0x01, 0xce, 0x10, 0x03, 0x19, 0xfb, + 0x12, 0x62, 0x62, 0xef, 0x73, 0xf0, 0xf3, 0x8c, 0xcc, 0x73, 0x7e, 0x9e, 0x71, 0xf3, 0xb7, 0xaf, + 0x42, 0x91, 0x27, 0x4c, 0xcc, 0xf6, 0xbb, 0xe8, 0x77, 0x04, 0xc8, 0x47, 0xd3, 0x7c, 0x94, 0x1c, + 0xef, 0x24, 0xd4, 0x14, 0xd6, 0xae, 0x9e, 0x80, 0x93, 0x19, 0x3d, 0xe9, 0xbd, 0xef, 0xfd, 0xf3, + 0x7f, 0xfe, 0xfa, 0xd4, 0x0d, 0x54, 0x2e, 0x1f, 0xdd, 0x28, 0x73, 0x6d, 0x78, 0xe5, 0xa7, 0x3d, + 0x4d, 0x3d, 0x2b, 0xd3, 0x4c, 0xb3, 0xfc, 0x94, 0xfc, 0xf3, 0xac, 0x1c, 0x96, 0x0c, 0x7e, 0x4b, + 0x00, 0xe8, 0xb5, 0x0b, 0x50, 0xf2, 0x17, 0xa7, 0x03, 0xfd, 0x84, 0xb5, 0xa1, 0x15, 0x09, 0x69, + 0x83, 0xa2, 0xf9, 0x08, 0xdd, 0x39, 0x25, 0x9a, 0xf2, 0xd3, 0xde, 0x7b, 0x7a, 0x86, 0x7e, 0x53, + 0x80, 0x42, 0xac, 0x99, 0x82, 0x92, 0x15, 0x92, 0xd4, 0x70, 0x59, 0x1b, 0x91, 0x8e, 0x4b, 0xb7, + 0x29, 0xc4, 0x5b, 0xd2, 0x69, 0x15, 0x76, 0x5b, 0xb8, 0x86, 0x7e, 0x5f, 0x80, 0x42, 0xac, 0xf5, + 0x91, 0x02, 0x2c, 0xa9, 0x3d, 0x32, 0x12, 0xd8, 0x26, 0x05, 0x56, 0x59, 0x1b, 0x4b, 0x77, 0x04, + 0xe5, 0x3f, 0x08, 0x50, 0x8c, 0x77, 0x3e, 0xd0, 0xb5, 0x21, 0x30, 0xfb, 0x72, 0xae, 0x91, 0x38, + 0xdb, 0x14, 0xa7, 0x2a, 0xfd, 0xec, 0x38, 0x38, 0xcb, 0xa1, 0xe3, 0x2d, 0x3f, 0x8d, 0xfa, 0xfb, + 0x67, 0x65, 0x56, 0x17, 0x24, 0x72, 0xfc, 0x5b, 0x3c, 0x5a, 0x8a, 0x7a, 0xe5, 0x9b, 0x69, 0xfe, + 0x2a, 0xbd, 0x3d, 0x32, 
0x52, 0x2e, 0x93, 0xca, 0xf5, 0x58, 0x52, 0x27, 0x23, 0x57, 0x24, 0x85, + 0x21, 0xc2, 0xfd, 0x85, 0x00, 0x8b, 0x03, 0xcd, 0x0e, 0xf4, 0x56, 0xaa, 0x1f, 0x4e, 0x6a, 0x8a, + 0x8c, 0x14, 0xa9, 0x41, 0x45, 0xaa, 0x4b, 0x1b, 0x63, 0x89, 0xc4, 0xdb, 0x21, 0x04, 0xf5, 0xdf, + 0x31, 0x67, 0x3b, 0xf8, 0x15, 0x49, 0x7a, 0xe5, 0x38, 0xa5, 0x6b, 0x32, 0x12, 0xbb, 0x4c, 0xb1, + 0x6f, 0x49, 0x9b, 0x63, 0x61, 0xef, 0x35, 0x4b, 0x08, 0xfc, 0x3f, 0x11, 0x60, 0xa1, 0xaf, 0x51, + 0x82, 0xae, 0xa7, 0x21, 0x4f, 0x68, 0xa7, 0x8c, 0x04, 0xbd, 0x43, 0x41, 0xdf, 0x97, 0xaa, 0x63, + 0x81, 0x66, 0x7d, 0x12, 0x02, 0xf8, 0x4b, 0x01, 0xf2, 0xd1, 0x26, 0x49, 0x8a, 0x0f, 0x49, 0xe8, + 0xa3, 0x8c, 0x84, 0xfa, 0x4d, 0x0a, 0xf5, 0xa1, 0x74, 0x6f, 0xcc, 0xb3, 0xc1, 0xb7, 0x25, 0x68, + 0xff, 0x48, 0x80, 0x7c, 0xb4, 0x95, 0x92, 0x82, 0x36, 0xa1, 0xdb, 0xf2, 0x82, 0x14, 0xcb, 0x0a, + 0x86, 0x04, 0xea, 0x9f, 0x0b, 0x50, 0x88, 0xf5, 0x35, 0x52, 0x2c, 0x79, 0x52, 0xef, 0x63, 0x24, + 0xd8, 0x3d, 0x0a, 0xb6, 0x21, 0x3d, 0x18, 0xcb, 0x92, 0x7b, 0xd1, 0xad, 0x09, 0xe6, 0xdf, 0x15, + 0xa0, 0x10, 0xeb, 0x75, 0xa4, 0x60, 0x4e, 0xea, 0x87, 0x8c, 0xc4, 0xcc, 0x3d, 0xf7, 0xb5, 0xf1, + 0x3c, 0xf7, 0x0f, 0x05, 0x28, 0xc6, 0x4b, 0xe7, 0x29, 0xae, 0x27, 0xb1, 0x37, 0xb0, 0x76, 0xfd, + 0x44, 0xbc, 0x3c, 0xf2, 0xf9, 0x80, 0x22, 0x7e, 0x07, 0xdd, 0x38, 0x21, 0xe2, 0x48, 0x19, 0xfe, + 0xf7, 0x04, 0xc8, 0x47, 0x5b, 0x21, 0x29, 0x07, 0x35, 0xa1, 0x5b, 0x32, 0x52, 0x8f, 0xf7, 0x29, + 0xaa, 0xbb, 0xe8, 0xe3, 0x53, 0xa3, 0x2a, 0x3f, 0x8d, 0xf6, 0x25, 0x9e, 0xa1, 0x1f, 0x09, 0xb0, + 0xd0, 0xd7, 0xf6, 0x48, 0x31, 0x56, 0xc9, 0xcd, 0x91, 0xb5, 0xd5, 0x80, 0x39, 0xf8, 0x5b, 0xc4, + 0xf5, 0x5a, 0xc7, 0xf1, 0x8f, 0x4f, 0x6d, 0x59, 0x53, 0x21, 0xde, 0xd6, 0xe8, 0xc6, 0xe4, 0x6c, + 0xfe, 0x40, 0x80, 0x85, 0xbe, 0xa6, 0x45, 0x0a, 0xd8, 0xe4, 0xd6, 0xc6, 0xda, 0xa5, 0x94, 0xeb, + 0xd7, 0xe3, 0x94, 0x3e, 0xa4, 0xb8, 0xbf, 0x8e, 0xde, 0x39, 0x21, 0x6e, 0x8f, 0x4e, 0xe6, 0xb5, + 0xd7, 0x9f, 0x08, 0x50, 0x88, 0x95, 0x0d, 0x50, 0x7a, 0x90, 
0xdd, 0x5f, 0xef, 0x5e, 0xbb, 0x76, + 0x12, 0x56, 0x7e, 0x2c, 0xb9, 0xa5, 0x42, 0xf7, 0xbe, 0x9a, 0x30, 0x02, 0xfd, 0xa5, 0x00, 0xb9, + 0x48, 0x65, 0x1c, 0xbd, 0x91, 0xa6, 0xd5, 0xfe, 0x38, 0x6e, 0x78, 0xe9, 0x43, 0xfa, 0x16, 0xc5, + 0xf9, 0x08, 0xed, 0x4d, 0x24, 0xdc, 0x41, 0x7f, 0x2a, 0x40, 0x31, 0xde, 0x70, 0x49, 0xb1, 0x04, + 0x89, 0x5d, 0x99, 0x17, 0xe4, 0xbd, 0x42, 0xf4, 0xe4, 0x08, 0xff, 0xad, 0x00, 0xc5, 0x78, 0xeb, + 0x25, 0x05, 0x71, 0x62, 0x7f, 0x66, 0x24, 0x62, 0xae, 0xef, 0x6b, 0x13, 0xd2, 0xf7, 0xbf, 0x0a, + 0x70, 0x26, 0xa5, 0x58, 0x87, 0x92, 0x3f, 0x2f, 0x1d, 0x5e, 0xda, 0x1b, 0x29, 0x8f, 0x41, 0xe5, + 0xd1, 0xa4, 0x6f, 0x4f, 0x44, 0x9e, 0xdb, 0x2e, 0x47, 0xc7, 0x13, 0x81, 0x95, 0xc4, 0x82, 0x30, + 0xba, 0x31, 0x2a, 0x0f, 0x18, 0x28, 0x1e, 0x8f, 0x94, 0xcb, 0xa2, 0x72, 0x1d, 0x48, 0xda, 0x64, + 0xd2, 0x00, 0xea, 0xd5, 0x03, 0x4c, 0x44, 0xb8, 0x1f, 0x09, 0x90, 0x0d, 0x0b, 0x6d, 0xe8, 0xf2, + 0x89, 0x0a, 0x71, 0x23, 0x85, 0xf8, 0x84, 0x0a, 0xd1, 0x94, 0x1e, 0x8e, 0x25, 0x44, 0xbc, 0xb2, + 0xc7, 0x03, 0xe8, 0x42, 0xac, 0xd8, 0x96, 0x1e, 0x36, 0x0d, 0x14, 0xe4, 0x5e, 0x50, 0xc4, 0xdf, + 0xfb, 0xf3, 0x1b, 0x02, 0xf8, 0xaf, 0x48, 0xc4, 0x1f, 0xaf, 0xcc, 0xa5, 0x45, 0xfc, 0x89, 0xf5, + 0xbb, 0x91, 0xa0, 0x1f, 0x51, 0xd0, 0xdf, 0x94, 0xb6, 0xc6, 0x8b, 0xf5, 0xe8, 0xe6, 0x4e, 0xb0, + 0x39, 0x41, 0xfe, 0xf7, 0x02, 0xa0, 0xc1, 0x3a, 0x1f, 0x5a, 0x4f, 0x36, 0xa2, 0x69, 0x05, 0xc1, + 0x91, 0xf8, 0x3f, 0xa3, 0xf8, 0x77, 0xa5, 0xc6, 0x58, 0xf8, 0xb5, 0x60, 0xff, 0x98, 0x08, 0xff, + 0xc8, 0xd2, 0xad, 0x68, 0xbb, 0x23, 0x3d, 0xdd, 0x4a, 0x68, 0x8a, 0x8c, 0x04, 0x7f, 0x40, 0xc1, + 0xef, 0x4b, 0xdf, 0x9a, 0xd8, 0x5d, 0x25, 0x68, 0x88, 0x28, 0x7f, 0x23, 0xd0, 0x02, 0x7d, 0xfc, + 0xcf, 0x5b, 0xdf, 0x4c, 0x95, 0x25, 0xa1, 0x7e, 0x39, 0x52, 0x98, 0x9f, 0xa6, 0xc2, 0xc8, 0xd2, + 0xf6, 0xb8, 0x59, 0x43, 0x6c, 0xf7, 0xdb, 0xc2, 0xb5, 0xbb, 0x16, 0x9c, 0xd1, 0xec, 0x4e, 0xd2, + 0xf6, 0x77, 0x97, 0x78, 0x9a, 0xc0, 0x93, 0xf3, 0x26, 0x89, 0x14, 0x9b, 0xc2, 0x67, 0x77, 0x38, + 
0x6f, 0xdb, 0x36, 0x55, 0xab, 0xbd, 0x6e, 0xbb, 0xed, 0x72, 0x1b, 0x5b, 0x34, 0x8e, 0x2c, 0xb3, + 0x21, 0xd5, 0x31, 0xbc, 0xd8, 0x7f, 0xa1, 0xf1, 0x61, 0xf8, 0xb0, 0x3f, 0x4b, 0x19, 0xdf, 0xf9, + 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0e, 0xd3, 0x30, 0x4e, 0x6a, 0x43, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/datastore/admin/v1beta1/datastore_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/datastore/admin/v1beta1/datastore_admin.pb.go new file mode 100644 index 000000000..e9f3136de --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/datastore/admin/v1beta1/datastore_admin.pb.go @@ -0,0 +1,732 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/datastore/admin/v1beta1/datastore_admin.proto + +/* +Package admin is a generated protocol buffer package. + +It is generated from these files: + google/datastore/admin/v1beta1/datastore_admin.proto + +It has these top-level messages: + CommonMetadata + Progress + ExportEntitiesRequest + ImportEntitiesRequest + ExportEntitiesResponse + ExportEntitiesMetadata + ImportEntitiesMetadata + EntityFilter +*/ +package admin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Operation types. +type OperationType int32 + +const ( + // Unspecified. + OperationType_OPERATION_TYPE_UNSPECIFIED OperationType = 0 + // ExportEntities. + OperationType_EXPORT_ENTITIES OperationType = 1 + // ImportEntities. + OperationType_IMPORT_ENTITIES OperationType = 2 +) + +var OperationType_name = map[int32]string{ + 0: "OPERATION_TYPE_UNSPECIFIED", + 1: "EXPORT_ENTITIES", + 2: "IMPORT_ENTITIES", +} +var OperationType_value = map[string]int32{ + "OPERATION_TYPE_UNSPECIFIED": 0, + "EXPORT_ENTITIES": 1, + "IMPORT_ENTITIES": 2, +} + +func (x OperationType) String() string { + return proto.EnumName(OperationType_name, int32(x)) +} +func (OperationType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// The various possible states for an ongoing Operation. +type CommonMetadata_State int32 + +const ( + // Unspecified. + CommonMetadata_STATE_UNSPECIFIED CommonMetadata_State = 0 + // Request is being prepared for processing. + CommonMetadata_INITIALIZING CommonMetadata_State = 1 + // Request is actively being processed. + CommonMetadata_PROCESSING CommonMetadata_State = 2 + // Request is in the process of being cancelled after user called + // google.longrunning.Operations.CancelOperation on the operation. + CommonMetadata_CANCELLING CommonMetadata_State = 3 + // Request has been processed and is in its finalization stage. + CommonMetadata_FINALIZING CommonMetadata_State = 4 + // Request has completed successfully. + CommonMetadata_SUCCESSFUL CommonMetadata_State = 5 + // Request has finished being processed, but encountered an error. + CommonMetadata_FAILED CommonMetadata_State = 6 + // Request has finished being cancelled after user called + // google.longrunning.Operations.CancelOperation. 
+ CommonMetadata_CANCELLED CommonMetadata_State = 7 +) + +var CommonMetadata_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "INITIALIZING", + 2: "PROCESSING", + 3: "CANCELLING", + 4: "FINALIZING", + 5: "SUCCESSFUL", + 6: "FAILED", + 7: "CANCELLED", +} +var CommonMetadata_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "INITIALIZING": 1, + "PROCESSING": 2, + "CANCELLING": 3, + "FINALIZING": 4, + "SUCCESSFUL": 5, + "FAILED": 6, + "CANCELLED": 7, +} + +func (x CommonMetadata_State) String() string { + return proto.EnumName(CommonMetadata_State_name, int32(x)) +} +func (CommonMetadata_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// Metadata common to all Datastore Admin operations. +type CommonMetadata struct { + // The time that work began on the operation. + StartTime *google_protobuf3.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time the operation ended, either successfully or otherwise. + EndTime *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // The type of the operation. Can be used as a filter in + // ListOperationsRequest. + OperationType OperationType `protobuf:"varint,3,opt,name=operation_type,json=operationType,enum=google.datastore.admin.v1beta1.OperationType" json:"operation_type,omitempty"` + // The client-assigned labels which were provided when the operation was + // created. May also include additional labels. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The current state of the Operation. 
+ State CommonMetadata_State `protobuf:"varint,5,opt,name=state,enum=google.datastore.admin.v1beta1.CommonMetadata_State" json:"state,omitempty"` +} + +func (m *CommonMetadata) Reset() { *m = CommonMetadata{} } +func (m *CommonMetadata) String() string { return proto.CompactTextString(m) } +func (*CommonMetadata) ProtoMessage() {} +func (*CommonMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *CommonMetadata) GetStartTime() *google_protobuf3.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *CommonMetadata) GetEndTime() *google_protobuf3.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *CommonMetadata) GetOperationType() OperationType { + if m != nil { + return m.OperationType + } + return OperationType_OPERATION_TYPE_UNSPECIFIED +} + +func (m *CommonMetadata) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *CommonMetadata) GetState() CommonMetadata_State { + if m != nil { + return m.State + } + return CommonMetadata_STATE_UNSPECIFIED +} + +// Measures the progress of a particular metric. +type Progress struct { + // The amount of work that has been completed. Note that this may be greater + // than work_estimated. + WorkCompleted int64 `protobuf:"varint,1,opt,name=work_completed,json=workCompleted" json:"work_completed,omitempty"` + // An estimate of how much work needs to be performed. May be zero if the + // work estimate is unavailable. 
+ WorkEstimated int64 `protobuf:"varint,2,opt,name=work_estimated,json=workEstimated" json:"work_estimated,omitempty"` +} + +func (m *Progress) Reset() { *m = Progress{} } +func (m *Progress) String() string { return proto.CompactTextString(m) } +func (*Progress) ProtoMessage() {} +func (*Progress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Progress) GetWorkCompleted() int64 { + if m != nil { + return m.WorkCompleted + } + return 0 +} + +func (m *Progress) GetWorkEstimated() int64 { + if m != nil { + return m.WorkEstimated + } + return 0 +} + +// The request for +// [google.datastore.admin.v1beta1.DatastoreAdmin.ExportEntities][google.datastore.admin.v1beta1.DatastoreAdmin.ExportEntities]. +type ExportEntitiesRequest struct { + // Project ID against which to make the request. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Client-assigned labels. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Description of what data from the project is included in the export. + EntityFilter *EntityFilter `protobuf:"bytes,3,opt,name=entity_filter,json=entityFilter" json:"entity_filter,omitempty"` + // Location for the export metadata and data files. + // + // The full resource URL of the external storage location. Currently, only + // Google Cloud Storage is supported. So output_url_prefix should be of the + // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the + // name of the Cloud Storage bucket and `NAMESPACE_PATH` is an optional Cloud + // Storage namespace path (this is not a Cloud Datastore namespace). For more + // information about Cloud Storage namespace paths, see + // [Object name + // considerations](https://cloud.google.com/storage/docs/naming#object-considerations). 
+ // + // The resulting files will be nested deeper than the specified URL prefix. + // The final output URL will be provided in the + // [google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url][google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url] + // field. That value should be used for subsequent ImportEntities operations. + // + // By nesting the data files deeper, the same Cloud Storage bucket can be used + // in multiple ExportEntities operations without conflict. + OutputUrlPrefix string `protobuf:"bytes,4,opt,name=output_url_prefix,json=outputUrlPrefix" json:"output_url_prefix,omitempty"` +} + +func (m *ExportEntitiesRequest) Reset() { *m = ExportEntitiesRequest{} } +func (m *ExportEntitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ExportEntitiesRequest) ProtoMessage() {} +func (*ExportEntitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExportEntitiesRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ExportEntitiesRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ExportEntitiesRequest) GetEntityFilter() *EntityFilter { + if m != nil { + return m.EntityFilter + } + return nil +} + +func (m *ExportEntitiesRequest) GetOutputUrlPrefix() string { + if m != nil { + return m.OutputUrlPrefix + } + return "" +} + +// The request for +// [google.datastore.admin.v1beta1.DatastoreAdmin.ImportEntities][google.datastore.admin.v1beta1.DatastoreAdmin.ImportEntities]. +type ImportEntitiesRequest struct { + // Project ID against which to make the request. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Client-assigned labels. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The full resource URL of the external storage location. Currently, only + // Google Cloud Storage is supported. So input_url should be of the form: + // `gs://BUCKET_NAME[/NAMESPACE_PATH]/OVERALL_EXPORT_METADATA_FILE`, where + // `BUCKET_NAME` is the name of the Cloud Storage bucket, `NAMESPACE_PATH` is + // an optional Cloud Storage namespace path (this is not a Cloud Datastore + // namespace), and `OVERALL_EXPORT_METADATA_FILE` is the metadata file written + // by the ExportEntities operation. For more information about Cloud Storage + // namespace paths, see + // [Object name + // considerations](https://cloud.google.com/storage/docs/naming#object-considerations). + // + // For more information, see + // [google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url][google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url]. + InputUrl string `protobuf:"bytes,3,opt,name=input_url,json=inputUrl" json:"input_url,omitempty"` + // Optionally specify which kinds/namespaces are to be imported. If provided, + // the list must be a subset of the EntityFilter used in creating the export, + // otherwise a FAILED_PRECONDITION error will be returned. If no filter is + // specified then all entities from the export are imported. 
+ EntityFilter *EntityFilter `protobuf:"bytes,4,opt,name=entity_filter,json=entityFilter" json:"entity_filter,omitempty"` +} + +func (m *ImportEntitiesRequest) Reset() { *m = ImportEntitiesRequest{} } +func (m *ImportEntitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ImportEntitiesRequest) ProtoMessage() {} +func (*ImportEntitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ImportEntitiesRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ImportEntitiesRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ImportEntitiesRequest) GetInputUrl() string { + if m != nil { + return m.InputUrl + } + return "" +} + +func (m *ImportEntitiesRequest) GetEntityFilter() *EntityFilter { + if m != nil { + return m.EntityFilter + } + return nil +} + +// The response for +// [google.datastore.admin.v1beta1.DatastoreAdmin.ExportEntities][google.datastore.admin.v1beta1.DatastoreAdmin.ExportEntities]. +type ExportEntitiesResponse struct { + // Location of the output metadata file. This can be used to begin an import + // into Cloud Datastore (this project or another project). See + // [google.datastore.admin.v1beta1.ImportEntitiesRequest.input_url][google.datastore.admin.v1beta1.ImportEntitiesRequest.input_url]. + // Only present if the operation completed successfully. 
+ OutputUrl string `protobuf:"bytes,1,opt,name=output_url,json=outputUrl" json:"output_url,omitempty"` +} + +func (m *ExportEntitiesResponse) Reset() { *m = ExportEntitiesResponse{} } +func (m *ExportEntitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ExportEntitiesResponse) ProtoMessage() {} +func (*ExportEntitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ExportEntitiesResponse) GetOutputUrl() string { + if m != nil { + return m.OutputUrl + } + return "" +} + +// Metadata for ExportEntities operations. +type ExportEntitiesMetadata struct { + // Metadata common to all Datastore Admin operations. + Common *CommonMetadata `protobuf:"bytes,1,opt,name=common" json:"common,omitempty"` + // An estimate of the number of entities processed. + ProgressEntities *Progress `protobuf:"bytes,2,opt,name=progress_entities,json=progressEntities" json:"progress_entities,omitempty"` + // An estimate of the number of bytes processed. + ProgressBytes *Progress `protobuf:"bytes,3,opt,name=progress_bytes,json=progressBytes" json:"progress_bytes,omitempty"` + // Description of which entities are being exported. + EntityFilter *EntityFilter `protobuf:"bytes,4,opt,name=entity_filter,json=entityFilter" json:"entity_filter,omitempty"` + // Location for the export metadata and data files. This will be the same + // value as the + // [google.datastore.admin.v1beta1.ExportEntitiesRequest.output_url_prefix][google.datastore.admin.v1beta1.ExportEntitiesRequest.output_url_prefix] + // field. The final output location is provided in + // [google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url][google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url]. 
+ OutputUrlPrefix string `protobuf:"bytes,5,opt,name=output_url_prefix,json=outputUrlPrefix" json:"output_url_prefix,omitempty"` +} + +func (m *ExportEntitiesMetadata) Reset() { *m = ExportEntitiesMetadata{} } +func (m *ExportEntitiesMetadata) String() string { return proto.CompactTextString(m) } +func (*ExportEntitiesMetadata) ProtoMessage() {} +func (*ExportEntitiesMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ExportEntitiesMetadata) GetCommon() *CommonMetadata { + if m != nil { + return m.Common + } + return nil +} + +func (m *ExportEntitiesMetadata) GetProgressEntities() *Progress { + if m != nil { + return m.ProgressEntities + } + return nil +} + +func (m *ExportEntitiesMetadata) GetProgressBytes() *Progress { + if m != nil { + return m.ProgressBytes + } + return nil +} + +func (m *ExportEntitiesMetadata) GetEntityFilter() *EntityFilter { + if m != nil { + return m.EntityFilter + } + return nil +} + +func (m *ExportEntitiesMetadata) GetOutputUrlPrefix() string { + if m != nil { + return m.OutputUrlPrefix + } + return "" +} + +// Metadata for ImportEntities operations. +type ImportEntitiesMetadata struct { + // Metadata common to all Datastore Admin operations. + Common *CommonMetadata `protobuf:"bytes,1,opt,name=common" json:"common,omitempty"` + // An estimate of the number of entities processed. + ProgressEntities *Progress `protobuf:"bytes,2,opt,name=progress_entities,json=progressEntities" json:"progress_entities,omitempty"` + // An estimate of the number of bytes processed. + ProgressBytes *Progress `protobuf:"bytes,3,opt,name=progress_bytes,json=progressBytes" json:"progress_bytes,omitempty"` + // Description of which entities are being imported. + EntityFilter *EntityFilter `protobuf:"bytes,4,opt,name=entity_filter,json=entityFilter" json:"entity_filter,omitempty"` + // The location of the import metadata file. 
This will be the same value as + // the [google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url][google.datastore.admin.v1beta1.ExportEntitiesResponse.output_url] + // field. + InputUrl string `protobuf:"bytes,5,opt,name=input_url,json=inputUrl" json:"input_url,omitempty"` +} + +func (m *ImportEntitiesMetadata) Reset() { *m = ImportEntitiesMetadata{} } +func (m *ImportEntitiesMetadata) String() string { return proto.CompactTextString(m) } +func (*ImportEntitiesMetadata) ProtoMessage() {} +func (*ImportEntitiesMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ImportEntitiesMetadata) GetCommon() *CommonMetadata { + if m != nil { + return m.Common + } + return nil +} + +func (m *ImportEntitiesMetadata) GetProgressEntities() *Progress { + if m != nil { + return m.ProgressEntities + } + return nil +} + +func (m *ImportEntitiesMetadata) GetProgressBytes() *Progress { + if m != nil { + return m.ProgressBytes + } + return nil +} + +func (m *ImportEntitiesMetadata) GetEntityFilter() *EntityFilter { + if m != nil { + return m.EntityFilter + } + return nil +} + +func (m *ImportEntitiesMetadata) GetInputUrl() string { + if m != nil { + return m.InputUrl + } + return "" +} + +// Identifies a subset of entities in a project. This is specified as +// combinations of kinds and namespaces (either or both of which may be all, as +// described in the following examples). +// Example usage: +// +// Entire project: +// kinds=[], namespace_ids=[] +// +// Kinds Foo and Bar in all namespaces: +// kinds=['Foo', 'Bar'], namespace_ids=[] +// +// Kinds Foo and Bar only in the default namespace: +// kinds=['Foo', 'Bar'], namespace_ids=[''] +// +// Kinds Foo and Bar in both the default and Baz namespaces: +// kinds=['Foo', 'Bar'], namespace_ids=['', 'Baz'] +// +// The entire Baz namespace: +// kinds=[], namespace_ids=['Baz'] +type EntityFilter struct { + // If empty, then this represents all kinds. 
+ Kinds []string `protobuf:"bytes,1,rep,name=kinds" json:"kinds,omitempty"` + // An empty list represents all namespaces. This is the preferred + // usage for projects that don't use namespaces. + // + // An empty string element represents the default namespace. This should be + // used if the project has data in non-default namespaces, but doesn't want to + // include them. + // Each namespace in this list must be unique. + NamespaceIds []string `protobuf:"bytes,2,rep,name=namespace_ids,json=namespaceIds" json:"namespace_ids,omitempty"` +} + +func (m *EntityFilter) Reset() { *m = EntityFilter{} } +func (m *EntityFilter) String() string { return proto.CompactTextString(m) } +func (*EntityFilter) ProtoMessage() {} +func (*EntityFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *EntityFilter) GetKinds() []string { + if m != nil { + return m.Kinds + } + return nil +} + +func (m *EntityFilter) GetNamespaceIds() []string { + if m != nil { + return m.NamespaceIds + } + return nil +} + +func init() { + proto.RegisterType((*CommonMetadata)(nil), "google.datastore.admin.v1beta1.CommonMetadata") + proto.RegisterType((*Progress)(nil), "google.datastore.admin.v1beta1.Progress") + proto.RegisterType((*ExportEntitiesRequest)(nil), "google.datastore.admin.v1beta1.ExportEntitiesRequest") + proto.RegisterType((*ImportEntitiesRequest)(nil), "google.datastore.admin.v1beta1.ImportEntitiesRequest") + proto.RegisterType((*ExportEntitiesResponse)(nil), "google.datastore.admin.v1beta1.ExportEntitiesResponse") + proto.RegisterType((*ExportEntitiesMetadata)(nil), "google.datastore.admin.v1beta1.ExportEntitiesMetadata") + proto.RegisterType((*ImportEntitiesMetadata)(nil), "google.datastore.admin.v1beta1.ImportEntitiesMetadata") + proto.RegisterType((*EntityFilter)(nil), "google.datastore.admin.v1beta1.EntityFilter") + proto.RegisterEnum("google.datastore.admin.v1beta1.OperationType", OperationType_name, OperationType_value) + 
proto.RegisterEnum("google.datastore.admin.v1beta1.CommonMetadata_State", CommonMetadata_State_name, CommonMetadata_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DatastoreAdmin service + +type DatastoreAdminClient interface { + // Exports a copy of all or a subset of entities from Google Cloud Datastore + // to another storage system, such as Google Cloud Storage. Recent updates to + // entities may not be reflected in the export. The export occurs in the + // background and its progress can be monitored and managed via the + // Operation resource that is created. The output of an export may only be + // used once the associated operation is done. If an export operation is + // cancelled before completion it may leave partial data behind in Google + // Cloud Storage. + ExportEntities(ctx context.Context, in *ExportEntitiesRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Imports entities into Google Cloud Datastore. Existing entities with the + // same key are overwritten. The import occurs in the background and its + // progress can be monitored and managed via the Operation resource that is + // created. If an ImportEntities operation is cancelled, it is possible + // that a subset of the data has already been imported to Cloud Datastore. 
+ ImportEntities(ctx context.Context, in *ImportEntitiesRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type datastoreAdminClient struct { + cc *grpc.ClientConn +} + +func NewDatastoreAdminClient(cc *grpc.ClientConn) DatastoreAdminClient { + return &datastoreAdminClient{cc} +} + +func (c *datastoreAdminClient) ExportEntities(ctx context.Context, in *ExportEntitiesRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.datastore.admin.v1beta1.DatastoreAdmin/ExportEntities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreAdminClient) ImportEntities(ctx context.Context, in *ImportEntitiesRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.datastore.admin.v1beta1.DatastoreAdmin/ImportEntities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DatastoreAdmin service + +type DatastoreAdminServer interface { + // Exports a copy of all or a subset of entities from Google Cloud Datastore + // to another storage system, such as Google Cloud Storage. Recent updates to + // entities may not be reflected in the export. The export occurs in the + // background and its progress can be monitored and managed via the + // Operation resource that is created. The output of an export may only be + // used once the associated operation is done. If an export operation is + // cancelled before completion it may leave partial data behind in Google + // Cloud Storage. + ExportEntities(context.Context, *ExportEntitiesRequest) (*google_longrunning.Operation, error) + // Imports entities into Google Cloud Datastore. Existing entities with the + // same key are overwritten. 
The import occurs in the background and its + // progress can be monitored and managed via the Operation resource that is + // created. If an ImportEntities operation is cancelled, it is possible + // that a subset of the data has already been imported to Cloud Datastore. + ImportEntities(context.Context, *ImportEntitiesRequest) (*google_longrunning.Operation, error) +} + +func RegisterDatastoreAdminServer(s *grpc.Server, srv DatastoreAdminServer) { + s.RegisterService(&_DatastoreAdmin_serviceDesc, srv) +} + +func _DatastoreAdmin_ExportEntities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportEntitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreAdminServer).ExportEntities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.admin.v1beta1.DatastoreAdmin/ExportEntities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreAdminServer).ExportEntities(ctx, req.(*ExportEntitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatastoreAdmin_ImportEntities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportEntitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreAdminServer).ImportEntities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.admin.v1beta1.DatastoreAdmin/ImportEntities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreAdminServer).ImportEntities(ctx, req.(*ImportEntitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DatastoreAdmin_serviceDesc = grpc.ServiceDesc{ + ServiceName: 
"google.datastore.admin.v1beta1.DatastoreAdmin", + HandlerType: (*DatastoreAdminServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ExportEntities", + Handler: _DatastoreAdmin_ExportEntities_Handler, + }, + { + MethodName: "ImportEntities", + Handler: _DatastoreAdmin_ImportEntities_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/datastore/admin/v1beta1/datastore_admin.proto", +} + +func init() { + proto.RegisterFile("google/datastore/admin/v1beta1/datastore_admin.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 996 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x8f, 0xdb, 0x44, + 0x14, 0xc6, 0xce, 0x26, 0x6d, 0xde, 0x6e, 0xd2, 0xec, 0x94, 0xad, 0xa2, 0x40, 0xcb, 0xca, 0xa5, + 0xd2, 0x6a, 0x05, 0x0e, 0x1b, 0x5a, 0x41, 0x97, 0x53, 0x36, 0xeb, 0x54, 0x46, 0x69, 0x12, 0x1c, + 0x07, 0x75, 0x7b, 0xb1, 0x9c, 0x78, 0x36, 0x32, 0x6b, 0x7b, 0x8c, 0x3d, 0x29, 0x8d, 0x10, 0x17, + 0x2e, 0x1c, 0x38, 0x72, 0xe1, 0x1f, 0x20, 0xf1, 0x1b, 0xb8, 0x70, 0xe1, 0xc2, 0x91, 0xbf, 0xc0, + 0x8f, 0xe0, 0x88, 0x66, 0x3c, 0x76, 0xe2, 0x25, 0x10, 0xca, 0x16, 0x4e, 0xdc, 0xfc, 0xde, 0xbc, + 0xef, 0x9b, 0x37, 0xdf, 0x9b, 0xf7, 0x3c, 0x70, 0x7f, 0x46, 0xc8, 0xcc, 0xc3, 0x4d, 0xc7, 0xa6, + 0x76, 0x4c, 0x49, 0x84, 0x9b, 0xb6, 0xe3, 0xbb, 0x41, 0xf3, 0xd9, 0xd1, 0x04, 0x53, 0xfb, 0x68, + 0xe9, 0xb7, 0xb8, 0x5f, 0x0d, 0x23, 0x42, 0x09, 0xba, 0x93, 0xa0, 0xd4, 0x6c, 0x55, 0x4d, 0x56, + 0x05, 0xaa, 0xf1, 0xba, 0x60, 0xb5, 0x43, 0xb7, 0x69, 0x07, 0x01, 0xa1, 0x36, 0x75, 0x49, 0x10, + 0x27, 0xe8, 0xc6, 0x5d, 0xb1, 0xea, 0x91, 0x60, 0x16, 0xcd, 0x83, 0xc0, 0x0d, 0x66, 0x4d, 0x12, + 0xe2, 0x28, 0x17, 0xf4, 0x86, 0x08, 0xe2, 0xd6, 0x64, 0x7e, 0xde, 0xa4, 0xae, 0x8f, 0x63, 0x6a, + 0xfb, 0x61, 0x12, 0xa0, 0xfc, 0xb8, 0x05, 0xd5, 0x0e, 0xf1, 0x7d, 0x12, 0x3c, 0xc6, 0xd4, 0x66, + 0x99, 0xa0, 0x87, 0x00, 0x31, 0xb5, 0x23, 0x6a, 0xb1, 0xd8, 0xba, 0xb4, 0x2f, 0x1d, 0x6c, 0xb7, + 0x1a, 0xaa, 
0xc8, 0x35, 0x25, 0x52, 0xcd, 0x94, 0xc8, 0x28, 0xf3, 0x68, 0x66, 0xa3, 0x07, 0x70, + 0x1d, 0x07, 0x4e, 0x02, 0x94, 0x37, 0x02, 0xaf, 0xe1, 0xc0, 0xe1, 0x30, 0x13, 0xaa, 0x59, 0xe6, + 0x16, 0x5d, 0x84, 0xb8, 0x5e, 0xd8, 0x97, 0x0e, 0xaa, 0xad, 0xb7, 0xd5, 0xbf, 0x56, 0x48, 0x1d, + 0xa4, 0x28, 0x73, 0x11, 0x62, 0xa3, 0x42, 0x56, 0x4d, 0x64, 0x40, 0xc9, 0xb3, 0x27, 0xd8, 0x8b, + 0xeb, 0x5b, 0xfb, 0x85, 0x83, 0xed, 0xd6, 0xf1, 0x26, 0xb6, 0xbc, 0x0e, 0x6a, 0x8f, 0x83, 0xb5, + 0x80, 0x46, 0x0b, 0x43, 0x30, 0xa1, 0x0f, 0xa1, 0x18, 0x53, 0x9b, 0xe2, 0x7a, 0x91, 0x27, 0x78, + 0xff, 0x05, 0x29, 0x47, 0x0c, 0x6b, 0x24, 0x14, 0x8d, 0x87, 0xb0, 0xbd, 0xb2, 0x05, 0xaa, 0x41, + 0xe1, 0x02, 0x2f, 0xb8, 0xde, 0x65, 0x83, 0x7d, 0xa2, 0x57, 0xa1, 0xf8, 0xcc, 0xf6, 0xe6, 0x89, + 0x94, 0x65, 0x23, 0x31, 0x8e, 0xe5, 0xf7, 0x25, 0xe5, 0x6b, 0x09, 0x8a, 0x9c, 0x0b, 0xed, 0xc1, + 0xee, 0xc8, 0x6c, 0x9b, 0x9a, 0x35, 0xee, 0x8f, 0x86, 0x5a, 0x47, 0xef, 0xea, 0xda, 0x69, 0xed, + 0x15, 0x54, 0x83, 0x1d, 0xbd, 0xaf, 0x9b, 0x7a, 0xbb, 0xa7, 0x3f, 0xd5, 0xfb, 0x8f, 0x6a, 0x12, + 0xaa, 0x02, 0x0c, 0x8d, 0x41, 0x47, 0x1b, 0x8d, 0x98, 0x2d, 0x33, 0xbb, 0xd3, 0xee, 0x77, 0xb4, + 0x5e, 0x8f, 0xd9, 0x05, 0x66, 0x77, 0xf5, 0x7e, 0x1a, 0xbf, 0xc5, 0xec, 0xd1, 0xb8, 0xc3, 0xe2, + 0xbb, 0xe3, 0x5e, 0xad, 0x88, 0x00, 0x4a, 0xdd, 0xb6, 0xde, 0xd3, 0x4e, 0x6b, 0x25, 0x54, 0x81, + 0xb2, 0xc0, 0x6a, 0xa7, 0xb5, 0x6b, 0xca, 0x13, 0xb8, 0x3e, 0x8c, 0xc8, 0x2c, 0xc2, 0x71, 0x8c, + 0xee, 0x41, 0xf5, 0x33, 0x12, 0x5d, 0x58, 0x53, 0xe2, 0x87, 0x1e, 0xa6, 0xd8, 0xe1, 0x07, 0x2a, + 0x18, 0x15, 0xe6, 0xed, 0xa4, 0xce, 0x2c, 0x0c, 0xc7, 0xd4, 0xf5, 0x6d, 0x16, 0x26, 0x2f, 0xc3, + 0xb4, 0xd4, 0xa9, 0xfc, 0x2c, 0xc3, 0x9e, 0xf6, 0x3c, 0x24, 0x11, 0xd5, 0x02, 0xea, 0x52, 0x17, + 0xc7, 0x06, 0xfe, 0x74, 0x8e, 0x63, 0x8a, 0x6e, 0x03, 0x84, 0x11, 0xf9, 0x04, 0x4f, 0xa9, 0xe5, + 0x3a, 0x42, 0xb4, 0xb2, 0xf0, 0xe8, 0x0e, 0x3a, 0xcb, 0x6a, 0x2f, 0xf3, 0xda, 0xb7, 0x37, 0x15, + 0x6a, 0xed, 0x2e, 0x6b, 0xaf, 0xc0, 0x47, 0x50, 
0xc1, 0x2c, 0x6c, 0x61, 0x9d, 0xbb, 0x1e, 0xc5, + 0x11, 0xbf, 0xab, 0xdb, 0xad, 0xb7, 0x36, 0xee, 0xc0, 0x41, 0x5d, 0x8e, 0x31, 0x76, 0xf0, 0x8a, + 0x85, 0x0e, 0x61, 0x97, 0xcc, 0x69, 0x38, 0xa7, 0xd6, 0x3c, 0xf2, 0xac, 0x30, 0xc2, 0xe7, 0xee, + 0xf3, 0xfa, 0x16, 0x3f, 0xd3, 0x8d, 0x64, 0x61, 0x1c, 0x79, 0x43, 0xee, 0xbe, 0xca, 0xad, 0xf9, + 0x41, 0x86, 0x3d, 0xdd, 0xff, 0x2f, 0xd4, 0x5c, 0xbb, 0xcb, 0x5a, 0x35, 0x5f, 0x83, 0xb2, 0x1b, + 0x88, 0x93, 0x73, 0x25, 0xcb, 0xc6, 0x75, 0xee, 0x18, 0x47, 0xde, 0x1f, 0xa5, 0xde, 0xba, 0xaa, + 0xd4, 0x57, 0x91, 0xef, 0x3d, 0xb8, 0x75, 0xf9, 0x96, 0xc4, 0x21, 0x09, 0x62, 0xcc, 0xe4, 0x5b, + 0xd6, 0x2f, 0x95, 0x2f, 0x2b, 0x9c, 0xf2, 0x55, 0xe1, 0x32, 0x32, 0x9b, 0xb5, 0x5d, 0x28, 0x4d, + 0xf9, 0x88, 0x10, 0x73, 0x56, 0x7d, 0xb1, 0x81, 0x62, 0x08, 0x34, 0x1a, 0xc3, 0x6e, 0x28, 0x5a, + 0xd0, 0xc2, 0x62, 0x13, 0x31, 0x81, 0x0f, 0x36, 0x51, 0xa6, 0xbd, 0x6b, 0xd4, 0x52, 0x8a, 0x34, + 0x4d, 0x34, 0x80, 0x6a, 0x46, 0x3b, 0x59, 0x50, 0x1c, 0x8b, 0xcb, 0xfe, 0xf7, 0x39, 0x2b, 0x29, + 0xfe, 0x84, 0xc1, 0xff, 0x85, 0x8a, 0xae, 0x6f, 0x9e, 0xe2, 0xda, 0xe6, 0x51, 0x7e, 0x93, 0xe1, + 0x56, 0xfe, 0x6e, 0xfe, 0x5f, 0x89, 0x97, 0x57, 0x89, 0x5c, 0x2f, 0x17, 0xf3, 0xbd, 0xac, 0xe8, + 0xb0, 0xb3, 0x0a, 0x65, 0x7d, 0x76, 0xe1, 0x06, 0x4e, 0x5c, 0x97, 0xf6, 0x0b, 0xac, 0xcf, 0xb8, + 0x81, 0xee, 0x42, 0x25, 0xb0, 0x7d, 0x1c, 0x87, 0xf6, 0x14, 0x5b, 0xae, 0x93, 0x0c, 0x9c, 0xb2, + 0xb1, 0x93, 0x39, 0x75, 0x27, 0x3e, 0x3c, 0x83, 0x4a, 0xee, 0xc7, 0x8f, 0xee, 0x40, 0x63, 0x30, + 0xd4, 0x8c, 0xb6, 0xa9, 0x0f, 0xfa, 0x96, 0x79, 0x36, 0xbc, 0xfc, 0x37, 0xbc, 0x09, 0x37, 0xb4, + 0x27, 0xc3, 0x81, 0x61, 0x5a, 0x5a, 0xdf, 0xd4, 0x4d, 0x5d, 0x1b, 0xd5, 0x24, 0xe6, 0xd4, 0x1f, + 0xe7, 0x9d, 0x72, 0xeb, 0x27, 0x19, 0xaa, 0xa7, 0xe9, 0xc9, 0xdb, 0xec, 0xe0, 0xe8, 0x5b, 0x09, + 0xaa, 0xf9, 0xee, 0x45, 0x0f, 0xfe, 0xd1, 0xdf, 0xa4, 0x71, 0x3b, 0x85, 0xad, 0x3c, 0xd9, 0x96, + 0x4f, 0x18, 0xe5, 0x9d, 0x2f, 0x7f, 0xf9, 0xf5, 0x1b, 0xf9, 0x50, 0xb9, 0x97, 0x3d, 
0x1b, 0xc5, + 0x04, 0x8e, 0x9b, 0x9f, 0x2f, 0xa7, 0xf3, 0x17, 0xc7, 0x98, 0x93, 0x1f, 0x4b, 0x87, 0x3c, 0xb5, + 0xfc, 0x75, 0xde, 0x9c, 0xda, 0xda, 0xd1, 0xfc, 0xb2, 0x52, 0x73, 0x7d, 0x91, 0xda, 0xc9, 0x77, + 0x12, 0x28, 0x53, 0xe2, 0x6f, 0xc8, 0xe6, 0xe4, 0x66, 0x5e, 0xec, 0x21, 0x7b, 0x24, 0x0e, 0xa5, + 0xa7, 0x1d, 0x01, 0x9b, 0x11, 0xcf, 0x0e, 0x66, 0x2a, 0x89, 0x66, 0xcd, 0x19, 0x0e, 0xf8, 0x13, + 0xb2, 0x99, 0x2c, 0xd9, 0xa1, 0x1b, 0xff, 0xd9, 0x73, 0xfb, 0x03, 0x6e, 0x7d, 0x2f, 0xbf, 0xf9, + 0x28, 0x61, 0xe9, 0x78, 0x64, 0xee, 0xa8, 0xd9, 0x4e, 0x2a, 0xdf, 0x4a, 0xfd, 0xf8, 0xe8, 0x84, + 0x05, 0x4f, 0x4a, 0x9c, 0xf6, 0xdd, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x77, 0x71, 0x2d, 0x88, + 0xc4, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/datastore/v1/datastore.pb.go b/vendor/google.golang.org/genproto/googleapis/datastore/v1/datastore.pb.go new file mode 100644 index 000000000..1bf5667a8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/datastore/v1/datastore.pb.go @@ -0,0 +1,1596 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/datastore/v1/datastore.proto + +/* +Package datastore is a generated protocol buffer package. 
+ +It is generated from these files: + google/datastore/v1/datastore.proto + google/datastore/v1/entity.proto + google/datastore/v1/query.proto + +It has these top-level messages: + LookupRequest + LookupResponse + RunQueryRequest + RunQueryResponse + BeginTransactionRequest + BeginTransactionResponse + RollbackRequest + RollbackResponse + CommitRequest + CommitResponse + AllocateIdsRequest + AllocateIdsResponse + Mutation + MutationResult + ReadOptions + TransactionOptions + PartitionId + Key + ArrayValue + Value + Entity + EntityResult + Query + KindExpression + PropertyReference + Projection + PropertyOrder + Filter + CompositeFilter + PropertyFilter + GqlQuery + GqlQueryParameter + QueryResultBatch +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The modes available for commits. +type CommitRequest_Mode int32 + +const ( + // Unspecified. This value must not be used. + CommitRequest_MODE_UNSPECIFIED CommitRequest_Mode = 0 + // Transactional: The mutations are either all applied, or none are applied. + // Learn about transactions [here](https://cloud.google.com/datastore/docs/concepts/transactions). + CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 + // Non-transactional: The mutations may not apply as all or none. 
+ CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 +) + +var CommitRequest_Mode_name = map[int32]string{ + 0: "MODE_UNSPECIFIED", + 1: "TRANSACTIONAL", + 2: "NON_TRANSACTIONAL", +} +var CommitRequest_Mode_value = map[string]int32{ + "MODE_UNSPECIFIED": 0, + "TRANSACTIONAL": 1, + "NON_TRANSACTIONAL": 2, +} + +func (x CommitRequest_Mode) String() string { + return proto.EnumName(CommitRequest_Mode_name, int32(x)) +} +func (CommitRequest_Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } + +// The possible values for read consistencies. +type ReadOptions_ReadConsistency int32 + +const ( + // Unspecified. This value must not be used. + ReadOptions_READ_CONSISTENCY_UNSPECIFIED ReadOptions_ReadConsistency = 0 + // Strong consistency. + ReadOptions_STRONG ReadOptions_ReadConsistency = 1 + // Eventual consistency. + ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 +) + +var ReadOptions_ReadConsistency_name = map[int32]string{ + 0: "READ_CONSISTENCY_UNSPECIFIED", + 1: "STRONG", + 2: "EVENTUAL", +} +var ReadOptions_ReadConsistency_value = map[string]int32{ + "READ_CONSISTENCY_UNSPECIFIED": 0, + "STRONG": 1, + "EVENTUAL": 2, +} + +func (x ReadOptions_ReadConsistency) String() string { + return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) +} +func (ReadOptions_ReadConsistency) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{14, 0} +} + +// The request for [Datastore.Lookup][google.datastore.v1.Datastore.Lookup]. +type LookupRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The options for this lookup request. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options,json=readOptions" json:"read_options,omitempty"` + // Keys of entities to look up. 
+ Keys []*Key `protobuf:"bytes,3,rep,name=keys" json:"keys,omitempty"` +} + +func (m *LookupRequest) Reset() { *m = LookupRequest{} } +func (m *LookupRequest) String() string { return proto.CompactTextString(m) } +func (*LookupRequest) ProtoMessage() {} +func (*LookupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *LookupRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *LookupRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *LookupRequest) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +// The response for [Datastore.Lookup][google.datastore.v1.Datastore.Lookup]. +type LookupResponse struct { + // Entities found as `ResultType.FULL` entities. The order of results in this + // field is undefined and has no relation to the order of the keys in the + // input. + Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + // Entities not found as `ResultType.KEY_ONLY` entities. The order of results + // in this field is undefined and has no relation to the order of the keys + // in the input. + Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` + // A list of keys that were not looked up due to resource constraints. The + // order of results in this field is undefined and has no relation to the + // order of the keys in the input. 
+ Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` +} + +func (m *LookupResponse) Reset() { *m = LookupResponse{} } +func (m *LookupResponse) String() string { return proto.CompactTextString(m) } +func (*LookupResponse) ProtoMessage() {} +func (*LookupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *LookupResponse) GetFound() []*EntityResult { + if m != nil { + return m.Found + } + return nil +} + +func (m *LookupResponse) GetMissing() []*EntityResult { + if m != nil { + return m.Missing + } + return nil +} + +func (m *LookupResponse) GetDeferred() []*Key { + if m != nil { + return m.Deferred + } + return nil +} + +// The request for [Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery]. +type RunQueryRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Entities are partitioned into subsets, identified by a partition ID. + // Queries are scoped to a single partition. + // This partition ID is normalized with the standard default context + // partition ID. + PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id,json=partitionId" json:"partition_id,omitempty"` + // The options for this query. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options,json=readOptions" json:"read_options,omitempty"` + // The type of query. 
+ // + // Types that are valid to be assigned to QueryType: + // *RunQueryRequest_Query + // *RunQueryRequest_GqlQuery + QueryType isRunQueryRequest_QueryType `protobuf_oneof:"query_type"` +} + +func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } +func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } +func (*RunQueryRequest) ProtoMessage() {} +func (*RunQueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isRunQueryRequest_QueryType interface { + isRunQueryRequest_QueryType() +} + +type RunQueryRequest_Query struct { + Query *Query `protobuf:"bytes,3,opt,name=query,oneof"` +} +type RunQueryRequest_GqlQuery struct { + GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query,json=gqlQuery,oneof"` +} + +func (*RunQueryRequest_Query) isRunQueryRequest_QueryType() {} +func (*RunQueryRequest_GqlQuery) isRunQueryRequest_QueryType() {} + +func (m *RunQueryRequest) GetQueryType() isRunQueryRequest_QueryType { + if m != nil { + return m.QueryType + } + return nil +} + +func (m *RunQueryRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *RunQueryRequest) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *RunQueryRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *RunQueryRequest) GetQuery() *Query { + if x, ok := m.GetQueryType().(*RunQueryRequest_Query); ok { + return x.Query + } + return nil +} + +func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { + if x, ok := m.GetQueryType().(*RunQueryRequest_GqlQuery); ok { + return x.GqlQuery + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RunQueryRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RunQueryRequest_OneofMarshaler, _RunQueryRequest_OneofUnmarshaler, _RunQueryRequest_OneofSizer, []interface{}{ + (*RunQueryRequest_Query)(nil), + (*RunQueryRequest_GqlQuery)(nil), + } +} + +func _RunQueryRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RunQueryRequest) + // query_type + switch x := m.QueryType.(type) { + case *RunQueryRequest_Query: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case *RunQueryRequest_GqlQuery: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GqlQuery); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RunQueryRequest.QueryType has unexpected type %T", x) + } + return nil +} + +func _RunQueryRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RunQueryRequest) + switch tag { + case 3: // query_type.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Query) + err := b.DecodeMessage(msg) + m.QueryType = &RunQueryRequest_Query{msg} + return true, err + case 7: // query_type.gql_query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GqlQuery) + err := b.DecodeMessage(msg) + m.QueryType = &RunQueryRequest_GqlQuery{msg} + return true, err + default: + return false, nil + } +} + +func _RunQueryRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RunQueryRequest) + // query_type + switch x := m.QueryType.(type) { + case *RunQueryRequest_Query: + s := proto.Size(x.Query) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RunQueryRequest_GqlQuery: + s := proto.Size(x.GqlQuery) + n += 
proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Datastore.RunQuery][google.datastore.v1.Datastore.RunQuery]. +type RunQueryResponse struct { + // A batch of query results (always present). + Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` + // The parsed form of the `GqlQuery` from the request, if it was set. + Query *Query `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` +} + +func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } +func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } +func (*RunQueryResponse) ProtoMessage() {} +func (*RunQueryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *RunQueryResponse) GetBatch() *QueryResultBatch { + if m != nil { + return m.Batch + } + return nil +} + +func (m *RunQueryResponse) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +// The request for [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]. +type BeginTransactionRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Options for a new transaction. 
+ TransactionOptions *TransactionOptions `protobuf:"bytes,10,opt,name=transaction_options,json=transactionOptions" json:"transaction_options,omitempty"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} +func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BeginTransactionRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *BeginTransactionRequest) GetTransactionOptions() *TransactionOptions { + if m != nil { + return m.TransactionOptions + } + return nil +} + +// The response for [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]. +type BeginTransactionResponse struct { + // The transaction identifier (always present). + Transaction []byte `protobuf:"bytes,1,opt,name=transaction,proto3" json:"transaction,omitempty"` +} + +func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } +func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionResponse) ProtoMessage() {} +func (*BeginTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *BeginTransactionResponse) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for [Datastore.Rollback][google.datastore.v1.Datastore.Rollback]. +type RollbackRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The transaction identifier, returned by a call to + // [Datastore.BeginTransaction][google.datastore.v1.Datastore.BeginTransaction]. 
+ Transaction []byte `protobuf:"bytes,1,opt,name=transaction,proto3" json:"transaction,omitempty"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} +func (*RollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *RollbackRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *RollbackRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The response for [Datastore.Rollback][google.datastore.v1.Datastore.Rollback]. +// (an empty message). +type RollbackResponse struct { +} + +func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } +func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackResponse) ProtoMessage() {} +func (*RollbackResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +// The request for [Datastore.Commit][google.datastore.v1.Datastore.Commit]. +type CommitRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The type of commit to perform. Defaults to `TRANSACTIONAL`. + Mode CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=google.datastore.v1.CommitRequest_Mode" json:"mode,omitempty"` + // Must be set when mode is `TRANSACTIONAL`. + // + // Types that are valid to be assigned to TransactionSelector: + // *CommitRequest_Transaction + TransactionSelector isCommitRequest_TransactionSelector `protobuf_oneof:"transaction_selector"` + // The mutations to perform. + // + // When mode is `TRANSACTIONAL`, mutations affecting a single entity are + // applied in order. 
The following sequences of mutations affecting a single + // entity are not permitted in a single `Commit` request: + // + // - `insert` followed by `insert` + // - `update` followed by `insert` + // - `upsert` followed by `insert` + // - `delete` followed by `update` + // + // When mode is `NON_TRANSACTIONAL`, no two mutations may affect a single + // entity. + Mutations []*Mutation `protobuf:"bytes,6,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +type isCommitRequest_TransactionSelector interface { + isCommitRequest_TransactionSelector() +} + +type CommitRequest_Transaction struct { + Transaction []byte `protobuf:"bytes,1,opt,name=transaction,proto3,oneof"` +} + +func (*CommitRequest_Transaction) isCommitRequest_TransactionSelector() {} + +func (m *CommitRequest) GetTransactionSelector() isCommitRequest_TransactionSelector { + if m != nil { + return m.TransactionSelector + } + return nil +} + +func (m *CommitRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CommitRequest) GetMode() CommitRequest_Mode { + if m != nil { + return m.Mode + } + return CommitRequest_MODE_UNSPECIFIED +} + +func (m *CommitRequest) GetTransaction() []byte { + if x, ok := m.GetTransactionSelector().(*CommitRequest_Transaction); ok { + return x.Transaction + } + return nil +} + +func (m *CommitRequest) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CommitRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CommitRequest_OneofMarshaler, _CommitRequest_OneofUnmarshaler, _CommitRequest_OneofSizer, []interface{}{ + (*CommitRequest_Transaction)(nil), + } +} + +func _CommitRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CommitRequest) + // transaction_selector + switch x := m.TransactionSelector.(type) { + case *CommitRequest_Transaction: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Transaction) + case nil: + default: + return fmt.Errorf("CommitRequest.TransactionSelector has unexpected type %T", x) + } + return nil +} + +func _CommitRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CommitRequest) + switch tag { + case 1: // transaction_selector.transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.TransactionSelector = &CommitRequest_Transaction{x} + return true, err + default: + return false, nil + } +} + +func _CommitRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CommitRequest) + // transaction_selector + switch x := m.TransactionSelector.(type) { + case *CommitRequest_Transaction: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Transaction))) + n += len(x.Transaction) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Datastore.Commit][google.datastore.v1.Datastore.Commit]. +type CommitResponse struct { + // The result of performing the mutations. + // The i-th mutation result corresponds to the i-th mutation in the request. 
+ MutationResults []*MutationResult `protobuf:"bytes,3,rep,name=mutation_results,json=mutationResults" json:"mutation_results,omitempty"` + // The number of index entries updated during the commit, or zero if none were + // updated. + IndexUpdates int32 `protobuf:"varint,4,opt,name=index_updates,json=indexUpdates" json:"index_updates,omitempty"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *CommitResponse) GetMutationResults() []*MutationResult { + if m != nil { + return m.MutationResults + } + return nil +} + +func (m *CommitResponse) GetIndexUpdates() int32 { + if m != nil { + return m.IndexUpdates + } + return 0 +} + +// The request for [Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds]. +type AllocateIdsRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // A list of keys with incomplete key paths for which to allocate IDs. + // No key may be reserved/read-only. + Keys []*Key `protobuf:"bytes,1,rep,name=keys" json:"keys,omitempty"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} +func (*AllocateIdsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *AllocateIdsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *AllocateIdsRequest) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +// The response for [Datastore.AllocateIds][google.datastore.v1.Datastore.AllocateIds]. 
+type AllocateIdsResponse struct { + // The keys specified in the request (in the same order), each with + // its key path completed with a newly allocated ID. + Keys []*Key `protobuf:"bytes,1,rep,name=keys" json:"keys,omitempty"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} +func (*AllocateIdsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *AllocateIdsResponse) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +// A mutation to apply to an entity. +type Mutation struct { + // The mutation operation. + // + // For `insert`, `update`, and `upsert`: + // - The entity's key must not be reserved/read-only. + // - No property in the entity may have a reserved name, + // not even a property in an entity in a value. + // - No value in the entity may have meaning 18, + // not even a value in an entity in another value. + // + // Types that are valid to be assigned to Operation: + // *Mutation_Insert + // *Mutation_Update + // *Mutation_Upsert + // *Mutation_Delete + Operation isMutation_Operation `protobuf_oneof:"operation"` + // When set, the server will detect whether or not this mutation conflicts + // with the current version of the entity on the server. Conflicting mutations + // are not applied, and are marked as such in MutationResult. 
+ // + // Types that are valid to be assigned to ConflictDetectionStrategy: + // *Mutation_BaseVersion + ConflictDetectionStrategy isMutation_ConflictDetectionStrategy `protobuf_oneof:"conflict_detection_strategy"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +type isMutation_Operation interface { + isMutation_Operation() +} +type isMutation_ConflictDetectionStrategy interface { + isMutation_ConflictDetectionStrategy() +} + +type Mutation_Insert struct { + Insert *Entity `protobuf:"bytes,4,opt,name=insert,oneof"` +} +type Mutation_Update struct { + Update *Entity `protobuf:"bytes,5,opt,name=update,oneof"` +} +type Mutation_Upsert struct { + Upsert *Entity `protobuf:"bytes,6,opt,name=upsert,oneof"` +} +type Mutation_Delete struct { + Delete *Key `protobuf:"bytes,7,opt,name=delete,oneof"` +} +type Mutation_BaseVersion struct { + BaseVersion int64 `protobuf:"varint,8,opt,name=base_version,json=baseVersion,oneof"` +} + +func (*Mutation_Insert) isMutation_Operation() {} +func (*Mutation_Update) isMutation_Operation() {} +func (*Mutation_Upsert) isMutation_Operation() {} +func (*Mutation_Delete) isMutation_Operation() {} +func (*Mutation_BaseVersion) isMutation_ConflictDetectionStrategy() {} + +func (m *Mutation) GetOperation() isMutation_Operation { + if m != nil { + return m.Operation + } + return nil +} +func (m *Mutation) GetConflictDetectionStrategy() isMutation_ConflictDetectionStrategy { + if m != nil { + return m.ConflictDetectionStrategy + } + return nil +} + +func (m *Mutation) GetInsert() *Entity { + if x, ok := m.GetOperation().(*Mutation_Insert); ok { + return x.Insert + } + return nil +} + +func (m *Mutation) GetUpdate() *Entity { + if x, ok := m.GetOperation().(*Mutation_Update); ok { + return x.Update + } + return nil +} + +func (m *Mutation) GetUpsert() 
*Entity { + if x, ok := m.GetOperation().(*Mutation_Upsert); ok { + return x.Upsert + } + return nil +} + +func (m *Mutation) GetDelete() *Key { + if x, ok := m.GetOperation().(*Mutation_Delete); ok { + return x.Delete + } + return nil +} + +func (m *Mutation) GetBaseVersion() int64 { + if x, ok := m.GetConflictDetectionStrategy().(*Mutation_BaseVersion); ok { + return x.BaseVersion + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Mutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Mutation_OneofMarshaler, _Mutation_OneofUnmarshaler, _Mutation_OneofSizer, []interface{}{ + (*Mutation_Insert)(nil), + (*Mutation_Update)(nil), + (*Mutation_Upsert)(nil), + (*Mutation_Delete)(nil), + (*Mutation_BaseVersion)(nil), + } +} + +func _Mutation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Mutation) + // operation + switch x := m.Operation.(type) { + case *Mutation_Insert: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Insert); err != nil { + return err + } + case *Mutation_Update: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Update); err != nil { + return err + } + case *Mutation_Upsert: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Upsert); err != nil { + return err + } + case *Mutation_Delete: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Delete); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Mutation.Operation has unexpected type %T", x) + } + // conflict_detection_strategy + switch x := m.ConflictDetectionStrategy.(type) { + case *Mutation_BaseVersion: + b.EncodeVarint(8<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.BaseVersion)) + case nil: + default: + return fmt.Errorf("Mutation.ConflictDetectionStrategy has unexpected 
type %T", x) + } + return nil +} + +func _Mutation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Mutation) + switch tag { + case 4: // operation.insert + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Entity) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Insert{msg} + return true, err + case 5: // operation.update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Entity) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Update{msg} + return true, err + case 6: // operation.upsert + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Entity) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Upsert{msg} + return true, err + case 7: // operation.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Key) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Delete{msg} + return true, err + case 8: // conflict_detection_strategy.base_version + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ConflictDetectionStrategy = &Mutation_BaseVersion{int64(x)} + return true, err + default: + return false, nil + } +} + +func _Mutation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Mutation) + // operation + switch x := m.Operation.(type) { + case *Mutation_Insert: + s := proto.Size(x.Insert) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Update: + s := proto.Size(x.Update) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Upsert: + s := proto.Size(x.Upsert) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Delete: + s := proto.Size(x.Delete) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // conflict_detection_strategy + switch x := m.ConflictDetectionStrategy.(type) { + case *Mutation_BaseVersion: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.BaseVersion)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The result of applying a mutation. +type MutationResult struct { + // The automatically allocated key. + // Set only when the mutation allocated a key. + Key *Key `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` + // The version of the entity on the server after processing the mutation. If + // the mutation doesn't change anything on the server, then the version will + // be the version of the current entity or, if no entity is present, a version + // that is strictly greater than the version of any previous entity and less + // than the version of any possible future entity. + Version int64 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + // Whether a conflict was detected for this mutation. Always false when a + // conflict detection strategy field is not set in the mutation. 
+ ConflictDetected bool `protobuf:"varint,5,opt,name=conflict_detected,json=conflictDetected" json:"conflict_detected,omitempty"` +} + +func (m *MutationResult) Reset() { *m = MutationResult{} } +func (m *MutationResult) String() string { return proto.CompactTextString(m) } +func (*MutationResult) ProtoMessage() {} +func (*MutationResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *MutationResult) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *MutationResult) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *MutationResult) GetConflictDetected() bool { + if m != nil { + return m.ConflictDetected + } + return false +} + +// The options shared by read requests. +type ReadOptions struct { + // If not specified, lookups and ancestor queries default to + // `read_consistency`=`STRONG`, global queries default to + // `read_consistency`=`EVENTUAL`. + // + // Types that are valid to be assigned to ConsistencyType: + // *ReadOptions_ReadConsistency_ + // *ReadOptions_Transaction + ConsistencyType isReadOptions_ConsistencyType `protobuf_oneof:"consistency_type"` +} + +func (m *ReadOptions) Reset() { *m = ReadOptions{} } +func (m *ReadOptions) String() string { return proto.CompactTextString(m) } +func (*ReadOptions) ProtoMessage() {} +func (*ReadOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +type isReadOptions_ConsistencyType interface { + isReadOptions_ConsistencyType() +} + +type ReadOptions_ReadConsistency_ struct { + ReadConsistency ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,json=readConsistency,enum=google.datastore.v1.ReadOptions_ReadConsistency,oneof"` +} +type ReadOptions_Transaction struct { + Transaction []byte `protobuf:"bytes,2,opt,name=transaction,proto3,oneof"` +} + +func (*ReadOptions_ReadConsistency_) isReadOptions_ConsistencyType() {} +func (*ReadOptions_Transaction) 
isReadOptions_ConsistencyType() {} + +func (m *ReadOptions) GetConsistencyType() isReadOptions_ConsistencyType { + if m != nil { + return m.ConsistencyType + } + return nil +} + +func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { + if x, ok := m.GetConsistencyType().(*ReadOptions_ReadConsistency_); ok { + return x.ReadConsistency + } + return ReadOptions_READ_CONSISTENCY_UNSPECIFIED +} + +func (m *ReadOptions) GetTransaction() []byte { + if x, ok := m.GetConsistencyType().(*ReadOptions_Transaction); ok { + return x.Transaction + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ReadOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ReadOptions_OneofMarshaler, _ReadOptions_OneofUnmarshaler, _ReadOptions_OneofSizer, []interface{}{ + (*ReadOptions_ReadConsistency_)(nil), + (*ReadOptions_Transaction)(nil), + } +} + +func _ReadOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadOptions) + // consistency_type + switch x := m.ConsistencyType.(type) { + case *ReadOptions_ReadConsistency_: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.ReadConsistency)) + case *ReadOptions_Transaction: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Transaction) + case nil: + default: + return fmt.Errorf("ReadOptions.ConsistencyType has unexpected type %T", x) + } + return nil +} + +func _ReadOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadOptions) + switch tag { + case 1: // consistency_type.read_consistency + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ConsistencyType = &ReadOptions_ReadConsistency_{ReadOptions_ReadConsistency(x)} + return true, err + case 2: // 
consistency_type.transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ConsistencyType = &ReadOptions_Transaction{x} + return true, err + default: + return false, nil + } +} + +func _ReadOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ReadOptions) + // consistency_type + switch x := m.ConsistencyType.(type) { + case *ReadOptions_ReadConsistency_: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.ReadConsistency)) + case *ReadOptions_Transaction: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Transaction))) + n += len(x.Transaction) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Options for beginning a new transaction. +type TransactionOptions struct { + // The `mode` of the transaction, indicating whether write operations are + // supported. + // + // Types that are valid to be assigned to Mode: + // *TransactionOptions_ReadWrite_ + // *TransactionOptions_ReadOnly_ + Mode isTransactionOptions_Mode `protobuf_oneof:"mode"` +} + +func (m *TransactionOptions) Reset() { *m = TransactionOptions{} } +func (m *TransactionOptions) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions) ProtoMessage() {} +func (*TransactionOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type isTransactionOptions_Mode interface { + isTransactionOptions_Mode() +} + +type TransactionOptions_ReadWrite_ struct { + ReadWrite *TransactionOptions_ReadWrite `protobuf:"bytes,1,opt,name=read_write,json=readWrite,oneof"` +} +type TransactionOptions_ReadOnly_ struct { + ReadOnly *TransactionOptions_ReadOnly `protobuf:"bytes,2,opt,name=read_only,json=readOnly,oneof"` +} + +func (*TransactionOptions_ReadWrite_) isTransactionOptions_Mode() {} +func (*TransactionOptions_ReadOnly_) isTransactionOptions_Mode() {} + +func (m 
*TransactionOptions) GetMode() isTransactionOptions_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *TransactionOptions) GetReadWrite() *TransactionOptions_ReadWrite { + if x, ok := m.GetMode().(*TransactionOptions_ReadWrite_); ok { + return x.ReadWrite + } + return nil +} + +func (m *TransactionOptions) GetReadOnly() *TransactionOptions_ReadOnly { + if x, ok := m.GetMode().(*TransactionOptions_ReadOnly_); ok { + return x.ReadOnly + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TransactionOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionOptions_OneofMarshaler, _TransactionOptions_OneofUnmarshaler, _TransactionOptions_OneofSizer, []interface{}{ + (*TransactionOptions_ReadWrite_)(nil), + (*TransactionOptions_ReadOnly_)(nil), + } +} + +func _TransactionOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadWrite_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadWrite); err != nil { + return err + } + case *TransactionOptions_ReadOnly_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadOnly); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionOptions.Mode has unexpected type %T", x) + } + return nil +} + +func _TransactionOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionOptions) + switch tag { + case 1: // mode.read_write + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadWrite) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadWrite_{msg} + return true, err + case 2: // mode.read_only + if wire 
!= proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadOnly) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadOnly_{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadWrite_: + s := proto.Size(x.ReadWrite) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_: + s := proto.Size(x.ReadOnly) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Options specific to read / write transactions. +type TransactionOptions_ReadWrite struct { + // The transaction identifier of the transaction being retried. + PreviousTransaction []byte `protobuf:"bytes,1,opt,name=previous_transaction,json=previousTransaction,proto3" json:"previous_transaction,omitempty"` +} + +func (m *TransactionOptions_ReadWrite) Reset() { *m = TransactionOptions_ReadWrite{} } +func (m *TransactionOptions_ReadWrite) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadWrite) ProtoMessage() {} +func (*TransactionOptions_ReadWrite) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{15, 0} +} + +func (m *TransactionOptions_ReadWrite) GetPreviousTransaction() []byte { + if m != nil { + return m.PreviousTransaction + } + return nil +} + +// Options specific to read-only transactions. 
+type TransactionOptions_ReadOnly struct { +} + +func (m *TransactionOptions_ReadOnly) Reset() { *m = TransactionOptions_ReadOnly{} } +func (m *TransactionOptions_ReadOnly) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadOnly) ProtoMessage() {} +func (*TransactionOptions_ReadOnly) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15, 1} } + +func init() { + proto.RegisterType((*LookupRequest)(nil), "google.datastore.v1.LookupRequest") + proto.RegisterType((*LookupResponse)(nil), "google.datastore.v1.LookupResponse") + proto.RegisterType((*RunQueryRequest)(nil), "google.datastore.v1.RunQueryRequest") + proto.RegisterType((*RunQueryResponse)(nil), "google.datastore.v1.RunQueryResponse") + proto.RegisterType((*BeginTransactionRequest)(nil), "google.datastore.v1.BeginTransactionRequest") + proto.RegisterType((*BeginTransactionResponse)(nil), "google.datastore.v1.BeginTransactionResponse") + proto.RegisterType((*RollbackRequest)(nil), "google.datastore.v1.RollbackRequest") + proto.RegisterType((*RollbackResponse)(nil), "google.datastore.v1.RollbackResponse") + proto.RegisterType((*CommitRequest)(nil), "google.datastore.v1.CommitRequest") + proto.RegisterType((*CommitResponse)(nil), "google.datastore.v1.CommitResponse") + proto.RegisterType((*AllocateIdsRequest)(nil), "google.datastore.v1.AllocateIdsRequest") + proto.RegisterType((*AllocateIdsResponse)(nil), "google.datastore.v1.AllocateIdsResponse") + proto.RegisterType((*Mutation)(nil), "google.datastore.v1.Mutation") + proto.RegisterType((*MutationResult)(nil), "google.datastore.v1.MutationResult") + proto.RegisterType((*ReadOptions)(nil), "google.datastore.v1.ReadOptions") + proto.RegisterType((*TransactionOptions)(nil), "google.datastore.v1.TransactionOptions") + proto.RegisterType((*TransactionOptions_ReadWrite)(nil), "google.datastore.v1.TransactionOptions.ReadWrite") + proto.RegisterType((*TransactionOptions_ReadOnly)(nil), 
"google.datastore.v1.TransactionOptions.ReadOnly") + proto.RegisterEnum("google.datastore.v1.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) + proto.RegisterEnum("google.datastore.v1.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Datastore service + +type DatastoreClient interface { + // Looks up entities by key. + Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error) + // Queries for entities. + RunQuery(ctx context.Context, in *RunQueryRequest, opts ...grpc.CallOption) (*RunQueryResponse, error) + // Begins a new transaction. + BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*BeginTransactionResponse, error) + // Commits a transaction, optionally creating, deleting or modifying some + // entities. + Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) + // Rolls back a transaction. + Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*RollbackResponse, error) + // Allocates IDs for the given keys, which is useful for referencing an entity + // before it is inserted. 
+ AllocateIds(ctx context.Context, in *AllocateIdsRequest, opts ...grpc.CallOption) (*AllocateIdsResponse, error) +} + +type datastoreClient struct { + cc *grpc.ClientConn +} + +func NewDatastoreClient(cc *grpc.ClientConn) DatastoreClient { + return &datastoreClient{cc} +} + +func (c *datastoreClient) Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error) { + out := new(LookupResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1.Datastore/Lookup", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) RunQuery(ctx context.Context, in *RunQueryRequest, opts ...grpc.CallOption) (*RunQueryResponse, error) { + out := new(RunQueryResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1.Datastore/RunQuery", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*BeginTransactionResponse, error) { + out := new(BeginTransactionResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1.Datastore/BeginTransaction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { + out := new(CommitResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1.Datastore/Commit", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*RollbackResponse, error) { + out := new(RollbackResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1.Datastore/Rollback", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) AllocateIds(ctx context.Context, in *AllocateIdsRequest, opts ...grpc.CallOption) (*AllocateIdsResponse, error) { + out := new(AllocateIdsResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1.Datastore/AllocateIds", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Datastore service + +type DatastoreServer interface { + // Looks up entities by key. + Lookup(context.Context, *LookupRequest) (*LookupResponse, error) + // Queries for entities. + RunQuery(context.Context, *RunQueryRequest) (*RunQueryResponse, error) + // Begins a new transaction. + BeginTransaction(context.Context, *BeginTransactionRequest) (*BeginTransactionResponse, error) + // Commits a transaction, optionally creating, deleting or modifying some + // entities. + Commit(context.Context, *CommitRequest) (*CommitResponse, error) + // Rolls back a transaction. + Rollback(context.Context, *RollbackRequest) (*RollbackResponse, error) + // Allocates IDs for the given keys, which is useful for referencing an entity + // before it is inserted. 
+ AllocateIds(context.Context, *AllocateIdsRequest) (*AllocateIdsResponse, error) +} + +func RegisterDatastoreServer(s *grpc.Server, srv DatastoreServer) { + s.RegisterService(&_Datastore_serviceDesc, srv) +} + +func _Datastore_Lookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LookupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).Lookup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1.Datastore/Lookup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).Lookup(ctx, req.(*LookupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_RunQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunQueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).RunQuery(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1.Datastore/RunQuery", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).RunQuery(ctx, req.(*RunQueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_BeginTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BeginTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).BeginTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1.Datastore/BeginTransaction", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(DatastoreServer).BeginTransaction(ctx, req.(*BeginTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1.Datastore/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).Commit(ctx, req.(*CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_Rollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).Rollback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1.Datastore/Rollback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).Rollback(ctx, req.(*RollbackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_AllocateIds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AllocateIdsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).AllocateIds(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1.Datastore/AllocateIds", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(DatastoreServer).AllocateIds(ctx, req.(*AllocateIdsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Datastore_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.datastore.v1.Datastore", + HandlerType: (*DatastoreServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Lookup", + Handler: _Datastore_Lookup_Handler, + }, + { + MethodName: "RunQuery", + Handler: _Datastore_RunQuery_Handler, + }, + { + MethodName: "BeginTransaction", + Handler: _Datastore_BeginTransaction_Handler, + }, + { + MethodName: "Commit", + Handler: _Datastore_Commit_Handler, + }, + { + MethodName: "Rollback", + Handler: _Datastore_Rollback_Handler, + }, + { + MethodName: "AllocateIds", + Handler: _Datastore_AllocateIds_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/datastore/v1/datastore.proto", +} + +func init() { proto.RegisterFile("google/datastore/v1/datastore.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1312 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x5f, 0x6f, 0xdb, 0x54, + 0x14, 0xef, 0x4d, 0xdb, 0x2c, 0x39, 0xe9, 0x9f, 0xec, 0x76, 0xb0, 0x90, 0x6d, 0x22, 0xb8, 0x54, + 0xab, 0xba, 0x2d, 0x69, 0x03, 0x13, 0xd2, 0x3a, 0x21, 0x35, 0x69, 0xb6, 0x46, 0xac, 0x49, 0xb9, + 0xed, 0xc6, 0x1f, 0x09, 0x45, 0xae, 0x7d, 0x1b, 0x4c, 0x1d, 0x5f, 0xd7, 0xbe, 0x29, 0x44, 0x88, + 0x49, 0x80, 0xc6, 0x1b, 0x2f, 0xe3, 0x13, 0xf0, 0xc2, 0x03, 0xe2, 0x91, 0x27, 0xc4, 0xb7, 0xe0, + 0x91, 0x57, 0xbe, 0x01, 0x5f, 0x00, 0xf9, 0xfa, 0x3a, 0x89, 0x53, 0xbb, 0xf1, 0x24, 0xde, 0x7c, + 0xaf, 0x7f, 0xbf, 0x73, 0x7e, 0xe7, 0x9c, 0xeb, 0x73, 0xae, 0x61, 0xb5, 0xcb, 0x58, 0xd7, 0xa4, + 0x15, 0x5d, 0xe5, 0xaa, 0xcb, 0x99, 0x43, 0x2b, 0xe7, 0x5b, 0xa3, 0x45, 0xd9, 0x76, 0x18, 0x67, + 0x78, 0xc5, 0x07, 0x95, 0x47, 0xfb, 0xe7, 0x5b, 0xc5, 0x9b, 0x92, 0xa9, 0xda, 0x46, 0x45, 0xb5, + 0x2c, 0xc6, 0x55, 0x6e, 0x30, 0xcb, 0xf5, 0x29, 0xc5, 0x52, 0x94, 0x5d, 0x6a, 0x71, 0x83, 0x0f, + 
0x24, 0xe2, 0xcd, 0x28, 0xc4, 0x59, 0x9f, 0x3a, 0x12, 0xa0, 0xfc, 0x8c, 0x60, 0xf1, 0x09, 0x63, + 0xa7, 0x7d, 0x9b, 0xd0, 0xb3, 0x3e, 0x75, 0x39, 0xbe, 0x05, 0x60, 0x3b, 0xec, 0x0b, 0xaa, 0xf1, + 0x8e, 0xa1, 0x17, 0x32, 0x25, 0xb4, 0x9e, 0x25, 0x59, 0xb9, 0xd3, 0xd4, 0x71, 0x1d, 0x16, 0x1c, + 0xaa, 0xea, 0x1d, 0x66, 0x0b, 0x25, 0x05, 0x54, 0x42, 0xeb, 0xb9, 0x6a, 0xa9, 0x1c, 0xa1, 0xbe, + 0x4c, 0xa8, 0xaa, 0xb7, 0x7d, 0x1c, 0xc9, 0x39, 0xa3, 0x05, 0xbe, 0x0b, 0x73, 0xa7, 0x74, 0xe0, + 0x16, 0x66, 0x4b, 0xb3, 0xeb, 0xb9, 0x6a, 0x21, 0x92, 0xfc, 0x01, 0x1d, 0x10, 0x81, 0x52, 0xfe, + 0x44, 0xb0, 0x14, 0x68, 0x74, 0x6d, 0x66, 0xb9, 0x14, 0xbf, 0x07, 0xf3, 0x27, 0xac, 0x6f, 0xe9, + 0x05, 0x24, 0x2c, 0xbc, 0x15, 0x69, 0xa1, 0x21, 0x32, 0x41, 0xa8, 0xdb, 0x37, 0x39, 0xf1, 0xf1, + 0x78, 0x1b, 0xae, 0xf4, 0x0c, 0xd7, 0x35, 0xac, 0x6e, 0x21, 0x95, 0x94, 0x1a, 0x30, 0xf0, 0xbb, + 0x90, 0xd1, 0xe9, 0x09, 0x75, 0x1c, 0xaa, 0x4f, 0x95, 0x3e, 0x44, 0x2a, 0x7f, 0xa4, 0x60, 0x99, + 0xf4, 0xad, 0x0f, 0xbd, 0xac, 0x27, 0x4f, 0xb2, 0xad, 0x3a, 0xdc, 0xf0, 0xb2, 0xe5, 0x01, 0x52, + 0x97, 0x24, 0xf9, 0x20, 0x00, 0x36, 0x75, 0x92, 0xb3, 0x47, 0x8b, 0xff, 0xa7, 0x52, 0x55, 0x98, + 0x17, 0xc7, 0xa5, 0x30, 0x2b, 0xd8, 0xc5, 0x48, 0xb6, 0x08, 0x6d, 0x6f, 0x86, 0xf8, 0x50, 0xfc, + 0x10, 0xb2, 0xdd, 0x33, 0xb3, 0xe3, 0xf3, 0xae, 0x08, 0xde, 0xad, 0x48, 0xde, 0xe3, 0x33, 0x33, + 0xa0, 0x66, 0xba, 0xf2, 0xb9, 0xb6, 0x00, 0x20, 0x98, 0x1d, 0x3e, 0xb0, 0xa9, 0xf2, 0x2d, 0x82, + 0xfc, 0x28, 0x79, 0xb2, 0xfa, 0xdb, 0x30, 0x7f, 0xac, 0x72, 0xed, 0x73, 0x19, 0xd2, 0x5a, 0xbc, + 0x28, 0xbf, 0x82, 0x35, 0x0f, 0x4c, 0x7c, 0x0e, 0xde, 0x0c, 0x22, 0x4a, 0x4d, 0x8b, 0x48, 0xc6, + 0xa3, 0xbc, 0x44, 0x70, 0xbd, 0x46, 0xbb, 0x86, 0x75, 0xe4, 0xa8, 0x96, 0xab, 0x6a, 0x5e, 0x66, + 0x12, 0x16, 0xf2, 0x63, 0x58, 0xe1, 0x23, 0xd2, 0xb0, 0x14, 0x20, 0x5c, 0xdf, 0x8e, 0x74, 0x3d, + 0xe6, 0x24, 0xa8, 0x08, 0xe6, 0x17, 0xf6, 0x94, 0x87, 0x50, 0xb8, 0xa8, 0x49, 0xe6, 0xa7, 0x04, + 0xb9, 0x31, 0x86, 0xc8, 0xd2, 0x02, 
0x19, 0xdf, 0x52, 0x08, 0x2c, 0x13, 0x66, 0x9a, 0xc7, 0xaa, + 0x76, 0x9a, 0x30, 0x92, 0xe9, 0x36, 0x31, 0xe4, 0x47, 0x36, 0x7d, 0x25, 0xca, 0x6f, 0x29, 0x58, + 0xac, 0xb3, 0x5e, 0xcf, 0xe0, 0x09, 0xdd, 0x6c, 0xc3, 0x5c, 0x8f, 0xe9, 0xb4, 0x30, 0x5f, 0x42, + 0xeb, 0x4b, 0x31, 0x19, 0x0a, 0x19, 0x2c, 0xef, 0x33, 0x9d, 0x12, 0x41, 0xc2, 0x4a, 0x84, 0xc6, + 0xbd, 0x99, 0x90, 0x4a, 0xbc, 0x0d, 0xd9, 0x5e, 0x5f, 0xb6, 0xd1, 0x42, 0x5a, 0x7c, 0xc4, 0xd1, + 0x87, 0x73, 0x5f, 0xa2, 0xc8, 0x08, 0xaf, 0x3c, 0x82, 0x39, 0xcf, 0x1d, 0xbe, 0x06, 0xf9, 0xfd, + 0xf6, 0x6e, 0xa3, 0xf3, 0xb4, 0x75, 0x78, 0xd0, 0xa8, 0x37, 0x1f, 0x35, 0x1b, 0xbb, 0xf9, 0x19, + 0x7c, 0x15, 0x16, 0x8f, 0xc8, 0x4e, 0xeb, 0x70, 0xa7, 0x7e, 0xd4, 0x6c, 0xb7, 0x76, 0x9e, 0xe4, + 0x11, 0x7e, 0x0d, 0xae, 0xb6, 0xda, 0xad, 0x4e, 0x78, 0x3b, 0x55, 0x7b, 0x1d, 0xae, 0x8d, 0x1f, + 0x0b, 0x97, 0x9a, 0x54, 0xe3, 0xcc, 0x51, 0x5e, 0x20, 0x58, 0x0a, 0xa2, 0x93, 0xb5, 0x6c, 0x41, + 0x3e, 0xf0, 0xdf, 0x71, 0xc4, 0x69, 0x0e, 0xda, 0xe6, 0xea, 0xe5, 0xb2, 0xfd, 0xde, 0xb5, 0xdc, + 0x0b, 0xad, 0x5d, 0xbc, 0x0a, 0x8b, 0x86, 0xa5, 0xd3, 0xaf, 0x3a, 0x7d, 0x5b, 0x57, 0x39, 0x75, + 0x0b, 0x73, 0x25, 0xb4, 0x3e, 0x4f, 0x16, 0xc4, 0xe6, 0x53, 0x7f, 0x4f, 0x51, 0x01, 0xef, 0x98, + 0x26, 0xd3, 0x54, 0x4e, 0x9b, 0xba, 0x9b, 0xb0, 0x74, 0x41, 0x53, 0x47, 0x89, 0x9a, 0x7a, 0x1d, + 0x56, 0x42, 0x2e, 0x64, 0xb8, 0xaf, 0x66, 0xe4, 0xf7, 0x14, 0x64, 0x82, 0x80, 0xf1, 0x7d, 0x48, + 0x1b, 0x96, 0x4b, 0x1d, 0x2e, 0x42, 0xca, 0x55, 0x6f, 0x5c, 0xd2, 0xd9, 0xf7, 0x66, 0x88, 0x04, + 0x7b, 0x34, 0x3f, 0x15, 0xe2, 0xcc, 0x4d, 0xa7, 0xf9, 0x60, 0x9f, 0x26, 0xbc, 0xa5, 0x13, 0xd2, + 0x84, 0xb7, 0x2a, 0xa4, 0x75, 0x6a, 0x52, 0x4e, 0x65, 0x63, 0x8c, 0x8d, 0xd0, 0xe3, 0xf8, 0x48, + 0xbc, 0x0a, 0x0b, 0xc7, 0xaa, 0x4b, 0x3b, 0xe7, 0xd4, 0x71, 0xbd, 0x73, 0xed, 0x65, 0x7e, 0x76, + 0x0f, 0x91, 0x9c, 0xb7, 0xfb, 0xcc, 0xdf, 0xac, 0xe5, 0x20, 0xcb, 0x6c, 0xea, 0x88, 0x54, 0xd4, + 0x6e, 0xc1, 0x0d, 0x8d, 0x59, 0x27, 0xa6, 0xa1, 0xf1, 0x8e, 0x4e, 0x39, 
0x95, 0xc7, 0x8c, 0x3b, + 0x2a, 0xa7, 0xdd, 0x81, 0xf2, 0x3d, 0x82, 0xa5, 0xf0, 0x39, 0xc1, 0x1b, 0x30, 0x7b, 0x4a, 0x83, + 0x2e, 0x1f, 0x9f, 0x76, 0x0f, 0x84, 0x0b, 0x70, 0x25, 0x90, 0xe2, 0x65, 0x7a, 0x96, 0x04, 0x4b, + 0x7c, 0x07, 0xae, 0x4e, 0xf8, 0xa5, 0xba, 0x48, 0x6b, 0x86, 0xe4, 0x83, 0x17, 0xbb, 0x72, 0x5f, + 0xf9, 0x17, 0x41, 0x6e, 0x6c, 0xee, 0xe0, 0xcf, 0x20, 0x2f, 0xe6, 0x95, 0xc6, 0x2c, 0xd7, 0x70, + 0x39, 0xb5, 0xb4, 0x81, 0xf8, 0x84, 0x97, 0xaa, 0x9b, 0xd3, 0x66, 0x96, 0x78, 0xae, 0x8f, 0x78, + 0x7b, 0x33, 0x64, 0xd9, 0x09, 0x6f, 0x4d, 0x36, 0x87, 0x54, 0x44, 0x73, 0x50, 0xf6, 0x61, 0x79, + 0xc2, 0x12, 0x2e, 0xc1, 0x4d, 0xd2, 0xd8, 0xd9, 0xed, 0xd4, 0xdb, 0xad, 0xc3, 0xe6, 0xe1, 0x51, + 0xa3, 0x55, 0xff, 0x64, 0xe2, 0xb3, 0x07, 0x48, 0x1f, 0x1e, 0x91, 0x76, 0xeb, 0x71, 0x1e, 0xe1, + 0x05, 0xc8, 0x34, 0x9e, 0x35, 0x5a, 0x47, 0x4f, 0xc5, 0x67, 0x8e, 0x21, 0x3f, 0x16, 0x8c, 0x3f, + 0xd0, 0x7e, 0x4c, 0x01, 0xbe, 0xd8, 0xe2, 0x31, 0x01, 0x10, 0xc1, 0x7f, 0xe9, 0x18, 0x9c, 0xca, + 0xb9, 0xb6, 0x95, 0x70, 0x3e, 0x88, 0xe8, 0x3f, 0xf2, 0x88, 0x7b, 0x33, 0x24, 0xeb, 0x04, 0x0b, + 0xdc, 0x86, 0xac, 0x7f, 0x01, 0xb0, 0xcc, 0x60, 0xda, 0x6d, 0xbe, 0x8a, 0xc9, 0xb6, 0x65, 0x8a, + 0xd1, 0xec, 0xc8, 0xe7, 0xe2, 0xfb, 0x90, 0x1d, 0xba, 0xc2, 0x5b, 0x70, 0xcd, 0x76, 0xe8, 0xb9, + 0xc1, 0xfa, 0x6e, 0xe7, 0xe2, 0x64, 0x58, 0x09, 0xde, 0x8d, 0xd9, 0x2e, 0x02, 0x64, 0x02, 0xbb, + 0xb5, 0xb4, 0xdf, 0xe8, 0xab, 0x7f, 0xa7, 0x21, 0xbb, 0x1b, 0x88, 0xc1, 0xcf, 0x21, 0xed, 0xdf, + 0xf4, 0xb0, 0x12, 0xa9, 0x34, 0x74, 0x55, 0x2d, 0xae, 0x5e, 0x8a, 0x91, 0x23, 0xe8, 0xce, 0x77, + 0x7f, 0xfd, 0xf3, 0x53, 0x6a, 0x4d, 0x29, 0x79, 0x57, 0x5f, 0xd9, 0xad, 0xdc, 0xca, 0xd7, 0xa3, + 0x4e, 0xf6, 0xcd, 0x03, 0x53, 0x30, 0x1e, 0xa0, 0x0d, 0xfc, 0x03, 0x82, 0x4c, 0x70, 0xdd, 0xc0, + 0x6f, 0x47, 0x1f, 0xbb, 0xf0, 0x55, 0xae, 0xb8, 0x36, 0x05, 0x25, 0x65, 0xdc, 0x13, 0x32, 0x6e, + 0x2b, 0x4a, 0xbc, 0x0c, 0x47, 0x72, 0x3c, 0x21, 0xbf, 0x20, 0xc8, 0x4f, 0xce, 0x77, 0x7c, 0x37, + 0xd2, 0x55, 
0xcc, 0xd5, 0xa4, 0x78, 0x2f, 0x21, 0x5a, 0x0a, 0xbc, 0x2f, 0x04, 0x56, 0x94, 0x8d, + 0x78, 0x81, 0xc7, 0x13, 0x5c, 0x4f, 0xe8, 0x73, 0x48, 0xfb, 0x13, 0x2b, 0xa6, 0x62, 0xa1, 0x61, + 0x1d, 0x53, 0xb1, 0xf0, 0xc8, 0x4b, 0x52, 0x31, 0x4d, 0x30, 0x86, 0x15, 0x93, 0xd7, 0x8e, 0xb8, + 0x8a, 0x85, 0x6f, 0x3a, 0x71, 0x15, 0x9b, 0xbc, 0xbb, 0x24, 0xa9, 0x98, 0xe4, 0x78, 0x42, 0x5e, + 0x22, 0xc8, 0x8d, 0x4d, 0x34, 0x1c, 0x7d, 0x77, 0xb9, 0x38, 0x56, 0x8b, 0xeb, 0xd3, 0x81, 0x52, + 0xd1, 0xa6, 0x50, 0xb4, 0xa1, 0xac, 0xc5, 0x2b, 0x52, 0x47, 0xb4, 0x07, 0x68, 0xa3, 0xf6, 0x02, + 0xc1, 0x75, 0x8d, 0xf5, 0xa2, 0x3c, 0xd4, 0x96, 0x86, 0x9f, 0xdd, 0x81, 0xf7, 0x2b, 0x78, 0x80, + 0x3e, 0x7d, 0x28, 0x61, 0x5d, 0x66, 0xaa, 0x56, 0xb7, 0xcc, 0x9c, 0x6e, 0xa5, 0x4b, 0x2d, 0xf1, + 0xa3, 0x58, 0xf1, 0x5f, 0xa9, 0xb6, 0xe1, 0x86, 0x7e, 0x26, 0xb7, 0x87, 0x8b, 0x5f, 0x53, 0x6f, + 0x3c, 0xf6, 0xe9, 0x75, 0x93, 0xf5, 0xf5, 0xf2, 0xd0, 0x7a, 0xf9, 0xd9, 0xd6, 0x71, 0x5a, 0x18, + 0x79, 0xe7, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x0f, 0xc7, 0x4b, 0x0a, 0x0f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/datastore/v1/entity.pb.go b/vendor/google.golang.org/genproto/googleapis/datastore/v1/entity.pb.go new file mode 100644 index 000000000..84ce59181 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/datastore/v1/entity.pb.go @@ -0,0 +1,762 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/datastore/v1/entity.proto + +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" +import google_type "google.golang.org/genproto/googleapis/type/latlng" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A partition ID identifies a grouping of entities. The grouping is always +// by project and namespace, however the namespace ID may be empty. +// +// A partition ID contains several dimensions: +// project ID and namespace ID. +// +// Partition dimensions: +// +// - May be `""`. +// - Must be valid UTF-8 bytes. +// - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` +// If the value of any dimension matches regex `__.*__`, the partition is +// reserved/read-only. +// A reserved/read-only partition ID is forbidden in certain documented +// contexts. +// +// Foreign partition IDs (in which the project ID does +// not match the context project ID ) are discouraged. +// Reads and writes of foreign partition IDs may fail if the project is not in an active state. +type PartitionId struct { + // The ID of the project to which the entities belong. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // If not empty, the ID of the namespace to which the entities belong. + NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId" json:"namespace_id,omitempty"` +} + +func (m *PartitionId) Reset() { *m = PartitionId{} } +func (m *PartitionId) String() string { return proto.CompactTextString(m) } +func (*PartitionId) ProtoMessage() {} +func (*PartitionId) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *PartitionId) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *PartitionId) GetNamespaceId() string { + if m != nil { + return m.NamespaceId + } + return "" +} + +// A unique identifier for an entity. +// If a key's partition ID or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. 
+type Key struct { + // Entities are partitioned into subsets, currently identified by a project + // ID and namespace ID. + // Queries are scoped to a single partition. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId" json:"partition_id,omitempty"` + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. The first + // element identifies a _root entity_, the second element identifies + // a _child_ of the root entity, the third element identifies a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's _ancestors_. + // + // An entity path is always fully complete: *all* of the entity's ancestors + // are required to be in the path along with the entity identifier itself. + // The only exception is that in some documented cases, the identifier in the + // last path element (for the entity) itself may be omitted. For example, + // the last path element of the key of `Mutation.insert` may have no + // identifier. + // + // A path can never be empty, and a path can have at most 100 elements. + Path []*Key_PathElement `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Key) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *Key) GetPath() []*Key_PathElement { + if m != nil { + return m.Path + } + return nil +} + +// A (kind, ID/name) pair used to construct a key path. +// +// If either name or ID is set, the element is complete. +// If neither is set, the element is incomplete. +type Key_PathElement struct { + // The kind of the entity. 
+ // A kind matching regex `__.*__` is reserved/read-only. + // A kind must not contain more than 1500 bytes when UTF-8 encoded. + // Cannot be `""`. + Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` + // The type of ID. + // + // Types that are valid to be assigned to IdType: + // *Key_PathElement_Id + // *Key_PathElement_Name + IdType isKey_PathElement_IdType `protobuf_oneof:"id_type"` +} + +func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +func (*Key_PathElement) ProtoMessage() {} +func (*Key_PathElement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 0} } + +type isKey_PathElement_IdType interface { + isKey_PathElement_IdType() +} + +type Key_PathElement_Id struct { + Id int64 `protobuf:"varint,2,opt,name=id,oneof"` +} +type Key_PathElement_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,oneof"` +} + +func (*Key_PathElement_Id) isKey_PathElement_IdType() {} +func (*Key_PathElement_Name) isKey_PathElement_IdType() {} + +func (m *Key_PathElement) GetIdType() isKey_PathElement_IdType { + if m != nil { + return m.IdType + } + return nil +} + +func (m *Key_PathElement) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *Key_PathElement) GetId() int64 { + if x, ok := m.GetIdType().(*Key_PathElement_Id); ok { + return x.Id + } + return 0 +} + +func (m *Key_PathElement) GetName() string { + if x, ok := m.GetIdType().(*Key_PathElement_Name); ok { + return x.Name + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Key_PathElement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Key_PathElement_OneofMarshaler, _Key_PathElement_OneofUnmarshaler, _Key_PathElement_OneofSizer, []interface{}{ + (*Key_PathElement_Id)(nil), + (*Key_PathElement_Name)(nil), + } +} + +func _Key_PathElement_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case nil: + default: + return fmt.Errorf("Key_PathElement.IdType has unexpected type %T", x) + } + return nil +} + +func _Key_PathElement_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Key_PathElement) + switch tag { + case 2: // id_type.id + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.IdType = &Key_PathElement_Id{int64(x)} + return true, err + case 3: // id_type.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdType = &Key_PathElement_Name{x} + return true, err + default: + return false, nil + } +} + +func _Key_PathElement_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// An array value. 
+type ArrayValue struct { + // Values in the array. + // The order of this array may not be preserved if it contains a mix of + // indexed and unindexed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ArrayValue) Reset() { *m = ArrayValue{} } +func (m *ArrayValue) String() string { return proto.CompactTextString(m) } +func (*ArrayValue) ProtoMessage() {} +func (*ArrayValue) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ArrayValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// A message that can hold any of the supported value types and associated +// metadata. +type Value struct { + // Must have a value set. + // + // Types that are valid to be assigned to ValueType: + // *Value_NullValue + // *Value_BooleanValue + // *Value_IntegerValue + // *Value_DoubleValue + // *Value_TimestampValue + // *Value_KeyValue + // *Value_StringValue + // *Value_BlobValue + // *Value_GeoPointValue + // *Value_EntityValue + // *Value_ArrayValue + ValueType isValue_ValueType `protobuf_oneof:"value_type"` + // The `meaning` field should only be populated for backwards compatibility. + Meaning int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` + // If the value should be excluded from all indexes including those defined + // explicitly. 
+ ExcludeFromIndexes bool `protobuf:"varint,19,opt,name=exclude_from_indexes,json=excludeFromIndexes" json:"exclude_from_indexes,omitempty"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +type isValue_ValueType interface { + isValue_ValueType() +} + +type Value_NullValue struct { + NullValue google_protobuf1.NullValue `protobuf:"varint,11,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_BooleanValue struct { + BooleanValue bool `protobuf:"varint,1,opt,name=boolean_value,json=booleanValue,oneof"` +} +type Value_IntegerValue struct { + IntegerValue int64 `protobuf:"varint,2,opt,name=integer_value,json=integerValue,oneof"` +} +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,oneof"` +} +type Value_TimestampValue struct { + TimestampValue *google_protobuf2.Timestamp `protobuf:"bytes,10,opt,name=timestamp_value,json=timestampValue,oneof"` +} +type Value_KeyValue struct { + KeyValue *Key `protobuf:"bytes,5,opt,name=key_value,json=keyValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,17,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BlobValue struct { + BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value,json=blobValue,proto3,oneof"` +} +type Value_GeoPointValue struct { + GeoPointValue *google_type.LatLng `protobuf:"bytes,8,opt,name=geo_point_value,json=geoPointValue,oneof"` +} +type Value_EntityValue struct { + EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value,json=entityValue,oneof"` +} +type Value_ArrayValue struct { + ArrayValue *ArrayValue `protobuf:"bytes,9,opt,name=array_value,json=arrayValue,oneof"` +} + +func (*Value_NullValue) isValue_ValueType() {} +func (*Value_BooleanValue) isValue_ValueType() {} +func 
(*Value_IntegerValue) isValue_ValueType() {} +func (*Value_DoubleValue) isValue_ValueType() {} +func (*Value_TimestampValue) isValue_ValueType() {} +func (*Value_KeyValue) isValue_ValueType() {} +func (*Value_StringValue) isValue_ValueType() {} +func (*Value_BlobValue) isValue_ValueType() {} +func (*Value_GeoPointValue) isValue_ValueType() {} +func (*Value_EntityValue) isValue_ValueType() {} +func (*Value_ArrayValue) isValue_ValueType() {} + +func (m *Value) GetValueType() isValue_ValueType { + if m != nil { + return m.ValueType + } + return nil +} + +func (m *Value) GetNullValue() google_protobuf1.NullValue { + if x, ok := m.GetValueType().(*Value_NullValue); ok { + return x.NullValue + } + return google_protobuf1.NullValue_NULL_VALUE +} + +func (m *Value) GetBooleanValue() bool { + if x, ok := m.GetValueType().(*Value_BooleanValue); ok { + return x.BooleanValue + } + return false +} + +func (m *Value) GetIntegerValue() int64 { + if x, ok := m.GetValueType().(*Value_IntegerValue); ok { + return x.IntegerValue + } + return 0 +} + +func (m *Value) GetDoubleValue() float64 { + if x, ok := m.GetValueType().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *Value) GetTimestampValue() *google_protobuf2.Timestamp { + if x, ok := m.GetValueType().(*Value_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +func (m *Value) GetKeyValue() *Key { + if x, ok := m.GetValueType().(*Value_KeyValue); ok { + return x.KeyValue + } + return nil +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetValueType().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBlobValue() []byte { + if x, ok := m.GetValueType().(*Value_BlobValue); ok { + return x.BlobValue + } + return nil +} + +func (m *Value) GetGeoPointValue() *google_type.LatLng { + if x, ok := m.GetValueType().(*Value_GeoPointValue); ok { + return x.GeoPointValue + } + return nil +} + +func (m *Value) GetEntityValue() *Entity { + 
if x, ok := m.GetValueType().(*Value_EntityValue); ok { + return x.EntityValue + } + return nil +} + +func (m *Value) GetArrayValue() *ArrayValue { + if x, ok := m.GetValueType().(*Value_ArrayValue); ok { + return x.ArrayValue + } + return nil +} + +func (m *Value) GetMeaning() int32 { + if m != nil { + return m.Meaning + } + return 0 +} + +func (m *Value) GetExcludeFromIndexes() bool { + if m != nil { + return m.ExcludeFromIndexes + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_BooleanValue)(nil), + (*Value_IntegerValue)(nil), + (*Value_DoubleValue)(nil), + (*Value_TimestampValue)(nil), + (*Value_KeyValue)(nil), + (*Value_StringValue)(nil), + (*Value_BlobValue)(nil), + (*Value_GeoPointValue)(nil), + (*Value_EntityValue)(nil), + (*Value_ArrayValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // value_type + switch x := m.ValueType.(type) { + case *Value_NullValue: + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_BooleanValue: + t := uint64(0) + if x.BooleanValue { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_IntegerValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntegerValue)) + case *Value_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *Value_TimestampValue: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimestampValue); err != nil { + return err + } + case *Value_KeyValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.KeyValue); err != nil { + return err + } + case *Value_StringValue: + b.EncodeVarint(17<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BlobValue: + b.EncodeVarint(18<<3 | proto.WireBytes) + b.EncodeRawBytes(x.BlobValue) + case *Value_GeoPointValue: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GeoPointValue); err != nil { + return err + } + case *Value_EntityValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EntityValue); err != nil { + return err + } + case *Value_ArrayValue: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ArrayValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.ValueType has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 11: // value_type.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_NullValue{google_protobuf1.NullValue(x)} + return true, err + case 1: // value_type.boolean_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_BooleanValue{x != 0} + return true, err + case 2: // value_type.integer_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_IntegerValue{int64(x)} + return true, err + case 3: // value_type.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.ValueType = &Value_DoubleValue{math.Float64frombits(x)} + return true, err + case 10: // value_type.timestamp_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Timestamp) + err := 
b.DecodeMessage(msg) + m.ValueType = &Value_TimestampValue{msg} + return true, err + case 5: // value_type.key_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Key) + err := b.DecodeMessage(msg) + m.ValueType = &Value_KeyValue{msg} + return true, err + case 17: // value_type.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ValueType = &Value_StringValue{x} + return true, err + case 18: // value_type.blob_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ValueType = &Value_BlobValue{x} + return true, err + case 8: // value_type.geo_point_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_type.LatLng) + err := b.DecodeMessage(msg) + m.ValueType = &Value_GeoPointValue{msg} + return true, err + case 6: // value_type.entity_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Entity) + err := b.DecodeMessage(msg) + m.ValueType = &Value_EntityValue{msg} + return true, err + case 9: // value_type.array_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ArrayValue) + err := b.DecodeMessage(msg) + m.ValueType = &Value_ArrayValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // value_type + switch x := m.ValueType.(type) { + case *Value_NullValue: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_BooleanValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *Value_IntegerValue: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.IntegerValue)) + case *Value_DoubleValue: + n += proto.SizeVarint(3<<3 | proto.WireFixed64) + n += 8 + case 
*Value_TimestampValue: + s := proto.Size(x.TimestampValue) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_KeyValue: + s := proto.Size(x.KeyValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_StringValue: + n += proto.SizeVarint(17<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BlobValue: + n += proto.SizeVarint(18<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.BlobValue))) + n += len(x.BlobValue) + case *Value_GeoPointValue: + s := proto.Size(x.GeoPointValue) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_EntityValue: + s := proto.Size(x.EntityValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ArrayValue: + s := proto.Size(x.ArrayValue) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Datastore data object. +// +// An entity is limited to 1 megabyte when stored. That _roughly_ +// corresponds to a limit of 1 megabyte for the serialized form of this +// message. +type Entity struct { + // The entity's key. + // + // An entity must have a key, unless otherwise documented (for example, + // an entity in `Value.entity_value` may have no key). + // An entity's kind is its key path's last element's kind, + // or null if it has no key. + Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + // The entity's properties. + // The map's keys are property names. + // A property name matching regex `__.*__` is reserved. + // A reserved property name is forbidden in certain documented contexts. + // The name must not contain more than 500 characters. + // The name cannot be `""`. 
+ Properties map[string]*Value `protobuf:"bytes,3,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *Entity) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *Entity) GetProperties() map[string]*Value { + if m != nil { + return m.Properties + } + return nil +} + +func init() { + proto.RegisterType((*PartitionId)(nil), "google.datastore.v1.PartitionId") + proto.RegisterType((*Key)(nil), "google.datastore.v1.Key") + proto.RegisterType((*Key_PathElement)(nil), "google.datastore.v1.Key.PathElement") + proto.RegisterType((*ArrayValue)(nil), "google.datastore.v1.ArrayValue") + proto.RegisterType((*Value)(nil), "google.datastore.v1.Value") + proto.RegisterType((*Entity)(nil), "google.datastore.v1.Entity") +} + +func init() { proto.RegisterFile("google/datastore/v1/entity.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 767 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0xdf, 0x8e, 0xdb, 0x44, + 0x14, 0xc6, 0xed, 0x64, 0xb3, 0x5d, 0x1f, 0xbb, 0xbb, 0x65, 0xb6, 0x12, 0x26, 0x80, 0xd6, 0x04, + 0x90, 0x22, 0x90, 0xec, 0xee, 0x72, 0x41, 0x45, 0x41, 0x2a, 0x29, 0x01, 0x47, 0x5b, 0x41, 0x34, + 0xaa, 0x2a, 0xc1, 0x4d, 0x34, 0x89, 0xa7, 0xee, 0x10, 0x7b, 0xc6, 0xb2, 0xc7, 0xab, 0xfa, 0x96, + 0xe7, 0xe0, 0x09, 0x78, 0x24, 0xc4, 0xc3, 0xa0, 0xf9, 0x13, 0x67, 0x29, 0xe9, 0xde, 0x79, 0xce, + 0xf9, 0x7d, 0xc7, 0xdf, 0xf1, 0x39, 0x63, 0x88, 0x72, 0x21, 0xf2, 0x82, 0x26, 0x19, 0x91, 0xa4, + 0x91, 0xa2, 0xa6, 0xc9, 0xcd, 0x65, 0x42, 0xb9, 0x64, 0xb2, 0x8b, 0xab, 0x5a, 0x48, 0x81, 0xce, + 0x0d, 0x11, 0xf7, 0x44, 0x7c, 0x73, 0x39, 0xfe, 0xc8, 0xca, 0x48, 0xc5, 0x12, 
0xc2, 0xb9, 0x90, + 0x44, 0x32, 0xc1, 0x1b, 0x23, 0xe9, 0xb3, 0xfa, 0xb4, 0x6e, 0x5f, 0x25, 0x8d, 0xac, 0xdb, 0x8d, + 0xb4, 0xd9, 0x8b, 0xb7, 0xb3, 0x92, 0x95, 0xb4, 0x91, 0xa4, 0xac, 0x2c, 0x10, 0x5a, 0x40, 0x76, + 0x15, 0x4d, 0x0a, 0x22, 0x0b, 0x9e, 0x9b, 0xcc, 0xe4, 0x17, 0xf0, 0x97, 0xa4, 0x96, 0x4c, 0xbd, + 0x6c, 0x91, 0xa1, 0x8f, 0x01, 0xaa, 0x5a, 0xfc, 0x4e, 0x37, 0x72, 0xc5, 0xb2, 0x70, 0x10, 0xb9, + 0x53, 0x0f, 0x7b, 0x36, 0xb2, 0xc8, 0xd0, 0x27, 0x10, 0x70, 0x52, 0xd2, 0xa6, 0x22, 0x1b, 0xaa, + 0x80, 0x23, 0x0d, 0xf8, 0x7d, 0x6c, 0x91, 0x4d, 0xfe, 0x76, 0x61, 0x78, 0x4d, 0x3b, 0xf4, 0x0c, + 0x82, 0x6a, 0x57, 0x58, 0xa1, 0x6e, 0xe4, 0x4e, 0xfd, 0xab, 0x28, 0x3e, 0xd0, 0x7b, 0x7c, 0xcb, + 0x01, 0xf6, 0xab, 0x5b, 0x76, 0x1e, 0xc3, 0x51, 0x45, 0xe4, 0xeb, 0x70, 0x10, 0x0d, 0xa7, 0xfe, + 0xd5, 0x67, 0x07, 0xc5, 0xd7, 0xb4, 0x8b, 0x97, 0x44, 0xbe, 0x9e, 0x17, 0xb4, 0xa4, 0x5c, 0x62, + 0xad, 0x18, 0xbf, 0x50, 0x7d, 0xf5, 0x41, 0x84, 0xe0, 0x68, 0xcb, 0xb8, 0x71, 0xe1, 0x61, 0xfd, + 0x8c, 0x1e, 0xc0, 0xc0, 0xf6, 0x38, 0x4c, 0x1d, 0x3c, 0x60, 0x19, 0x7a, 0x08, 0x47, 0xaa, 0x95, + 0x70, 0xa8, 0xa8, 0xd4, 0xc1, 0xfa, 0x34, 0xf3, 0xe0, 0x1e, 0xcb, 0x56, 0xea, 0xd3, 0x4d, 0x9e, + 0x02, 0x7c, 0x5f, 0xd7, 0xa4, 0x7b, 0x49, 0x8a, 0x96, 0xa2, 0x2b, 0x38, 0xbe, 0x51, 0x0f, 0x4d, + 0xe8, 0x6a, 0x7f, 0xe3, 0x83, 0xfe, 0x34, 0x8b, 0x2d, 0x39, 0xf9, 0x73, 0x04, 0x23, 0xa3, 0x7e, + 0x02, 0xc0, 0xdb, 0xa2, 0x58, 0xe9, 0x44, 0xe8, 0x47, 0xee, 0xf4, 0x74, 0x5f, 0x61, 0x37, 0xc9, + 0xf8, 0xe7, 0xb6, 0x28, 0x34, 0x9f, 0x3a, 0xd8, 0xe3, 0xbb, 0x03, 0xfa, 0x1c, 0xee, 0xaf, 0x85, + 0x28, 0x28, 0xe1, 0x56, 0xaf, 0x1a, 0x3b, 0x49, 0x1d, 0x1c, 0xd8, 0x70, 0x8f, 0x31, 0x2e, 0x69, + 0x4e, 0x6b, 0x8b, 0xed, 0xba, 0x0d, 0x6c, 0xd8, 0x60, 0x9f, 0x42, 0x90, 0x89, 0x76, 0x5d, 0x50, + 0x4b, 0xa9, 0xfe, 0xdd, 0xd4, 0xc1, 0xbe, 0x89, 0x1a, 0x68, 0x0e, 0x67, 0xfd, 0x5a, 0x59, 0x0e, + 0xf4, 0x4c, 0xff, 0x6f, 0xfa, 0xc5, 0x8e, 0x4b, 0x1d, 0x7c, 0xda, 0x8b, 0x4c, 0x99, 0xaf, 0xc1, + 0xdb, 0xd2, 0xce, 
0x16, 0x18, 0xe9, 0x02, 0xe1, 0xbb, 0xe6, 0x9a, 0x3a, 0xf8, 0x64, 0x4b, 0xbb, + 0xde, 0x64, 0x23, 0x6b, 0xc6, 0x73, 0xab, 0x7d, 0xcf, 0x0e, 0xc9, 0x37, 0x51, 0x03, 0x5d, 0x00, + 0xac, 0x0b, 0xb1, 0xb6, 0x08, 0x8a, 0xdc, 0x69, 0xa0, 0x3e, 0x9c, 0x8a, 0x19, 0xe0, 0x3b, 0x38, + 0xcb, 0xa9, 0x58, 0x55, 0x82, 0x71, 0x69, 0xa9, 0x13, 0x6d, 0xe2, 0x7c, 0x67, 0x42, 0x0d, 0x3a, + 0x7e, 0x4e, 0xe4, 0x73, 0x9e, 0xa7, 0x0e, 0xbe, 0x9f, 0x53, 0xb1, 0x54, 0xb0, 0x91, 0x3f, 0x85, + 0xc0, 0x5c, 0x65, 0xab, 0x3d, 0xd6, 0xda, 0x0f, 0x0f, 0x36, 0x30, 0xd7, 0xa0, 0x72, 0x68, 0x24, + 0xa6, 0xc2, 0x0c, 0x7c, 0xa2, 0x56, 0xc8, 0x16, 0xf0, 0x74, 0x81, 0x8b, 0x83, 0x05, 0xf6, 0xab, + 0x96, 0x3a, 0x18, 0xc8, 0x7e, 0xf1, 0x42, 0xb8, 0x57, 0x52, 0xc2, 0x19, 0xcf, 0xc3, 0xd3, 0xc8, + 0x9d, 0x8e, 0xf0, 0xee, 0x88, 0x1e, 0xc1, 0x43, 0xfa, 0x66, 0x53, 0xb4, 0x19, 0x5d, 0xbd, 0xaa, + 0x45, 0xb9, 0x62, 0x3c, 0xa3, 0x6f, 0x68, 0x13, 0x9e, 0xab, 0xf5, 0xc0, 0xc8, 0xe6, 0x7e, 0xac, + 0x45, 0xb9, 0x30, 0x99, 0x59, 0x00, 0xa0, 0x9d, 0x98, 0x05, 0xff, 0xc7, 0x85, 0x63, 0xe3, 0x1b, + 0x7d, 0x01, 0xc3, 0x2d, 0xed, 0xec, 0xbd, 0x7d, 0xe7, 0x88, 0xb0, 0x82, 0xd0, 0xb5, 0xfe, 0x6d, + 0x54, 0xb4, 0x96, 0x8c, 0x36, 0xe1, 0x50, 0xdf, 0x86, 0x2f, 0xef, 0xf8, 0x28, 0xf1, 0xb2, 0xa7, + 0xe7, 0x5c, 0xd6, 0x1d, 0xbe, 0x25, 0x1f, 0xff, 0x0a, 0x67, 0x6f, 0xa5, 0xd1, 0x83, 0xbd, 0x17, + 0xcf, 0xbc, 0xf1, 0x11, 0x8c, 0xf6, 0x1b, 0x7d, 0xf7, 0xd5, 0x33, 0xe0, 0x37, 0x83, 0xc7, 0xee, + 0xec, 0x0f, 0x17, 0xde, 0xdf, 0x88, 0xf2, 0x10, 0x3c, 0xf3, 0x8d, 0xb5, 0xa5, 0x5a, 0xe2, 0xa5, + 0xfb, 0xdb, 0xb7, 0x96, 0xc9, 0x45, 0x41, 0x78, 0x1e, 0x8b, 0x3a, 0x4f, 0x72, 0xca, 0xf5, 0x8a, + 0x27, 0x26, 0x45, 0x2a, 0xd6, 0xfc, 0xe7, 0x2f, 0xff, 0xa4, 0x3f, 0xfc, 0x35, 0xf8, 0xe0, 0x27, + 0x23, 0x7f, 0x56, 0x88, 0x36, 0x8b, 0x7f, 0xe8, 0x5f, 0xf4, 0xf2, 0x72, 0x7d, 0xac, 0x8b, 0x7c, + 0xf5, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x89, 0x5b, 0xc5, 0xcd, 0x29, 0x06, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/datastore/v1/query.pb.go b/vendor/google.golang.org/genproto/googleapis/datastore/v1/query.pb.go new file mode 100644 index 000000000..977e6d8af --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/datastore/v1/query.pb.go @@ -0,0 +1,969 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/datastore/v1/query.proto + +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/wrappers" +import _ "google.golang.org/genproto/googleapis/type/latlng" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Specifies what data the 'entity' field contains. +// A `ResultType` is either implied (for example, in `LookupResponse.missing` +// from `datastore.proto`, it is always `KEY_ONLY`) or specified by context +// (for example, in message `QueryResultBatch`, field `entity_result_type` +// specifies a `ResultType` for all the values in field `entity_results`). +type EntityResult_ResultType int32 + +const ( + // Unspecified. This value is never used. + EntityResult_RESULT_TYPE_UNSPECIFIED EntityResult_ResultType = 0 + // The key and properties. + EntityResult_FULL EntityResult_ResultType = 1 + // A projected subset of properties. The entity may have no key. + EntityResult_PROJECTION EntityResult_ResultType = 2 + // Only the key. 
+ EntityResult_KEY_ONLY EntityResult_ResultType = 3 +) + +var EntityResult_ResultType_name = map[int32]string{ + 0: "RESULT_TYPE_UNSPECIFIED", + 1: "FULL", + 2: "PROJECTION", + 3: "KEY_ONLY", +} +var EntityResult_ResultType_value = map[string]int32{ + "RESULT_TYPE_UNSPECIFIED": 0, + "FULL": 1, + "PROJECTION": 2, + "KEY_ONLY": 3, +} + +func (x EntityResult_ResultType) String() string { + return proto.EnumName(EntityResult_ResultType_name, int32(x)) +} +func (EntityResult_ResultType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +// The sort direction. +type PropertyOrder_Direction int32 + +const ( + // Unspecified. This value must not be used. + PropertyOrder_DIRECTION_UNSPECIFIED PropertyOrder_Direction = 0 + // Ascending. + PropertyOrder_ASCENDING PropertyOrder_Direction = 1 + // Descending. + PropertyOrder_DESCENDING PropertyOrder_Direction = 2 +) + +var PropertyOrder_Direction_name = map[int32]string{ + 0: "DIRECTION_UNSPECIFIED", + 1: "ASCENDING", + 2: "DESCENDING", +} +var PropertyOrder_Direction_value = map[string]int32{ + "DIRECTION_UNSPECIFIED": 0, + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x PropertyOrder_Direction) String() string { + return proto.EnumName(PropertyOrder_Direction_name, int32(x)) +} +func (PropertyOrder_Direction) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{5, 0} } + +// A composite filter operator. +type CompositeFilter_Operator int32 + +const ( + // Unspecified. This value must not be used. + CompositeFilter_OPERATOR_UNSPECIFIED CompositeFilter_Operator = 0 + // The results are required to satisfy each of the combined filters. 
+ CompositeFilter_AND CompositeFilter_Operator = 1 +) + +var CompositeFilter_Operator_name = map[int32]string{ + 0: "OPERATOR_UNSPECIFIED", + 1: "AND", +} +var CompositeFilter_Operator_value = map[string]int32{ + "OPERATOR_UNSPECIFIED": 0, + "AND": 1, +} + +func (x CompositeFilter_Operator) String() string { + return proto.EnumName(CompositeFilter_Operator_name, int32(x)) +} +func (CompositeFilter_Operator) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{7, 0} } + +// A property filter operator. +type PropertyFilter_Operator int32 + +const ( + // Unspecified. This value must not be used. + PropertyFilter_OPERATOR_UNSPECIFIED PropertyFilter_Operator = 0 + // Less than. + PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 + // Less than or equal. + PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 + // Greater than. + PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 + // Greater than or equal. + PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 + // Equal. + PropertyFilter_EQUAL PropertyFilter_Operator = 5 + // Has ancestor. + PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 +) + +var PropertyFilter_Operator_name = map[int32]string{ + 0: "OPERATOR_UNSPECIFIED", + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 11: "HAS_ANCESTOR", +} +var PropertyFilter_Operator_value = map[string]int32{ + "OPERATOR_UNSPECIFIED": 0, + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "HAS_ANCESTOR": 11, +} + +func (x PropertyFilter_Operator) String() string { + return proto.EnumName(PropertyFilter_Operator_name, int32(x)) +} +func (PropertyFilter_Operator) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{8, 0} } + +// The possible values for the `more_results` field. +type QueryResultBatch_MoreResultsType int32 + +const ( + // Unspecified. This value is never used. 
+ QueryResultBatch_MORE_RESULTS_TYPE_UNSPECIFIED QueryResultBatch_MoreResultsType = 0 + // There may be additional batches to fetch from this query. + QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 + // The query is finished, but there may be more results after the limit. + QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 + // The query is finished, but there may be more results after the end + // cursor. + QueryResultBatch_MORE_RESULTS_AFTER_CURSOR QueryResultBatch_MoreResultsType = 4 + // The query is finished, and there are no more results. + QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 +) + +var QueryResultBatch_MoreResultsType_name = map[int32]string{ + 0: "MORE_RESULTS_TYPE_UNSPECIFIED", + 1: "NOT_FINISHED", + 2: "MORE_RESULTS_AFTER_LIMIT", + 4: "MORE_RESULTS_AFTER_CURSOR", + 3: "NO_MORE_RESULTS", +} +var QueryResultBatch_MoreResultsType_value = map[string]int32{ + "MORE_RESULTS_TYPE_UNSPECIFIED": 0, + "NOT_FINISHED": 1, + "MORE_RESULTS_AFTER_LIMIT": 2, + "MORE_RESULTS_AFTER_CURSOR": 4, + "NO_MORE_RESULTS": 3, +} + +func (x QueryResultBatch_MoreResultsType) String() string { + return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) +} +func (QueryResultBatch_MoreResultsType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{11, 0} +} + +// The result of fetching an entity from Datastore. +type EntityResult struct { + // The resulting entity. + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + // The version of the entity, a strictly positive number that monotonically + // increases with changes to the entity. + // + // This field is set for [`FULL`][google.datastore.v1.EntityResult.ResultType.FULL] entity + // results. 
+ // + // For [missing][google.datastore.v1.LookupResponse.missing] entities in `LookupResponse`, this + // is the version of the snapshot that was used to look up the entity, and it + // is always set except for eventually consistent reads. + Version int64 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + // A cursor that points to the position after the result entity. + // Set only when the `EntityResult` is part of a `QueryResultBatch` message. + Cursor []byte `protobuf:"bytes,3,opt,name=cursor,proto3" json:"cursor,omitempty"` +} + +func (m *EntityResult) Reset() { *m = EntityResult{} } +func (m *EntityResult) String() string { return proto.CompactTextString(m) } +func (*EntityResult) ProtoMessage() {} +func (*EntityResult) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *EntityResult) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *EntityResult) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *EntityResult) GetCursor() []byte { + if m != nil { + return m.Cursor + } + return nil +} + +// A query for entities. +type Query struct { + // The projection to return. Defaults to returning all properties. + Projection []*Projection `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` + // The kinds to query (if empty, returns entities of all kinds). + // Currently at most 1 kind may be specified. + Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` + // The filter to apply. + Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` + // The order to apply to the query results (if empty, order is unspecified). + Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` + // The properties to make distinct. The query results will contain the first + // result for each distinct combination of values for the given properties + // (if empty, all results are returned). 
+ DistinctOn []*PropertyReference `protobuf:"bytes,6,rep,name=distinct_on,json=distinctOn" json:"distinct_on,omitempty"` + // A starting point for the query results. Query cursors are + // returned in query result batches and + // [can only be used to continue the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets). + StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor,json=startCursor,proto3" json:"start_cursor,omitempty"` + // An ending point for the query results. Query cursors are + // returned in query result batches and + // [can only be used to limit the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets). + EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor,json=endCursor,proto3" json:"end_cursor,omitempty"` + // The number of results to skip. Applies before limit, but after all other + // constraints. Optional. Must be >= 0 if specified. + Offset int32 `protobuf:"varint,10,opt,name=offset" json:"offset,omitempty"` + // The maximum number of results to return. Applies after all other + // constraints. Optional. + // Unspecified is interpreted as no limit. + // Must be >= 0 if specified. 
+ Limit *google_protobuf3.Int32Value `protobuf:"bytes,12,opt,name=limit" json:"limit,omitempty"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *Query) GetProjection() []*Projection { + if m != nil { + return m.Projection + } + return nil +} + +func (m *Query) GetKind() []*KindExpression { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Query) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetOrder() []*PropertyOrder { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetDistinctOn() []*PropertyReference { + if m != nil { + return m.DistinctOn + } + return nil +} + +func (m *Query) GetStartCursor() []byte { + if m != nil { + return m.StartCursor + } + return nil +} + +func (m *Query) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *Query) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *Query) GetLimit() *google_protobuf3.Int32Value { + if m != nil { + return m.Limit + } + return nil +} + +// A representation of a kind. +type KindExpression struct { + // The name of the kind. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *KindExpression) Reset() { *m = KindExpression{} } +func (m *KindExpression) String() string { return proto.CompactTextString(m) } +func (*KindExpression) ProtoMessage() {} +func (*KindExpression) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *KindExpression) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A reference to a property relative to the kind expressions. +type PropertyReference struct { + // The name of the property. 
+ // If name includes "."s, it may be interpreted as a property name path. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *PropertyReference) Reset() { *m = PropertyReference{} } +func (m *PropertyReference) String() string { return proto.CompactTextString(m) } +func (*PropertyReference) ProtoMessage() {} +func (*PropertyReference) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *PropertyReference) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A representation of a property in a projection. +type Projection struct { + // The property to project. + Property *PropertyReference `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` +} + +func (m *Projection) Reset() { *m = Projection{} } +func (m *Projection) String() string { return proto.CompactTextString(m) } +func (*Projection) ProtoMessage() {} +func (*Projection) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *Projection) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +// The desired order for a specific property. +type PropertyOrder struct { + // The property to order by. + Property *PropertyReference `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The direction to order by. Defaults to `ASCENDING`. 
+ Direction PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=google.datastore.v1.PropertyOrder_Direction" json:"direction,omitempty"` +} + +func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } +func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } +func (*PropertyOrder) ProtoMessage() {} +func (*PropertyOrder) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *PropertyOrder) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { + if m != nil { + return m.Direction + } + return PropertyOrder_DIRECTION_UNSPECIFIED +} + +// A holder for any type of filter. +type Filter struct { + // The type of filter. + // + // Types that are valid to be assigned to FilterType: + // *Filter_CompositeFilter + // *Filter_PropertyFilter + FilterType isFilter_FilterType `protobuf_oneof:"filter_type"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} +func (*Filter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +type isFilter_FilterType interface { + isFilter_FilterType() +} + +type Filter_CompositeFilter struct { + CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter,json=compositeFilter,oneof"` +} +type Filter_PropertyFilter struct { + PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter,json=propertyFilter,oneof"` +} + +func (*Filter_CompositeFilter) isFilter_FilterType() {} +func (*Filter_PropertyFilter) isFilter_FilterType() {} + +func (m *Filter) GetFilterType() isFilter_FilterType { + if m != nil { + return m.FilterType + } + return nil +} + +func (m *Filter) GetCompositeFilter() *CompositeFilter { + if x, ok := m.GetFilterType().(*Filter_CompositeFilter); ok { + return x.CompositeFilter + } + return nil +} + +func (m *Filter) 
GetPropertyFilter() *PropertyFilter { + if x, ok := m.GetFilterType().(*Filter_PropertyFilter); ok { + return x.PropertyFilter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Filter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Filter_OneofMarshaler, _Filter_OneofUnmarshaler, _Filter_OneofSizer, []interface{}{ + (*Filter_CompositeFilter)(nil), + (*Filter_PropertyFilter)(nil), + } +} + +func _Filter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Filter) + // filter_type + switch x := m.FilterType.(type) { + case *Filter_CompositeFilter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CompositeFilter); err != nil { + return err + } + case *Filter_PropertyFilter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PropertyFilter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Filter.FilterType has unexpected type %T", x) + } + return nil +} + +func _Filter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Filter) + switch tag { + case 1: // filter_type.composite_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CompositeFilter) + err := b.DecodeMessage(msg) + m.FilterType = &Filter_CompositeFilter{msg} + return true, err + case 2: // filter_type.property_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PropertyFilter) + err := b.DecodeMessage(msg) + m.FilterType = &Filter_PropertyFilter{msg} + return true, err + default: + return false, nil + } +} + +func _Filter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Filter) + // filter_type + switch x := m.FilterType.(type) { + case *Filter_CompositeFilter: + s := 
proto.Size(x.CompositeFilter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Filter_PropertyFilter: + s := proto.Size(x.PropertyFilter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A filter that merges multiple other filters using the given operator. +type CompositeFilter struct { + // The operator for combining multiple filters. + Op CompositeFilter_Operator `protobuf:"varint,1,opt,name=op,enum=google.datastore.v1.CompositeFilter_Operator" json:"op,omitempty"` + // The list of filters to combine. + // Must contain at least one filter. + Filters []*Filter `protobuf:"bytes,2,rep,name=filters" json:"filters,omitempty"` +} + +func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } +func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } +func (*CompositeFilter) ProtoMessage() {} +func (*CompositeFilter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *CompositeFilter) GetOp() CompositeFilter_Operator { + if m != nil { + return m.Op + } + return CompositeFilter_OPERATOR_UNSPECIFIED +} + +func (m *CompositeFilter) GetFilters() []*Filter { + if m != nil { + return m.Filters + } + return nil +} + +// A filter on a specific property. +type PropertyFilter struct { + // The property to filter by. + Property *PropertyReference `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The operator to filter by. + Op PropertyFilter_Operator `protobuf:"varint,2,opt,name=op,enum=google.datastore.v1.PropertyFilter_Operator" json:"op,omitempty"` + // The value to compare the property to. 
+ Value *Value `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } +func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } +func (*PropertyFilter) ProtoMessage() {} +func (*PropertyFilter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +func (m *PropertyFilter) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyFilter) GetOp() PropertyFilter_Operator { + if m != nil { + return m.Op + } + return PropertyFilter_OPERATOR_UNSPECIFIED +} + +func (m *PropertyFilter) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// A [GQL query](https://cloud.google.com/datastore/docs/apis/gql/gql_reference). +type GqlQuery struct { + // A string of the format described + // [here](https://cloud.google.com/datastore/docs/apis/gql/gql_reference). + QueryString string `protobuf:"bytes,1,opt,name=query_string,json=queryString" json:"query_string,omitempty"` + // When false, the query string must not contain any literals and instead must + // bind all values. For example, + // `SELECT * FROM Kind WHERE a = 'string literal'` is not allowed, while + // `SELECT * FROM Kind WHERE a = @value` is. + AllowLiterals bool `protobuf:"varint,2,opt,name=allow_literals,json=allowLiterals" json:"allow_literals,omitempty"` + // For each non-reserved named binding site in the query string, there must be + // a named parameter with that name, but not necessarily the inverse. + // + // Key must match regex `[A-Za-z_$][A-Za-z_$0-9]*`, must not match regex + // `__.*__`, and must not be `""`. 
+ NamedBindings map[string]*GqlQueryParameter `protobuf:"bytes,5,rep,name=named_bindings,json=namedBindings" json:"named_bindings,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Numbered binding site @1 references the first numbered parameter, + // effectively using 1-based indexing, rather than the usual 0. + // + // For each binding site numbered i in `query_string`, there must be an i-th + // numbered parameter. The inverse must also be true. + PositionalBindings []*GqlQueryParameter `protobuf:"bytes,4,rep,name=positional_bindings,json=positionalBindings" json:"positional_bindings,omitempty"` +} + +func (m *GqlQuery) Reset() { *m = GqlQuery{} } +func (m *GqlQuery) String() string { return proto.CompactTextString(m) } +func (*GqlQuery) ProtoMessage() {} +func (*GqlQuery) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +func (m *GqlQuery) GetQueryString() string { + if m != nil { + return m.QueryString + } + return "" +} + +func (m *GqlQuery) GetAllowLiterals() bool { + if m != nil { + return m.AllowLiterals + } + return false +} + +func (m *GqlQuery) GetNamedBindings() map[string]*GqlQueryParameter { + if m != nil { + return m.NamedBindings + } + return nil +} + +func (m *GqlQuery) GetPositionalBindings() []*GqlQueryParameter { + if m != nil { + return m.PositionalBindings + } + return nil +} + +// A binding parameter for a GQL query. +type GqlQueryParameter struct { + // The type of parameter. 
+ // + // Types that are valid to be assigned to ParameterType: + // *GqlQueryParameter_Value + // *GqlQueryParameter_Cursor + ParameterType isGqlQueryParameter_ParameterType `protobuf_oneof:"parameter_type"` +} + +func (m *GqlQueryParameter) Reset() { *m = GqlQueryParameter{} } +func (m *GqlQueryParameter) String() string { return proto.CompactTextString(m) } +func (*GqlQueryParameter) ProtoMessage() {} +func (*GqlQueryParameter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{10} } + +type isGqlQueryParameter_ParameterType interface { + isGqlQueryParameter_ParameterType() +} + +type GqlQueryParameter_Value struct { + Value *Value `protobuf:"bytes,2,opt,name=value,oneof"` +} +type GqlQueryParameter_Cursor struct { + Cursor []byte `protobuf:"bytes,3,opt,name=cursor,proto3,oneof"` +} + +func (*GqlQueryParameter_Value) isGqlQueryParameter_ParameterType() {} +func (*GqlQueryParameter_Cursor) isGqlQueryParameter_ParameterType() {} + +func (m *GqlQueryParameter) GetParameterType() isGqlQueryParameter_ParameterType { + if m != nil { + return m.ParameterType + } + return nil +} + +func (m *GqlQueryParameter) GetValue() *Value { + if x, ok := m.GetParameterType().(*GqlQueryParameter_Value); ok { + return x.Value + } + return nil +} + +func (m *GqlQueryParameter) GetCursor() []byte { + if x, ok := m.GetParameterType().(*GqlQueryParameter_Cursor); ok { + return x.Cursor + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GqlQueryParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GqlQueryParameter_OneofMarshaler, _GqlQueryParameter_OneofUnmarshaler, _GqlQueryParameter_OneofSizer, []interface{}{ + (*GqlQueryParameter_Value)(nil), + (*GqlQueryParameter_Cursor)(nil), + } +} + +func _GqlQueryParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GqlQueryParameter) + // parameter_type + switch x := m.ParameterType.(type) { + case *GqlQueryParameter_Value: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Value); err != nil { + return err + } + case *GqlQueryParameter_Cursor: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Cursor) + case nil: + default: + return fmt.Errorf("GqlQueryParameter.ParameterType has unexpected type %T", x) + } + return nil +} + +func _GqlQueryParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GqlQueryParameter) + switch tag { + case 2: // parameter_type.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Value) + err := b.DecodeMessage(msg) + m.ParameterType = &GqlQueryParameter_Value{msg} + return true, err + case 3: // parameter_type.cursor + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ParameterType = &GqlQueryParameter_Cursor{x} + return true, err + default: + return false, nil + } +} + +func _GqlQueryParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GqlQueryParameter) + // parameter_type + switch x := m.ParameterType.(type) { + case *GqlQueryParameter_Value: + s := proto.Size(x.Value) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GqlQueryParameter_Cursor: + n += proto.SizeVarint(3<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Cursor))) + n += len(x.Cursor) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A batch of results produced by a query. +type QueryResultBatch struct { + // The number of results skipped, typically because of an offset. + SkippedResults int32 `protobuf:"varint,6,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"` + // A cursor that points to the position after the last skipped result. + // Will be set when `skipped_results` != 0. + SkippedCursor []byte `protobuf:"bytes,3,opt,name=skipped_cursor,json=skippedCursor,proto3" json:"skipped_cursor,omitempty"` + // The result type for every entity in `entity_results`. + EntityResultType EntityResult_ResultType `protobuf:"varint,1,opt,name=entity_result_type,json=entityResultType,enum=google.datastore.v1.EntityResult_ResultType" json:"entity_result_type,omitempty"` + // The results for this batch. + EntityResults []*EntityResult `protobuf:"bytes,2,rep,name=entity_results,json=entityResults" json:"entity_results,omitempty"` + // A cursor that points to the position after the last result in the batch. + EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor,json=endCursor,proto3" json:"end_cursor,omitempty"` + // The state of the query after the current batch. + MoreResults QueryResultBatch_MoreResultsType `protobuf:"varint,5,opt,name=more_results,json=moreResults,enum=google.datastore.v1.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` + // The version number of the snapshot this batch was returned from. + // This applies to the range of results from the query's `start_cursor` (or + // the beginning of the query if no cursor was given) to this batch's + // `end_cursor` (not the query's `end_cursor`). + // + // In a single transaction, subsequent query result batches for the same query + // can have a greater snapshot version number. 
Each batch's snapshot version + // is valid for all preceding batches. + // The value will be zero for eventually consistent queries. + SnapshotVersion int64 `protobuf:"varint,7,opt,name=snapshot_version,json=snapshotVersion" json:"snapshot_version,omitempty"` +} + +func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } +func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } +func (*QueryResultBatch) ProtoMessage() {} +func (*QueryResultBatch) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } + +func (m *QueryResultBatch) GetSkippedResults() int32 { + if m != nil { + return m.SkippedResults + } + return 0 +} + +func (m *QueryResultBatch) GetSkippedCursor() []byte { + if m != nil { + return m.SkippedCursor + } + return nil +} + +func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { + if m != nil { + return m.EntityResultType + } + return EntityResult_RESULT_TYPE_UNSPECIFIED +} + +func (m *QueryResultBatch) GetEntityResults() []*EntityResult { + if m != nil { + return m.EntityResults + } + return nil +} + +func (m *QueryResultBatch) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { + if m != nil { + return m.MoreResults + } + return QueryResultBatch_MORE_RESULTS_TYPE_UNSPECIFIED +} + +func (m *QueryResultBatch) GetSnapshotVersion() int64 { + if m != nil { + return m.SnapshotVersion + } + return 0 +} + +func init() { + proto.RegisterType((*EntityResult)(nil), "google.datastore.v1.EntityResult") + proto.RegisterType((*Query)(nil), "google.datastore.v1.Query") + proto.RegisterType((*KindExpression)(nil), "google.datastore.v1.KindExpression") + proto.RegisterType((*PropertyReference)(nil), "google.datastore.v1.PropertyReference") + proto.RegisterType((*Projection)(nil), "google.datastore.v1.Projection") + proto.RegisterType((*PropertyOrder)(nil), "google.datastore.v1.PropertyOrder") + 
proto.RegisterType((*Filter)(nil), "google.datastore.v1.Filter") + proto.RegisterType((*CompositeFilter)(nil), "google.datastore.v1.CompositeFilter") + proto.RegisterType((*PropertyFilter)(nil), "google.datastore.v1.PropertyFilter") + proto.RegisterType((*GqlQuery)(nil), "google.datastore.v1.GqlQuery") + proto.RegisterType((*GqlQueryParameter)(nil), "google.datastore.v1.GqlQueryParameter") + proto.RegisterType((*QueryResultBatch)(nil), "google.datastore.v1.QueryResultBatch") + proto.RegisterEnum("google.datastore.v1.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) + proto.RegisterEnum("google.datastore.v1.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) + proto.RegisterEnum("google.datastore.v1.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) + proto.RegisterEnum("google.datastore.v1.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) + proto.RegisterEnum("google.datastore.v1.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) +} + +func init() { proto.RegisterFile("google/datastore/v1/query.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 1301 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x92, 0xd3, 0xc6, + 0x12, 0x5e, 0xc9, 0x3f, 0x6b, 0xb7, 0xff, 0xc4, 0x70, 0x0e, 0x88, 0x05, 0x0e, 0xbb, 0x82, 0x73, + 0xd8, 0x53, 0x05, 0x36, 0x6b, 0x8a, 0x0a, 0x95, 0x90, 0x4a, 0xf9, 0x47, 0xbb, 0x36, 0x18, 0xcb, + 0x3b, 0xf6, 0x42, 0xe0, 0x46, 0x25, 0xec, 0x59, 0xa3, 0x20, 0x4b, 0x62, 0xa4, 0x5d, 0xb2, 0x97, + 0x79, 0x88, 0x54, 0xe5, 0x19, 0xf2, 0x00, 0xb9, 0xc8, 0x03, 0x24, 0x79, 0x88, 0x5c, 0xe7, 0x3a, + 0x8f, 0x90, 0xd2, 0xcc, 0xc8, 0x7f, 0x6b, 0xcc, 0x5e, 0x70, 0xa7, 0xe9, 0xf9, 0xbe, 0xaf, 0xa7, + 0x7b, 0x7a, 0x5a, 0x0d, 0xb7, 0xc6, 0x9e, 0x37, 0x76, 0x48, 0x65, 0x64, 0x85, 
0x56, 0x10, 0x7a, + 0x94, 0x54, 0x4e, 0xf7, 0x2a, 0xef, 0x4f, 0x08, 0x3d, 0x2b, 0xfb, 0xd4, 0x0b, 0x3d, 0x74, 0x99, + 0x03, 0xca, 0x53, 0x40, 0xf9, 0x74, 0x6f, 0xeb, 0x86, 0x60, 0x59, 0xbe, 0x5d, 0xb1, 0x5c, 0xd7, + 0x0b, 0xad, 0xd0, 0xf6, 0xdc, 0x80, 0x53, 0xb6, 0xb6, 0x57, 0x69, 0x12, 0x37, 0xb4, 0x43, 0x21, + 0xba, 0xf5, 0x1f, 0x81, 0x60, 0xab, 0x37, 0x27, 0xc7, 0x95, 0x0f, 0xd4, 0xf2, 0x7d, 0x42, 0x63, + 0x05, 0x55, 0xec, 0x87, 0x67, 0x3e, 0xa9, 0x38, 0x56, 0xe8, 0xb8, 0x63, 0xbe, 0xa3, 0xfd, 0x21, + 0x41, 0x5e, 0x67, 0x52, 0x98, 0x04, 0x27, 0x4e, 0x88, 0x1e, 0x42, 0x9a, 0x4b, 0xab, 0xd2, 0xb6, + 0xb4, 0x9b, 0xab, 0x5e, 0x2f, 0xaf, 0x38, 0x70, 0x59, 0x50, 0x04, 0x14, 0xa9, 0xb0, 0x79, 0x4a, + 0x68, 0x60, 0x7b, 0xae, 0x9a, 0xdc, 0x96, 0x76, 0x13, 0x38, 0x5e, 0xa2, 0x2b, 0x90, 0x1e, 0x9e, + 0xd0, 0xc0, 0xa3, 0x6a, 0x62, 0x5b, 0xda, 0xcd, 0x63, 0xb1, 0xd2, 0x0e, 0x01, 0xb8, 0xc3, 0xc1, + 0x99, 0x4f, 0xd0, 0x75, 0xb8, 0x8a, 0xf5, 0xfe, 0x51, 0x67, 0x60, 0x0e, 0x5e, 0xf5, 0x74, 0xf3, + 0xa8, 0xdb, 0xef, 0xe9, 0x8d, 0xf6, 0x7e, 0x5b, 0x6f, 0x2a, 0x1b, 0x28, 0x03, 0xc9, 0xfd, 0xa3, + 0x4e, 0x47, 0x91, 0x50, 0x11, 0xa0, 0x87, 0x8d, 0xa7, 0x7a, 0x63, 0xd0, 0x36, 0xba, 0x8a, 0x8c, + 0xf2, 0x90, 0x79, 0xa6, 0xbf, 0x32, 0x8d, 0x6e, 0xe7, 0x95, 0x92, 0xd0, 0x7e, 0x4b, 0x40, 0xea, + 0x30, 0xca, 0x34, 0xfa, 0x06, 0xc0, 0xa7, 0xde, 0x77, 0x64, 0x18, 0x65, 0x51, 0x95, 0xb7, 0x13, + 0xbb, 0xb9, 0xea, 0xad, 0x95, 0x71, 0xf4, 0xa6, 0x30, 0x3c, 0x47, 0x41, 0x5f, 0x40, 0xf2, 0x9d, + 0xed, 0x8e, 0xd4, 0x04, 0xa3, 0xde, 0x5e, 0x49, 0x7d, 0x66, 0xbb, 0x23, 0xfd, 0x7b, 0x9f, 0x92, + 0x20, 0x0a, 0x14, 0x33, 0x42, 0x94, 0xbd, 0x63, 0xdb, 0x09, 0x09, 0x65, 0x79, 0xf8, 0x58, 0xf6, + 0xf6, 0x19, 0x04, 0x0b, 0x28, 0x7a, 0x0c, 0x29, 0x8f, 0x8e, 0x08, 0x55, 0x53, 0xcc, 0x9d, 0xf6, + 0xb1, 0x93, 0xfa, 0x84, 0x86, 0x67, 0x46, 0x84, 0xc4, 0x9c, 0x80, 0x0e, 0x20, 0x37, 0xb2, 0x83, + 0xd0, 0x76, 0x87, 0xa1, 0xe9, 0xb9, 0x6a, 0x9a, 0xf1, 0xff, 0xb7, 0x96, 0x8f, 0xc9, 0x31, 0xa1, + 0xc4, 0x1d, 0x12, 
0x0c, 0x31, 0xd5, 0x70, 0xd1, 0x0e, 0xe4, 0x83, 0xd0, 0xa2, 0xa1, 0x29, 0x2e, + 0x6b, 0x93, 0x5d, 0x56, 0x8e, 0xd9, 0x1a, 0xcc, 0x84, 0x6e, 0x02, 0x10, 0x77, 0x14, 0x03, 0x32, + 0x0c, 0x90, 0x25, 0xee, 0x48, 0x6c, 0x5f, 0x81, 0xb4, 0x77, 0x7c, 0x1c, 0x90, 0x50, 0x85, 0x6d, + 0x69, 0x37, 0x85, 0xc5, 0x0a, 0xed, 0x41, 0xca, 0xb1, 0x27, 0x76, 0xa8, 0xe6, 0x17, 0x13, 0x12, + 0x97, 0x6a, 0xb9, 0xed, 0x86, 0x0f, 0xab, 0x2f, 0x2c, 0xe7, 0x84, 0x60, 0x8e, 0xd4, 0xee, 0x40, + 0x71, 0x31, 0xb9, 0x08, 0x41, 0xd2, 0xb5, 0x26, 0x84, 0x95, 0x64, 0x16, 0xb3, 0x6f, 0xed, 0x2e, + 0x5c, 0x3a, 0x17, 0xd3, 0x14, 0x28, 0xcf, 0x01, 0x7b, 0x00, 0xb3, 0x6b, 0x46, 0x75, 0xc8, 0xf8, + 0x82, 0x26, 0x2a, 0xfc, 0xa2, 0xf9, 0x9a, 0xf2, 0xb4, 0xbf, 0x24, 0x28, 0x2c, 0xdc, 0xc7, 0xe7, + 0x50, 0x45, 0x4f, 0x21, 0x3b, 0xb2, 0xe9, 0xb4, 0x68, 0xa5, 0xdd, 0x62, 0xf5, 0xde, 0xa7, 0x4b, + 0xa1, 0xdc, 0x8c, 0x39, 0x78, 0x46, 0xd7, 0x74, 0xc8, 0x4e, 0xed, 0xe8, 0x1a, 0xfc, 0xbb, 0xd9, + 0xc6, 0xfc, 0xd5, 0x2c, 0xbd, 0xad, 0x02, 0x64, 0x6b, 0xfd, 0x86, 0xde, 0x6d, 0xb6, 0xbb, 0x07, + 0xfc, 0x81, 0x35, 0xf5, 0xe9, 0x5a, 0xd6, 0x7e, 0x95, 0x20, 0xcd, 0x8b, 0x15, 0x1d, 0x82, 0x32, + 0xf4, 0x26, 0xbe, 0x17, 0xd8, 0x21, 0x31, 0x45, 0x8d, 0xf3, 0x48, 0xef, 0xac, 0x3c, 0x64, 0x23, + 0x06, 0x73, 0x7e, 0x6b, 0x03, 0x97, 0x86, 0x8b, 0x26, 0xd4, 0x85, 0x52, 0x1c, 0x7c, 0xac, 0x28, + 0x33, 0xc5, 0xdb, 0x6b, 0xc3, 0x9e, 0x0a, 0x16, 0xfd, 0x05, 0x4b, 0xbd, 0x00, 0x39, 0x2e, 0x63, + 0x46, 0x7d, 0x4e, 0xfb, 0x45, 0x82, 0xd2, 0xd2, 0x29, 0xd0, 0xd7, 0x20, 0x7b, 0x3e, 0x3b, 0x77, + 0xb1, 0x7a, 0xff, 0x22, 0xe7, 0x2e, 0x1b, 0x3e, 0xa1, 0x56, 0xe8, 0x51, 0x2c, 0x7b, 0x3e, 0x7a, + 0x04, 0x9b, 0xdc, 0x43, 0x20, 0xba, 0xca, 0xda, 0xf7, 0x1d, 0x63, 0xb5, 0xfb, 0x90, 0x89, 0x65, + 0x90, 0x0a, 0xff, 0x32, 0x7a, 0x3a, 0xae, 0x0d, 0x0c, 0xbc, 0x74, 0x17, 0x9b, 0x90, 0xa8, 0x75, + 0x9b, 0x8a, 0xa4, 0xfd, 0x29, 0x43, 0x71, 0x31, 0xd8, 0xcf, 0x52, 0x5f, 0x4f, 0x58, 0xec, 0x17, + 0x29, 0xac, 0x55, 0xa1, 0x3f, 0x80, 0xd4, 0x69, 0xf4, 
0x48, 0x59, 0x1f, 0xcf, 0x55, 0xb7, 0x56, + 0x0a, 0x88, 0x67, 0xcc, 0x80, 0xda, 0x8f, 0xd2, 0x85, 0xc2, 0x2e, 0x40, 0xb6, 0xa3, 0xf7, 0xfb, + 0xe6, 0xa0, 0x55, 0xeb, 0x2a, 0x12, 0xba, 0x02, 0x68, 0xba, 0x34, 0x0d, 0x6c, 0xea, 0x87, 0x47, + 0xb5, 0x8e, 0x22, 0x23, 0x05, 0xf2, 0x07, 0x58, 0xaf, 0x0d, 0x74, 0xcc, 0x91, 0x89, 0xa8, 0xac, + 0xe7, 0x2d, 0x33, 0x70, 0x12, 0x65, 0x21, 0xc5, 0x3f, 0x53, 0x11, 0xaf, 0x55, 0xeb, 0x9b, 0xb5, + 0x6e, 0x43, 0xef, 0x0f, 0x0c, 0xac, 0xe4, 0xb4, 0xbf, 0x65, 0xc8, 0x1c, 0xbc, 0x77, 0xf8, 0xaf, + 0x62, 0x07, 0xf2, 0xec, 0xef, 0x6c, 0x06, 0x21, 0xb5, 0xdd, 0xb1, 0xe8, 0x30, 0x39, 0x66, 0xeb, + 0x33, 0x13, 0xfa, 0x2f, 0x14, 0x2d, 0xc7, 0xf1, 0x3e, 0x98, 0x8e, 0x1d, 0x12, 0x6a, 0x39, 0x01, + 0xcb, 0x61, 0x06, 0x17, 0x98, 0xb5, 0x23, 0x8c, 0xe8, 0x25, 0x14, 0xa3, 0x76, 0x33, 0x32, 0xdf, + 0xd8, 0xee, 0xc8, 0x76, 0xc7, 0x81, 0x68, 0xe7, 0x0f, 0x56, 0x66, 0x2a, 0x3e, 0x40, 0xb9, 0x1b, + 0x71, 0xea, 0x82, 0xa2, 0xbb, 0x21, 0x3d, 0xc3, 0x05, 0x77, 0xde, 0x86, 0x5e, 0xc2, 0x65, 0x56, + 0x91, 0xb6, 0xe7, 0x5a, 0xce, 0x4c, 0x3d, 0xb9, 0xa6, 0xd9, 0xc7, 0xea, 0x3d, 0x8b, 0x5a, 0x13, + 0x12, 0xd5, 0x22, 0x9a, 0x49, 0xc4, 0xc2, 0x5b, 0x6f, 0x01, 0x9d, 0xf7, 0x8e, 0x14, 0x48, 0xbc, + 0x23, 0x67, 0x22, 0x11, 0xd1, 0x27, 0x7a, 0x12, 0x5f, 0xbd, 0xbc, 0xa6, 0xf2, 0xce, 0xbb, 0xe4, + 0xa4, 0x2f, 0xe5, 0xc7, 0x92, 0x16, 0xc0, 0xa5, 0x73, 0xfb, 0xa8, 0xba, 0x28, 0xbb, 0xa6, 0xa2, + 0x5a, 0x1b, 0x42, 0x0c, 0xa9, 0x8b, 0xe3, 0x44, 0x6b, 0x23, 0x1e, 0x28, 0xea, 0x0a, 0x14, 0xfd, + 0x58, 0x9a, 0xbf, 0xff, 0xdf, 0x93, 0xa0, 0x30, 0x97, 0x7c, 0xd0, 0xa8, 0x5b, 0xe1, 0xf0, 0x2d, + 0xba, 0x0b, 0xa5, 0xe0, 0x9d, 0xed, 0xfb, 0x64, 0x64, 0x52, 0x66, 0x0e, 0xd4, 0x34, 0xfb, 0x5f, + 0x15, 0x85, 0x99, 0x83, 0x83, 0xe8, 0xd6, 0x63, 0xe0, 0xc2, 0x00, 0x53, 0x10, 0x56, 0xf1, 0xdb, + 0x7b, 0x0d, 0x88, 0xcf, 0x40, 0x42, 0x8e, 0xb9, 0x16, 0x0d, 0xe6, 0xde, 0xba, 0xd1, 0x89, 0xa1, + 0xcb, 0xb3, 0x19, 0x08, 0x2b, 0x64, 0x6e, 0x83, 0x4d, 0x45, 0x2d, 0x28, 0x2e, 0x68, 0xc7, 
0x4d, + 0x67, 0xe7, 0x93, 0xba, 0xb8, 0x30, 0x2f, 0x16, 0x2c, 0xfd, 0xbb, 0x93, 0xcb, 0xff, 0xee, 0x6f, + 0x21, 0x3f, 0xf1, 0x28, 0x99, 0xba, 0x49, 0xb1, 0xe3, 0x3f, 0x5a, 0xe9, 0x66, 0x39, 0xa3, 0xe5, + 0xe7, 0x1e, 0x25, 0xc2, 0x0f, 0x8b, 0x23, 0x37, 0x99, 0x19, 0xd0, 0xff, 0x41, 0x09, 0x5c, 0xcb, + 0x0f, 0xde, 0x7a, 0xa1, 0x19, 0x4f, 0x88, 0x9b, 0x6c, 0x42, 0x2c, 0xc5, 0xf6, 0x17, 0xdc, 0xac, + 0xfd, 0x24, 0x41, 0x69, 0x49, 0x0b, 0xed, 0xc0, 0xcd, 0xe7, 0x06, 0xd6, 0x4d, 0x3e, 0x1c, 0xf6, + 0x57, 0x4d, 0x87, 0x0a, 0xe4, 0xbb, 0xc6, 0xc0, 0xdc, 0x6f, 0x77, 0xdb, 0xfd, 0x96, 0xde, 0x54, + 0x24, 0x74, 0x03, 0xd4, 0x05, 0x52, 0x6d, 0x3f, 0x6a, 0x11, 0x9d, 0xf6, 0xf3, 0xf6, 0x40, 0x91, + 0xd1, 0x4d, 0xb8, 0xb6, 0x62, 0xb7, 0x71, 0x84, 0xfb, 0x06, 0x56, 0x92, 0xe8, 0x32, 0x94, 0xba, + 0x86, 0x39, 0x8f, 0x50, 0x12, 0xf5, 0x1f, 0x24, 0xb8, 0x3a, 0xf4, 0x26, 0xab, 0xf2, 0x51, 0x07, + 0x5e, 0xd5, 0xd1, 0x34, 0xd3, 0x93, 0x5e, 0x3f, 0x11, 0x90, 0xb1, 0xe7, 0x58, 0xee, 0xb8, 0xec, + 0xd1, 0x71, 0x65, 0x4c, 0x5c, 0x36, 0xeb, 0x54, 0xf8, 0x96, 0xe5, 0xdb, 0xc1, 0xc2, 0x24, 0xff, + 0xd5, 0x74, 0xf1, 0xb3, 0x7c, 0xed, 0x80, 0xd3, 0x1b, 0x8e, 0x77, 0x32, 0x2a, 0x37, 0xa7, 0x7e, + 0x5e, 0xec, 0xbd, 0x49, 0x33, 0x91, 0x87, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x2f, 0x61, + 0x5a, 0x61, 0x0c, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/datastore.pb.go b/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/datastore.pb.go new file mode 100644 index 000000000..0e5191bdd --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/datastore.pb.go @@ -0,0 +1,1597 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/datastore/v1beta3/datastore.proto + +/* +Package datastore is a generated protocol buffer package. 
+ +It is generated from these files: + google/datastore/v1beta3/datastore.proto + google/datastore/v1beta3/entity.proto + google/datastore/v1beta3/query.proto + +It has these top-level messages: + LookupRequest + LookupResponse + RunQueryRequest + RunQueryResponse + BeginTransactionRequest + BeginTransactionResponse + RollbackRequest + RollbackResponse + CommitRequest + CommitResponse + AllocateIdsRequest + AllocateIdsResponse + Mutation + MutationResult + ReadOptions + TransactionOptions + PartitionId + Key + ArrayValue + Value + Entity + EntityResult + Query + KindExpression + PropertyReference + Projection + PropertyOrder + Filter + CompositeFilter + PropertyFilter + GqlQuery + GqlQueryParameter + QueryResultBatch +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The modes available for commits. +type CommitRequest_Mode int32 + +const ( + // Unspecified. This value must not be used. + CommitRequest_MODE_UNSPECIFIED CommitRequest_Mode = 0 + // Transactional: The mutations are either all applied, or none are applied. + // Learn about transactions [here](https://cloud.google.com/datastore/docs/concepts/transactions). + CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1 + // Non-transactional: The mutations may not apply as all or none. 
+ CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2 +) + +var CommitRequest_Mode_name = map[int32]string{ + 0: "MODE_UNSPECIFIED", + 1: "TRANSACTIONAL", + 2: "NON_TRANSACTIONAL", +} +var CommitRequest_Mode_value = map[string]int32{ + "MODE_UNSPECIFIED": 0, + "TRANSACTIONAL": 1, + "NON_TRANSACTIONAL": 2, +} + +func (x CommitRequest_Mode) String() string { + return proto.EnumName(CommitRequest_Mode_name, int32(x)) +} +func (CommitRequest_Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } + +// The possible values for read consistencies. +type ReadOptions_ReadConsistency int32 + +const ( + // Unspecified. This value must not be used. + ReadOptions_READ_CONSISTENCY_UNSPECIFIED ReadOptions_ReadConsistency = 0 + // Strong consistency. + ReadOptions_STRONG ReadOptions_ReadConsistency = 1 + // Eventual consistency. + ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2 +) + +var ReadOptions_ReadConsistency_name = map[int32]string{ + 0: "READ_CONSISTENCY_UNSPECIFIED", + 1: "STRONG", + 2: "EVENTUAL", +} +var ReadOptions_ReadConsistency_value = map[string]int32{ + "READ_CONSISTENCY_UNSPECIFIED": 0, + "STRONG": 1, + "EVENTUAL": 2, +} + +func (x ReadOptions_ReadConsistency) String() string { + return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x)) +} +func (ReadOptions_ReadConsistency) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{14, 0} +} + +// The request for [Datastore.Lookup][google.datastore.v1beta3.Datastore.Lookup]. +type LookupRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The options for this lookup request. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options,json=readOptions" json:"read_options,omitempty"` + // Keys of entities to look up. 
+ Keys []*Key `protobuf:"bytes,3,rep,name=keys" json:"keys,omitempty"` +} + +func (m *LookupRequest) Reset() { *m = LookupRequest{} } +func (m *LookupRequest) String() string { return proto.CompactTextString(m) } +func (*LookupRequest) ProtoMessage() {} +func (*LookupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *LookupRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *LookupRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *LookupRequest) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +// The response for [Datastore.Lookup][google.datastore.v1beta3.Datastore.Lookup]. +type LookupResponse struct { + // Entities found as `ResultType.FULL` entities. The order of results in this + // field is undefined and has no relation to the order of the keys in the + // input. + Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + // Entities not found as `ResultType.KEY_ONLY` entities. The order of results + // in this field is undefined and has no relation to the order of the keys + // in the input. + Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"` + // A list of keys that were not looked up due to resource constraints. The + // order of results in this field is undefined and has no relation to the + // order of the keys in the input. 
+ Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"` +} + +func (m *LookupResponse) Reset() { *m = LookupResponse{} } +func (m *LookupResponse) String() string { return proto.CompactTextString(m) } +func (*LookupResponse) ProtoMessage() {} +func (*LookupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *LookupResponse) GetFound() []*EntityResult { + if m != nil { + return m.Found + } + return nil +} + +func (m *LookupResponse) GetMissing() []*EntityResult { + if m != nil { + return m.Missing + } + return nil +} + +func (m *LookupResponse) GetDeferred() []*Key { + if m != nil { + return m.Deferred + } + return nil +} + +// The request for [Datastore.RunQuery][google.datastore.v1beta3.Datastore.RunQuery]. +type RunQueryRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Entities are partitioned into subsets, identified by a partition ID. + // Queries are scoped to a single partition. + // This partition ID is normalized with the standard default context + // partition ID. + PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id,json=partitionId" json:"partition_id,omitempty"` + // The options for this query. + ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options,json=readOptions" json:"read_options,omitempty"` + // The type of query. 
+ // + // Types that are valid to be assigned to QueryType: + // *RunQueryRequest_Query + // *RunQueryRequest_GqlQuery + QueryType isRunQueryRequest_QueryType `protobuf_oneof:"query_type"` +} + +func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } +func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } +func (*RunQueryRequest) ProtoMessage() {} +func (*RunQueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isRunQueryRequest_QueryType interface { + isRunQueryRequest_QueryType() +} + +type RunQueryRequest_Query struct { + Query *Query `protobuf:"bytes,3,opt,name=query,oneof"` +} +type RunQueryRequest_GqlQuery struct { + GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query,json=gqlQuery,oneof"` +} + +func (*RunQueryRequest_Query) isRunQueryRequest_QueryType() {} +func (*RunQueryRequest_GqlQuery) isRunQueryRequest_QueryType() {} + +func (m *RunQueryRequest) GetQueryType() isRunQueryRequest_QueryType { + if m != nil { + return m.QueryType + } + return nil +} + +func (m *RunQueryRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *RunQueryRequest) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *RunQueryRequest) GetReadOptions() *ReadOptions { + if m != nil { + return m.ReadOptions + } + return nil +} + +func (m *RunQueryRequest) GetQuery() *Query { + if x, ok := m.GetQueryType().(*RunQueryRequest_Query); ok { + return x.Query + } + return nil +} + +func (m *RunQueryRequest) GetGqlQuery() *GqlQuery { + if x, ok := m.GetQueryType().(*RunQueryRequest_GqlQuery); ok { + return x.GqlQuery + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RunQueryRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RunQueryRequest_OneofMarshaler, _RunQueryRequest_OneofUnmarshaler, _RunQueryRequest_OneofSizer, []interface{}{ + (*RunQueryRequest_Query)(nil), + (*RunQueryRequest_GqlQuery)(nil), + } +} + +func _RunQueryRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RunQueryRequest) + // query_type + switch x := m.QueryType.(type) { + case *RunQueryRequest_Query: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case *RunQueryRequest_GqlQuery: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GqlQuery); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RunQueryRequest.QueryType has unexpected type %T", x) + } + return nil +} + +func _RunQueryRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RunQueryRequest) + switch tag { + case 3: // query_type.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Query) + err := b.DecodeMessage(msg) + m.QueryType = &RunQueryRequest_Query{msg} + return true, err + case 7: // query_type.gql_query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GqlQuery) + err := b.DecodeMessage(msg) + m.QueryType = &RunQueryRequest_GqlQuery{msg} + return true, err + default: + return false, nil + } +} + +func _RunQueryRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RunQueryRequest) + // query_type + switch x := m.QueryType.(type) { + case *RunQueryRequest_Query: + s := proto.Size(x.Query) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RunQueryRequest_GqlQuery: + s := proto.Size(x.GqlQuery) + n += 
proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Datastore.RunQuery][google.datastore.v1beta3.Datastore.RunQuery]. +type RunQueryResponse struct { + // A batch of query results (always present). + Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"` + // The parsed form of the `GqlQuery` from the request, if it was set. + Query *Query `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` +} + +func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } +func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } +func (*RunQueryResponse) ProtoMessage() {} +func (*RunQueryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *RunQueryResponse) GetBatch() *QueryResultBatch { + if m != nil { + return m.Batch + } + return nil +} + +func (m *RunQueryResponse) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +// The request for [Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction]. +type BeginTransactionRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Options for a new transaction. 
+ TransactionOptions *TransactionOptions `protobuf:"bytes,10,opt,name=transaction_options,json=transactionOptions" json:"transaction_options,omitempty"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} +func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BeginTransactionRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *BeginTransactionRequest) GetTransactionOptions() *TransactionOptions { + if m != nil { + return m.TransactionOptions + } + return nil +} + +// The response for [Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction]. +type BeginTransactionResponse struct { + // The transaction identifier (always present). + Transaction []byte `protobuf:"bytes,1,opt,name=transaction,proto3" json:"transaction,omitempty"` +} + +func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } +func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionResponse) ProtoMessage() {} +func (*BeginTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *BeginTransactionResponse) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for [Datastore.Rollback][google.datastore.v1beta3.Datastore.Rollback]. +type RollbackRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The transaction identifier, returned by a call to + // [Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction]. 
+ Transaction []byte `protobuf:"bytes,1,opt,name=transaction,proto3" json:"transaction,omitempty"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} +func (*RollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *RollbackRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *RollbackRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The response for [Datastore.Rollback][google.datastore.v1beta3.Datastore.Rollback]. +// (an empty message). +type RollbackResponse struct { +} + +func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } +func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackResponse) ProtoMessage() {} +func (*RollbackResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +// The request for [Datastore.Commit][google.datastore.v1beta3.Datastore.Commit]. +type CommitRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The type of commit to perform. Defaults to `TRANSACTIONAL`. + Mode CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=google.datastore.v1beta3.CommitRequest_Mode" json:"mode,omitempty"` + // Must be set when mode is `TRANSACTIONAL`. + // + // Types that are valid to be assigned to TransactionSelector: + // *CommitRequest_Transaction + TransactionSelector isCommitRequest_TransactionSelector `protobuf_oneof:"transaction_selector"` + // The mutations to perform. + // + // When mode is `TRANSACTIONAL`, mutations affecting a single entity are + // applied in order. 
The following sequences of mutations affecting a single + // entity are not permitted in a single `Commit` request: + // + // - `insert` followed by `insert` + // - `update` followed by `insert` + // - `upsert` followed by `insert` + // - `delete` followed by `update` + // + // When mode is `NON_TRANSACTIONAL`, no two mutations may affect a single + // entity. + Mutations []*Mutation `protobuf:"bytes,6,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +type isCommitRequest_TransactionSelector interface { + isCommitRequest_TransactionSelector() +} + +type CommitRequest_Transaction struct { + Transaction []byte `protobuf:"bytes,1,opt,name=transaction,proto3,oneof"` +} + +func (*CommitRequest_Transaction) isCommitRequest_TransactionSelector() {} + +func (m *CommitRequest) GetTransactionSelector() isCommitRequest_TransactionSelector { + if m != nil { + return m.TransactionSelector + } + return nil +} + +func (m *CommitRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CommitRequest) GetMode() CommitRequest_Mode { + if m != nil { + return m.Mode + } + return CommitRequest_MODE_UNSPECIFIED +} + +func (m *CommitRequest) GetTransaction() []byte { + if x, ok := m.GetTransactionSelector().(*CommitRequest_Transaction); ok { + return x.Transaction + } + return nil +} + +func (m *CommitRequest) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CommitRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CommitRequest_OneofMarshaler, _CommitRequest_OneofUnmarshaler, _CommitRequest_OneofSizer, []interface{}{ + (*CommitRequest_Transaction)(nil), + } +} + +func _CommitRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CommitRequest) + // transaction_selector + switch x := m.TransactionSelector.(type) { + case *CommitRequest_Transaction: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Transaction) + case nil: + default: + return fmt.Errorf("CommitRequest.TransactionSelector has unexpected type %T", x) + } + return nil +} + +func _CommitRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CommitRequest) + switch tag { + case 1: // transaction_selector.transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.TransactionSelector = &CommitRequest_Transaction{x} + return true, err + default: + return false, nil + } +} + +func _CommitRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CommitRequest) + // transaction_selector + switch x := m.TransactionSelector.(type) { + case *CommitRequest_Transaction: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Transaction))) + n += len(x.Transaction) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Datastore.Commit][google.datastore.v1beta3.Datastore.Commit]. +type CommitResponse struct { + // The result of performing the mutations. + // The i-th mutation result corresponds to the i-th mutation in the request. 
+ MutationResults []*MutationResult `protobuf:"bytes,3,rep,name=mutation_results,json=mutationResults" json:"mutation_results,omitempty"` + // The number of index entries updated during the commit, or zero if none were + // updated. + IndexUpdates int32 `protobuf:"varint,4,opt,name=index_updates,json=indexUpdates" json:"index_updates,omitempty"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *CommitResponse) GetMutationResults() []*MutationResult { + if m != nil { + return m.MutationResults + } + return nil +} + +func (m *CommitResponse) GetIndexUpdates() int32 { + if m != nil { + return m.IndexUpdates + } + return 0 +} + +// The request for [Datastore.AllocateIds][google.datastore.v1beta3.Datastore.AllocateIds]. +type AllocateIdsRequest struct { + // The ID of the project against which to make the request. + ProjectId string `protobuf:"bytes,8,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // A list of keys with incomplete key paths for which to allocate IDs. + // No key may be reserved/read-only. + Keys []*Key `protobuf:"bytes,1,rep,name=keys" json:"keys,omitempty"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} +func (*AllocateIdsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *AllocateIdsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *AllocateIdsRequest) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +// The response for [Datastore.AllocateIds][google.datastore.v1beta3.Datastore.AllocateIds]. 
+type AllocateIdsResponse struct { + // The keys specified in the request (in the same order), each with + // its key path completed with a newly allocated ID. + Keys []*Key `protobuf:"bytes,1,rep,name=keys" json:"keys,omitempty"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} +func (*AllocateIdsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *AllocateIdsResponse) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +// A mutation to apply to an entity. +type Mutation struct { + // The mutation operation. + // + // For `insert`, `update`, and `upsert`: + // - The entity's key must not be reserved/read-only. + // - No property in the entity may have a reserved name, + // not even a property in an entity in a value. + // - No value in the entity may have meaning 18, + // not even a value in an entity in another value. + // + // Types that are valid to be assigned to Operation: + // *Mutation_Insert + // *Mutation_Update + // *Mutation_Upsert + // *Mutation_Delete + Operation isMutation_Operation `protobuf_oneof:"operation"` + // When set, the server will detect whether or not this mutation conflicts + // with the current version of the entity on the server. Conflicting mutations + // are not applied, and are marked as such in MutationResult. 
+ // + // Types that are valid to be assigned to ConflictDetectionStrategy: + // *Mutation_BaseVersion + ConflictDetectionStrategy isMutation_ConflictDetectionStrategy `protobuf_oneof:"conflict_detection_strategy"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +type isMutation_Operation interface { + isMutation_Operation() +} +type isMutation_ConflictDetectionStrategy interface { + isMutation_ConflictDetectionStrategy() +} + +type Mutation_Insert struct { + Insert *Entity `protobuf:"bytes,4,opt,name=insert,oneof"` +} +type Mutation_Update struct { + Update *Entity `protobuf:"bytes,5,opt,name=update,oneof"` +} +type Mutation_Upsert struct { + Upsert *Entity `protobuf:"bytes,6,opt,name=upsert,oneof"` +} +type Mutation_Delete struct { + Delete *Key `protobuf:"bytes,7,opt,name=delete,oneof"` +} +type Mutation_BaseVersion struct { + BaseVersion int64 `protobuf:"varint,8,opt,name=base_version,json=baseVersion,oneof"` +} + +func (*Mutation_Insert) isMutation_Operation() {} +func (*Mutation_Update) isMutation_Operation() {} +func (*Mutation_Upsert) isMutation_Operation() {} +func (*Mutation_Delete) isMutation_Operation() {} +func (*Mutation_BaseVersion) isMutation_ConflictDetectionStrategy() {} + +func (m *Mutation) GetOperation() isMutation_Operation { + if m != nil { + return m.Operation + } + return nil +} +func (m *Mutation) GetConflictDetectionStrategy() isMutation_ConflictDetectionStrategy { + if m != nil { + return m.ConflictDetectionStrategy + } + return nil +} + +func (m *Mutation) GetInsert() *Entity { + if x, ok := m.GetOperation().(*Mutation_Insert); ok { + return x.Insert + } + return nil +} + +func (m *Mutation) GetUpdate() *Entity { + if x, ok := m.GetOperation().(*Mutation_Update); ok { + return x.Update + } + return nil +} + +func (m *Mutation) GetUpsert() 
*Entity { + if x, ok := m.GetOperation().(*Mutation_Upsert); ok { + return x.Upsert + } + return nil +} + +func (m *Mutation) GetDelete() *Key { + if x, ok := m.GetOperation().(*Mutation_Delete); ok { + return x.Delete + } + return nil +} + +func (m *Mutation) GetBaseVersion() int64 { + if x, ok := m.GetConflictDetectionStrategy().(*Mutation_BaseVersion); ok { + return x.BaseVersion + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Mutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Mutation_OneofMarshaler, _Mutation_OneofUnmarshaler, _Mutation_OneofSizer, []interface{}{ + (*Mutation_Insert)(nil), + (*Mutation_Update)(nil), + (*Mutation_Upsert)(nil), + (*Mutation_Delete)(nil), + (*Mutation_BaseVersion)(nil), + } +} + +func _Mutation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Mutation) + // operation + switch x := m.Operation.(type) { + case *Mutation_Insert: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Insert); err != nil { + return err + } + case *Mutation_Update: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Update); err != nil { + return err + } + case *Mutation_Upsert: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Upsert); err != nil { + return err + } + case *Mutation_Delete: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Delete); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Mutation.Operation has unexpected type %T", x) + } + // conflict_detection_strategy + switch x := m.ConflictDetectionStrategy.(type) { + case *Mutation_BaseVersion: + b.EncodeVarint(8<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.BaseVersion)) + case nil: + default: + return fmt.Errorf("Mutation.ConflictDetectionStrategy has unexpected 
type %T", x) + } + return nil +} + +func _Mutation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Mutation) + switch tag { + case 4: // operation.insert + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Entity) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Insert{msg} + return true, err + case 5: // operation.update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Entity) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Update{msg} + return true, err + case 6: // operation.upsert + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Entity) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Upsert{msg} + return true, err + case 7: // operation.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Key) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Delete{msg} + return true, err + case 8: // conflict_detection_strategy.base_version + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ConflictDetectionStrategy = &Mutation_BaseVersion{int64(x)} + return true, err + default: + return false, nil + } +} + +func _Mutation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Mutation) + // operation + switch x := m.Operation.(type) { + case *Mutation_Insert: + s := proto.Size(x.Insert) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Update: + s := proto.Size(x.Update) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Upsert: + s := proto.Size(x.Upsert) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Delete: + s := proto.Size(x.Delete) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // conflict_detection_strategy + switch x := m.ConflictDetectionStrategy.(type) { + case *Mutation_BaseVersion: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.BaseVersion)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The result of applying a mutation. +type MutationResult struct { + // The automatically allocated key. + // Set only when the mutation allocated a key. + Key *Key `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` + // The version of the entity on the server after processing the mutation. If + // the mutation doesn't change anything on the server, then the version will + // be the version of the current entity or, if no entity is present, a version + // that is strictly greater than the version of any previous entity and less + // than the version of any possible future entity. + Version int64 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + // Whether a conflict was detected for this mutation. Always false when a + // conflict detection strategy field is not set in the mutation. 
+ ConflictDetected bool `protobuf:"varint,5,opt,name=conflict_detected,json=conflictDetected" json:"conflict_detected,omitempty"` +} + +func (m *MutationResult) Reset() { *m = MutationResult{} } +func (m *MutationResult) String() string { return proto.CompactTextString(m) } +func (*MutationResult) ProtoMessage() {} +func (*MutationResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *MutationResult) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *MutationResult) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *MutationResult) GetConflictDetected() bool { + if m != nil { + return m.ConflictDetected + } + return false +} + +// The options shared by read requests. +type ReadOptions struct { + // If not specified, lookups and ancestor queries default to + // `read_consistency`=`STRONG`, global queries default to + // `read_consistency`=`EVENTUAL`. + // + // Types that are valid to be assigned to ConsistencyType: + // *ReadOptions_ReadConsistency_ + // *ReadOptions_Transaction + ConsistencyType isReadOptions_ConsistencyType `protobuf_oneof:"consistency_type"` +} + +func (m *ReadOptions) Reset() { *m = ReadOptions{} } +func (m *ReadOptions) String() string { return proto.CompactTextString(m) } +func (*ReadOptions) ProtoMessage() {} +func (*ReadOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +type isReadOptions_ConsistencyType interface { + isReadOptions_ConsistencyType() +} + +type ReadOptions_ReadConsistency_ struct { + ReadConsistency ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,json=readConsistency,enum=google.datastore.v1beta3.ReadOptions_ReadConsistency,oneof"` +} +type ReadOptions_Transaction struct { + Transaction []byte `protobuf:"bytes,2,opt,name=transaction,proto3,oneof"` +} + +func (*ReadOptions_ReadConsistency_) isReadOptions_ConsistencyType() {} +func (*ReadOptions_Transaction) 
isReadOptions_ConsistencyType() {} + +func (m *ReadOptions) GetConsistencyType() isReadOptions_ConsistencyType { + if m != nil { + return m.ConsistencyType + } + return nil +} + +func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency { + if x, ok := m.GetConsistencyType().(*ReadOptions_ReadConsistency_); ok { + return x.ReadConsistency + } + return ReadOptions_READ_CONSISTENCY_UNSPECIFIED +} + +func (m *ReadOptions) GetTransaction() []byte { + if x, ok := m.GetConsistencyType().(*ReadOptions_Transaction); ok { + return x.Transaction + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ReadOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ReadOptions_OneofMarshaler, _ReadOptions_OneofUnmarshaler, _ReadOptions_OneofSizer, []interface{}{ + (*ReadOptions_ReadConsistency_)(nil), + (*ReadOptions_Transaction)(nil), + } +} + +func _ReadOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadOptions) + // consistency_type + switch x := m.ConsistencyType.(type) { + case *ReadOptions_ReadConsistency_: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.ReadConsistency)) + case *ReadOptions_Transaction: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Transaction) + case nil: + default: + return fmt.Errorf("ReadOptions.ConsistencyType has unexpected type %T", x) + } + return nil +} + +func _ReadOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadOptions) + switch tag { + case 1: // consistency_type.read_consistency + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ConsistencyType = &ReadOptions_ReadConsistency_{ReadOptions_ReadConsistency(x)} + return true, err + case 2: // 
consistency_type.transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ConsistencyType = &ReadOptions_Transaction{x} + return true, err + default: + return false, nil + } +} + +func _ReadOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ReadOptions) + // consistency_type + switch x := m.ConsistencyType.(type) { + case *ReadOptions_ReadConsistency_: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.ReadConsistency)) + case *ReadOptions_Transaction: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Transaction))) + n += len(x.Transaction) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Options for beginning a new transaction. +type TransactionOptions struct { + // The `mode` of the transaction, indicating whether write operations are + // supported. + // + // Types that are valid to be assigned to Mode: + // *TransactionOptions_ReadWrite_ + // *TransactionOptions_ReadOnly_ + Mode isTransactionOptions_Mode `protobuf_oneof:"mode"` +} + +func (m *TransactionOptions) Reset() { *m = TransactionOptions{} } +func (m *TransactionOptions) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions) ProtoMessage() {} +func (*TransactionOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type isTransactionOptions_Mode interface { + isTransactionOptions_Mode() +} + +type TransactionOptions_ReadWrite_ struct { + ReadWrite *TransactionOptions_ReadWrite `protobuf:"bytes,1,opt,name=read_write,json=readWrite,oneof"` +} +type TransactionOptions_ReadOnly_ struct { + ReadOnly *TransactionOptions_ReadOnly `protobuf:"bytes,2,opt,name=read_only,json=readOnly,oneof"` +} + +func (*TransactionOptions_ReadWrite_) isTransactionOptions_Mode() {} +func (*TransactionOptions_ReadOnly_) isTransactionOptions_Mode() {} + +func (m 
*TransactionOptions) GetMode() isTransactionOptions_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *TransactionOptions) GetReadWrite() *TransactionOptions_ReadWrite { + if x, ok := m.GetMode().(*TransactionOptions_ReadWrite_); ok { + return x.ReadWrite + } + return nil +} + +func (m *TransactionOptions) GetReadOnly() *TransactionOptions_ReadOnly { + if x, ok := m.GetMode().(*TransactionOptions_ReadOnly_); ok { + return x.ReadOnly + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TransactionOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionOptions_OneofMarshaler, _TransactionOptions_OneofUnmarshaler, _TransactionOptions_OneofSizer, []interface{}{ + (*TransactionOptions_ReadWrite_)(nil), + (*TransactionOptions_ReadOnly_)(nil), + } +} + +func _TransactionOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadWrite_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadWrite); err != nil { + return err + } + case *TransactionOptions_ReadOnly_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadOnly); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionOptions.Mode has unexpected type %T", x) + } + return nil +} + +func _TransactionOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionOptions) + switch tag { + case 1: // mode.read_write + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadWrite) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadWrite_{msg} + return true, err + case 2: // mode.read_only + if wire 
!= proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadOnly) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadOnly_{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadWrite_: + s := proto.Size(x.ReadWrite) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_: + s := proto.Size(x.ReadOnly) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Options specific to read / write transactions. +type TransactionOptions_ReadWrite struct { + // The transaction identifier of the transaction being retried. + PreviousTransaction []byte `protobuf:"bytes,1,opt,name=previous_transaction,json=previousTransaction,proto3" json:"previous_transaction,omitempty"` +} + +func (m *TransactionOptions_ReadWrite) Reset() { *m = TransactionOptions_ReadWrite{} } +func (m *TransactionOptions_ReadWrite) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadWrite) ProtoMessage() {} +func (*TransactionOptions_ReadWrite) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{15, 0} +} + +func (m *TransactionOptions_ReadWrite) GetPreviousTransaction() []byte { + if m != nil { + return m.PreviousTransaction + } + return nil +} + +// Options specific to read-only transactions. 
+type TransactionOptions_ReadOnly struct { +} + +func (m *TransactionOptions_ReadOnly) Reset() { *m = TransactionOptions_ReadOnly{} } +func (m *TransactionOptions_ReadOnly) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadOnly) ProtoMessage() {} +func (*TransactionOptions_ReadOnly) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15, 1} } + +func init() { + proto.RegisterType((*LookupRequest)(nil), "google.datastore.v1beta3.LookupRequest") + proto.RegisterType((*LookupResponse)(nil), "google.datastore.v1beta3.LookupResponse") + proto.RegisterType((*RunQueryRequest)(nil), "google.datastore.v1beta3.RunQueryRequest") + proto.RegisterType((*RunQueryResponse)(nil), "google.datastore.v1beta3.RunQueryResponse") + proto.RegisterType((*BeginTransactionRequest)(nil), "google.datastore.v1beta3.BeginTransactionRequest") + proto.RegisterType((*BeginTransactionResponse)(nil), "google.datastore.v1beta3.BeginTransactionResponse") + proto.RegisterType((*RollbackRequest)(nil), "google.datastore.v1beta3.RollbackRequest") + proto.RegisterType((*RollbackResponse)(nil), "google.datastore.v1beta3.RollbackResponse") + proto.RegisterType((*CommitRequest)(nil), "google.datastore.v1beta3.CommitRequest") + proto.RegisterType((*CommitResponse)(nil), "google.datastore.v1beta3.CommitResponse") + proto.RegisterType((*AllocateIdsRequest)(nil), "google.datastore.v1beta3.AllocateIdsRequest") + proto.RegisterType((*AllocateIdsResponse)(nil), "google.datastore.v1beta3.AllocateIdsResponse") + proto.RegisterType((*Mutation)(nil), "google.datastore.v1beta3.Mutation") + proto.RegisterType((*MutationResult)(nil), "google.datastore.v1beta3.MutationResult") + proto.RegisterType((*ReadOptions)(nil), "google.datastore.v1beta3.ReadOptions") + proto.RegisterType((*TransactionOptions)(nil), "google.datastore.v1beta3.TransactionOptions") + proto.RegisterType((*TransactionOptions_ReadWrite)(nil), "google.datastore.v1beta3.TransactionOptions.ReadWrite") + 
proto.RegisterType((*TransactionOptions_ReadOnly)(nil), "google.datastore.v1beta3.TransactionOptions.ReadOnly") + proto.RegisterEnum("google.datastore.v1beta3.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value) + proto.RegisterEnum("google.datastore.v1beta3.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Datastore service + +type DatastoreClient interface { + // Looks up entities by key. + Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error) + // Queries for entities. + RunQuery(ctx context.Context, in *RunQueryRequest, opts ...grpc.CallOption) (*RunQueryResponse, error) + // Begins a new transaction. + BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*BeginTransactionResponse, error) + // Commits a transaction, optionally creating, deleting or modifying some + // entities. + Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) + // Rolls back a transaction. + Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*RollbackResponse, error) + // Allocates IDs for the given keys, which is useful for referencing an entity + // before it is inserted. 
+ AllocateIds(ctx context.Context, in *AllocateIdsRequest, opts ...grpc.CallOption) (*AllocateIdsResponse, error) +} + +type datastoreClient struct { + cc *grpc.ClientConn +} + +func NewDatastoreClient(cc *grpc.ClientConn) DatastoreClient { + return &datastoreClient{cc} +} + +func (c *datastoreClient) Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error) { + out := new(LookupResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1beta3.Datastore/Lookup", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) RunQuery(ctx context.Context, in *RunQueryRequest, opts ...grpc.CallOption) (*RunQueryResponse, error) { + out := new(RunQueryResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1beta3.Datastore/RunQuery", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*BeginTransactionResponse, error) { + out := new(BeginTransactionResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1beta3.Datastore/BeginTransaction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { + out := new(CommitResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1beta3.Datastore/Commit", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*RollbackResponse, error) { + out := new(RollbackResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1beta3.Datastore/Rollback", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *datastoreClient) AllocateIds(ctx context.Context, in *AllocateIdsRequest, opts ...grpc.CallOption) (*AllocateIdsResponse, error) { + out := new(AllocateIdsResponse) + err := grpc.Invoke(ctx, "/google.datastore.v1beta3.Datastore/AllocateIds", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Datastore service + +type DatastoreServer interface { + // Looks up entities by key. + Lookup(context.Context, *LookupRequest) (*LookupResponse, error) + // Queries for entities. + RunQuery(context.Context, *RunQueryRequest) (*RunQueryResponse, error) + // Begins a new transaction. + BeginTransaction(context.Context, *BeginTransactionRequest) (*BeginTransactionResponse, error) + // Commits a transaction, optionally creating, deleting or modifying some + // entities. + Commit(context.Context, *CommitRequest) (*CommitResponse, error) + // Rolls back a transaction. + Rollback(context.Context, *RollbackRequest) (*RollbackResponse, error) + // Allocates IDs for the given keys, which is useful for referencing an entity + // before it is inserted. 
+ AllocateIds(context.Context, *AllocateIdsRequest) (*AllocateIdsResponse, error) +} + +func RegisterDatastoreServer(s *grpc.Server, srv DatastoreServer) { + s.RegisterService(&_Datastore_serviceDesc, srv) +} + +func _Datastore_Lookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LookupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).Lookup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1beta3.Datastore/Lookup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).Lookup(ctx, req.(*LookupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_RunQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunQueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).RunQuery(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1beta3.Datastore/RunQuery", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).RunQuery(ctx, req.(*RunQueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_BeginTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BeginTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).BeginTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1beta3.Datastore/BeginTransaction", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).BeginTransaction(ctx, req.(*BeginTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1beta3.Datastore/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).Commit(ctx, req.(*CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_Rollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).Rollback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1beta3.Datastore/Rollback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatastoreServer).Rollback(ctx, req.(*RollbackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Datastore_AllocateIds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AllocateIdsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatastoreServer).AllocateIds(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.datastore.v1beta3.Datastore/AllocateIds", + } + handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { + return srv.(DatastoreServer).AllocateIds(ctx, req.(*AllocateIdsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Datastore_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.datastore.v1beta3.Datastore", + HandlerType: (*DatastoreServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Lookup", + Handler: _Datastore_Lookup_Handler, + }, + { + MethodName: "RunQuery", + Handler: _Datastore_RunQuery_Handler, + }, + { + MethodName: "BeginTransaction", + Handler: _Datastore_BeginTransaction_Handler, + }, + { + MethodName: "Commit", + Handler: _Datastore_Commit_Handler, + }, + { + MethodName: "Rollback", + Handler: _Datastore_Rollback_Handler, + }, + { + MethodName: "AllocateIds", + Handler: _Datastore_AllocateIds_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/datastore/v1beta3/datastore.proto", +} + +func init() { proto.RegisterFile("google/datastore/v1beta3/datastore.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1323 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdf, 0x6f, 0x1b, 0xc5, + 0x13, 0xcf, 0x3a, 0x89, 0x63, 0x8f, 0xf3, 0xc3, 0xdd, 0xf6, 0xfb, 0xc5, 0x32, 0x2d, 0x58, 0x57, + 0x4a, 0x4d, 0x28, 0x36, 0x71, 0x09, 0x15, 0xa1, 0x42, 0xb1, 0x1d, 0xb7, 0xb6, 0x68, 0xec, 0xb0, + 0x71, 0x5b, 0x81, 0x84, 0xac, 0xb3, 0x6f, 0x63, 0x8e, 0x9c, 0x6f, 0x2f, 0x77, 0xeb, 0x42, 0x84, + 0x78, 0xe1, 0x09, 0x81, 0x78, 0x02, 0xd4, 0x67, 0x5e, 0xe1, 0x19, 0xf8, 0x17, 0x90, 0x90, 0x78, + 0xe1, 0x5f, 0xe0, 0x8f, 0xe0, 0x11, 0xdd, 0xde, 0x9e, 0xed, 0xb3, 0x7b, 0xf6, 0x05, 0xf1, 0x76, + 0x3b, 0x37, 0x9f, 0x99, 0xcf, 0xcc, 0xec, 0xcd, 0xcc, 0x41, 0xbe, 0xcf, 0x58, 0xdf, 0xa0, 0x45, + 0x4d, 0xe5, 0xaa, 0xc3, 0x99, 0x4d, 0x8b, 0x4f, 0x76, 0xba, 0x94, 0xab, 0xb7, 0xc7, 0x92, 0x82, + 0x65, 0x33, 0xce, 0x70, 0xc6, 0xd3, 0x2c, 0x8c, 0xe5, 0x52, 0x33, 0x7b, 0x55, 0xda, 0x50, 0x2d, + 0xbd, 0xa8, 0x9a, 0x26, 0xe3, 0x2a, 0xd7, 0x99, 0xe9, 0x78, 0xb8, 
0xec, 0x8d, 0x50, 0x0f, 0xd4, + 0xe4, 0x3a, 0x3f, 0x97, 0x6a, 0x2f, 0x85, 0xaa, 0x9d, 0x0d, 0xa9, 0x2d, 0xb5, 0x94, 0x9f, 0x10, + 0x6c, 0x3c, 0x60, 0xec, 0x74, 0x68, 0x11, 0x7a, 0x36, 0xa4, 0x0e, 0xc7, 0xd7, 0x00, 0x2c, 0x9b, + 0x7d, 0x4c, 0x7b, 0xbc, 0xa3, 0x6b, 0x99, 0x44, 0x0e, 0xe5, 0x93, 0x24, 0x29, 0x25, 0x0d, 0x0d, + 0xd7, 0x61, 0xdd, 0xa6, 0xaa, 0xd6, 0x61, 0x96, 0xe0, 0x94, 0x41, 0x39, 0x94, 0x4f, 0x95, 0x6e, + 0x14, 0xc2, 0x82, 0x29, 0x10, 0xaa, 0x6a, 0x2d, 0x4f, 0x99, 0xa4, 0xec, 0xf1, 0x01, 0xef, 0xc0, + 0xca, 0x29, 0x3d, 0x77, 0x32, 0xcb, 0xb9, 0xe5, 0x7c, 0xaa, 0x74, 0x2d, 0xdc, 0xc2, 0xbb, 0xf4, + 0x9c, 0x08, 0x55, 0xe5, 0x77, 0x04, 0x9b, 0x3e, 0x5b, 0xc7, 0x62, 0xa6, 0x43, 0xf1, 0x5d, 0x58, + 0x3d, 0x61, 0x43, 0x53, 0xcb, 0x20, 0x61, 0xe6, 0xe5, 0x70, 0x33, 0x35, 0x91, 0x1d, 0x42, 0x9d, + 0xa1, 0xc1, 0x89, 0x07, 0xc2, 0xfb, 0xb0, 0x36, 0xd0, 0x1d, 0x47, 0x37, 0xfb, 0x99, 0xd8, 0x85, + 0xf0, 0x3e, 0x0c, 0xbf, 0x05, 0x09, 0x8d, 0x9e, 0x50, 0xdb, 0xa6, 0x5a, 0xb4, 0x48, 0x46, 0xea, + 0xca, 0x1f, 0x31, 0xd8, 0x22, 0x43, 0xf3, 0x3d, 0xb7, 0x1c, 0xd1, 0xb3, 0x6f, 0xa9, 0x36, 0xd7, + 0xdd, 0x0c, 0xba, 0x0a, 0xb1, 0x45, 0xd9, 0x3f, 0xf2, 0xb5, 0x1b, 0x1a, 0x49, 0x59, 0xe3, 0xc3, + 0x7f, 0x58, 0xc7, 0x3b, 0xb0, 0x2a, 0x6e, 0x54, 0x66, 0x59, 0x98, 0x78, 0x31, 0xdc, 0x84, 0x88, + 0xb4, 0xbe, 0x44, 0x3c, 0x7d, 0x5c, 0x86, 0x64, 0xff, 0xcc, 0xe8, 0x78, 0xe0, 0x35, 0x01, 0x56, + 0xc2, 0xc1, 0xf7, 0xcf, 0x0c, 0x1f, 0x9f, 0xe8, 0xcb, 0xe7, 0xca, 0x3a, 0x80, 0x80, 0x77, 0xf8, + 0xb9, 0x45, 0x95, 0xaf, 0x11, 0xa4, 0xc7, 0x09, 0x95, 0x17, 0x64, 0x1f, 0x56, 0xbb, 0x2a, 0xef, + 0x7d, 0x24, 0x23, 0xdc, 0x5e, 0x40, 0xcf, 0xab, 0x6f, 0xc5, 0x45, 0x10, 0x0f, 0x88, 0x77, 0xfd, + 0x00, 0x63, 0x91, 0x02, 0x94, 0xe1, 0x29, 0x4f, 0x11, 0x3c, 0x57, 0xa1, 0x7d, 0xdd, 0x6c, 0xdb, + 0xaa, 0xe9, 0xa8, 0x3d, 0x37, 0x5b, 0x11, 0xcb, 0xfc, 0x21, 0x5c, 0xe6, 0x63, 0xd0, 0xa8, 0x46, + 0x20, 0xfc, 0xdf, 0x0a, 0xf7, 0x3f, 0xe1, 0xc9, 0x2f, 0x15, 0xe6, 0x33, 0x32, 0xe5, 0x2e, 0x64, + 0x66, 
0x89, 0xc9, 0x74, 0xe5, 0x20, 0x35, 0x81, 0x10, 0x49, 0x5b, 0x27, 0x93, 0x22, 0x85, 0xc0, + 0x16, 0x61, 0x86, 0xd1, 0x55, 0x7b, 0xa7, 0x11, 0xc3, 0x59, 0x6c, 0x13, 0x43, 0x7a, 0x6c, 0xd3, + 0x63, 0xa2, 0xfc, 0x12, 0x83, 0x8d, 0x2a, 0x1b, 0x0c, 0x74, 0x1e, 0xd1, 0xcd, 0x3e, 0xac, 0x0c, + 0x98, 0x46, 0x33, 0xab, 0x39, 0x94, 0xdf, 0x9c, 0x97, 0xa6, 0x80, 0xd5, 0xc2, 0x21, 0xd3, 0x28, + 0x11, 0x48, 0xac, 0x3c, 0x83, 0x68, 0x7d, 0x29, 0x40, 0x15, 0xef, 0x43, 0x72, 0x30, 0x94, 0x1d, + 0x39, 0x13, 0x17, 0x5f, 0xfc, 0x9c, 0x5b, 0x7b, 0x28, 0x55, 0xc9, 0x18, 0xa4, 0xdc, 0x83, 0x15, + 0xd7, 0x27, 0xbe, 0x02, 0xe9, 0xc3, 0xd6, 0x41, 0xad, 0xf3, 0xb0, 0x79, 0x7c, 0x54, 0xab, 0x36, + 0xee, 0x35, 0x6a, 0x07, 0xe9, 0x25, 0x7c, 0x09, 0x36, 0xda, 0xa4, 0xdc, 0x3c, 0x2e, 0x57, 0xdb, + 0x8d, 0x56, 0xb3, 0xfc, 0x20, 0x8d, 0xf0, 0xff, 0xe0, 0x52, 0xb3, 0xd5, 0xec, 0x04, 0xc5, 0xb1, + 0xca, 0xff, 0xe1, 0xca, 0xe4, 0x2d, 0x71, 0xa8, 0x41, 0x7b, 0x9c, 0xd9, 0xca, 0x57, 0x08, 0x36, + 0xfd, 0x10, 0x65, 0x55, 0x8f, 0x21, 0xed, 0xfb, 0xef, 0xd8, 0xe2, 0x86, 0xfb, 0x7d, 0x37, 0x1f, + 0x81, 0xbb, 0xd7, 0xf2, 0xb6, 0x06, 0x81, 0xb3, 0x83, 0xaf, 0xc3, 0x86, 0x6e, 0x6a, 0xf4, 0xd3, + 0xce, 0xd0, 0xd2, 0x54, 0x4e, 0x9d, 0xcc, 0x4a, 0x0e, 0xe5, 0x57, 0xc9, 0xba, 0x10, 0x3e, 0xf4, + 0x64, 0xca, 0x09, 0xe0, 0xb2, 0x61, 0xb0, 0x9e, 0xca, 0x69, 0x43, 0x73, 0x22, 0x56, 0xd2, 0x1f, + 0x0d, 0x28, 0xfa, 0x68, 0xa8, 0xc3, 0xe5, 0x80, 0x1f, 0x19, 0xf8, 0xbf, 0xb0, 0xf4, 0x5b, 0x0c, + 0x12, 0x7e, 0xe8, 0x78, 0x0f, 0xe2, 0xba, 0xe9, 0x50, 0x9b, 0x8b, 0xe0, 0x52, 0xa5, 0xdc, 0xa2, + 0xf9, 0x50, 0x5f, 0x22, 0x12, 0xe1, 0x62, 0xbd, 0xcc, 0x88, 0x1b, 0x19, 0x11, 0xeb, 0x21, 0x3c, + 0xac, 0xf0, 0x1b, 0xbf, 0x08, 0x56, 0xf8, 0xbd, 0x03, 0x71, 0x8d, 0x1a, 0x94, 0x53, 0xd9, 0x54, + 0xe7, 0x47, 0xed, 0x02, 0x3d, 0x75, 0x7c, 0x1d, 0xd6, 0xbb, 0xaa, 0x43, 0x3b, 0x4f, 0xa8, 0xed, + 0xb8, 0xf7, 0xdf, 0xad, 0xcb, 0x72, 0x1d, 0x91, 0x94, 0x2b, 0x7d, 0xe4, 0x09, 0x2b, 0x29, 0x48, + 0x32, 0x8b, 0xda, 0x22, 0x3d, 0x95, 0x6b, 
0xf0, 0x7c, 0x8f, 0x99, 0x27, 0x86, 0xde, 0xe3, 0x1d, + 0x8d, 0x72, 0x2a, 0x6f, 0x22, 0xb7, 0x55, 0x4e, 0xfb, 0xe7, 0xca, 0x97, 0x08, 0x36, 0x83, 0xb7, + 0x08, 0x17, 0x61, 0xf9, 0x94, 0xfa, 0xb3, 0x62, 0x41, 0x3d, 0x5c, 0x4d, 0x9c, 0x81, 0x35, 0x9f, + 0x8f, 0x5b, 0x82, 0x65, 0xe2, 0x1f, 0xf1, 0xab, 0x70, 0x69, 0xca, 0x39, 0xd5, 0x44, 0xaa, 0x13, + 0x24, 0xed, 0xbf, 0x38, 0x90, 0x72, 0xe5, 0x6f, 0x04, 0xa9, 0x89, 0x11, 0x86, 0xbb, 0x90, 0x16, + 0xf3, 0xaf, 0xc7, 0x4c, 0x47, 0x77, 0x38, 0x35, 0x7b, 0xe7, 0xe2, 0x7b, 0xdf, 0x2c, 0xed, 0x46, + 0x9a, 0x81, 0xe2, 0xb9, 0x3a, 0x06, 0xd7, 0x97, 0xc8, 0x96, 0x1d, 0x14, 0x4d, 0xb7, 0x93, 0xd8, + 0x33, 0xda, 0x89, 0x72, 0x08, 0x5b, 0x53, 0x96, 0x70, 0x0e, 0xae, 0x92, 0x5a, 0xf9, 0xa0, 0x53, + 0x6d, 0x35, 0x8f, 0x1b, 0xc7, 0xed, 0x5a, 0xb3, 0xfa, 0xfe, 0x54, 0x8f, 0x00, 0x88, 0x1f, 0xb7, + 0x49, 0xab, 0x79, 0x3f, 0x8d, 0xf0, 0x3a, 0x24, 0x6a, 0x8f, 0x6a, 0xcd, 0xf6, 0x43, 0xd1, 0x13, + 0x30, 0xa4, 0x27, 0x22, 0xf2, 0xc6, 0xe2, 0xd3, 0x18, 0xe0, 0xd9, 0xc9, 0x80, 0x1f, 0x03, 0x88, + 0x0c, 0x7c, 0x62, 0xeb, 0x9c, 0xca, 0xe9, 0xf8, 0xe6, 0x45, 0x66, 0x8b, 0x48, 0xc1, 0x63, 0x17, + 0x5d, 0x5f, 0x22, 0x49, 0xdb, 0x3f, 0xe0, 0x36, 0x24, 0xbd, 0xd5, 0xc2, 0x34, 0xfc, 0x99, 0xb9, + 0x7b, 0x61, 0xbb, 0x2d, 0xd3, 0x10, 0xa3, 0xde, 0x96, 0xcf, 0xd9, 0x77, 0x20, 0x39, 0xf2, 0x87, + 0x77, 0xe0, 0x8a, 0x65, 0xd3, 0x27, 0x3a, 0x1b, 0x3a, 0x9d, 0xd9, 0xd1, 0x72, 0xd9, 0x7f, 0x37, + 0x61, 0x3b, 0x0b, 0x90, 0xf0, 0xed, 0x56, 0xe2, 0xde, 0xa4, 0x28, 0xfd, 0xba, 0x06, 0xc9, 0x03, + 0x9f, 0x11, 0xfe, 0x06, 0x41, 0xdc, 0xdb, 0x2e, 0xf1, 0xcd, 0x70, 0xbe, 0x81, 0x6d, 0x39, 0x9b, + 0x5f, 0xac, 0x28, 0xc7, 0xd9, 0xeb, 0x5f, 0xfc, 0xf9, 0xd7, 0xb7, 0xb1, 0x6d, 0xe5, 0xc6, 0x68, + 0x0f, 0x97, 0xfd, 0xce, 0x29, 0x7e, 0x36, 0xee, 0x85, 0x9f, 0xef, 0x19, 0x02, 0xb6, 0x87, 0xb6, + 0xf1, 0xf7, 0x08, 0x12, 0xfe, 0x3a, 0x83, 0x5f, 0x99, 0x73, 0x2b, 0x83, 0x3b, 0x64, 0x76, 0x3b, + 0x8a, 0xaa, 0x64, 0x55, 0x12, 0xac, 0x6e, 0x29, 0x37, 0x17, 0xb0, 0xb2, 0x25, 
0xd0, 0xe5, 0xf5, + 0x33, 0x82, 0xf4, 0xf4, 0xfe, 0x80, 0x77, 0xc2, 0x9d, 0x86, 0x2c, 0x41, 0xd9, 0xd2, 0x45, 0x20, + 0x92, 0xef, 0x9e, 0xe0, 0xfb, 0x86, 0x52, 0x5c, 0xc0, 0xb7, 0x3b, 0x65, 0xc0, 0xe5, 0xed, 0xd6, + 0xd7, 0x9b, 0x8b, 0xf3, 0xea, 0x1b, 0x58, 0x0e, 0xe6, 0xd5, 0x37, 0x38, 0x62, 0x23, 0xd7, 0xb7, + 0x27, 0x60, 0xa3, 0xfa, 0xca, 0xad, 0x67, 0x6e, 0x7d, 0x83, 0xdb, 0xd6, 0xdc, 0xfa, 0x4e, 0x2f, + 0x51, 0x91, 0xeb, 0x2b, 0x81, 0x2e, 0xaf, 0x1f, 0x10, 0xa4, 0x26, 0x66, 0x29, 0x9e, 0xb3, 0x49, + 0xcd, 0x8e, 0xf6, 0xec, 0x6b, 0x11, 0xb5, 0x25, 0xc1, 0x5d, 0x41, 0xb0, 0xa8, 0x6c, 0x2f, 0x20, + 0xa8, 0x8e, 0xb1, 0x7b, 0x68, 0xbb, 0xf2, 0x1d, 0x82, 0xab, 0x3d, 0x36, 0x08, 0xf5, 0x55, 0xd9, + 0x1c, 0x7d, 0xd7, 0x47, 0xee, 0x8f, 0xee, 0x11, 0xfa, 0xa0, 0x2c, 0x75, 0xfb, 0xcc, 0x50, 0xcd, + 0x7e, 0x81, 0xd9, 0xfd, 0x62, 0x9f, 0x9a, 0xe2, 0x37, 0xb8, 0xe8, 0xbd, 0x52, 0x2d, 0xdd, 0x99, + 0xfd, 0x5f, 0x7e, 0x7b, 0x24, 0xf9, 0x31, 0xf6, 0xc2, 0x7d, 0xcf, 0x46, 0xd5, 0x60, 0x43, 0xad, + 0x30, 0x72, 0x51, 0x78, 0xb4, 0x53, 0x71, 0x55, 0xbb, 0x71, 0x61, 0xee, 0xf6, 0x3f, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xfe, 0xa6, 0x17, 0x82, 0x06, 0x10, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/entity.pb.go b/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/entity.pb.go new file mode 100644 index 000000000..c44916df6 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/entity.pb.go @@ -0,0 +1,763 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/datastore/v1beta3/entity.proto + +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" +import google_type "google.golang.org/genproto/googleapis/type/latlng" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A partition ID identifies a grouping of entities. The grouping is always +// by project and namespace, however the namespace ID may be empty. +// +// A partition ID contains several dimensions: +// project ID and namespace ID. +// +// Partition dimensions: +// +// - May be `""`. +// - Must be valid UTF-8 bytes. +// - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` +// If the value of any dimension matches regex `__.*__`, the partition is +// reserved/read-only. +// A reserved/read-only partition ID is forbidden in certain documented +// contexts. +// +// Foreign partition IDs (in which the project ID does +// not match the context project ID ) are discouraged. +// Reads and writes of foreign partition IDs may fail if the project is not in an active state. +type PartitionId struct { + // The ID of the project to which the entities belong. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // If not empty, the ID of the namespace to which the entities belong. 
+ NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId" json:"namespace_id,omitempty"` +} + +func (m *PartitionId) Reset() { *m = PartitionId{} } +func (m *PartitionId) String() string { return proto.CompactTextString(m) } +func (*PartitionId) ProtoMessage() {} +func (*PartitionId) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *PartitionId) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *PartitionId) GetNamespaceId() string { + if m != nil { + return m.NamespaceId + } + return "" +} + +// A unique identifier for an entity. +// If a key's partition ID or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +type Key struct { + // Entities are partitioned into subsets, currently identified by a project + // ID and namespace ID. + // Queries are scoped to a single partition. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId" json:"partition_id,omitempty"` + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. The first + // element identifies a _root entity_, the second element identifies + // a _child_ of the root entity, the third element identifies a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's _ancestors_. + // + // An entity path is always fully complete: *all* of the entity's ancestors + // are required to be in the path along with the entity identifier itself. + // The only exception is that in some documented cases, the identifier in the + // last path element (for the entity) itself may be omitted. For example, + // the last path element of the key of `Mutation.insert` may have no + // identifier. 
+ // + // A path can never be empty, and a path can have at most 100 elements. + Path []*Key_PathElement `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Key) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *Key) GetPath() []*Key_PathElement { + if m != nil { + return m.Path + } + return nil +} + +// A (kind, ID/name) pair used to construct a key path. +// +// If either name or ID is set, the element is complete. +// If neither is set, the element is incomplete. +type Key_PathElement struct { + // The kind of the entity. + // A kind matching regex `__.*__` is reserved/read-only. + // A kind must not contain more than 1500 bytes when UTF-8 encoded. + // Cannot be `""`. + Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` + // The type of ID. 
+ // + // Types that are valid to be assigned to IdType: + // *Key_PathElement_Id + // *Key_PathElement_Name + IdType isKey_PathElement_IdType `protobuf_oneof:"id_type"` +} + +func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +func (*Key_PathElement) ProtoMessage() {} +func (*Key_PathElement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 0} } + +type isKey_PathElement_IdType interface { + isKey_PathElement_IdType() +} + +type Key_PathElement_Id struct { + Id int64 `protobuf:"varint,2,opt,name=id,oneof"` +} +type Key_PathElement_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,oneof"` +} + +func (*Key_PathElement_Id) isKey_PathElement_IdType() {} +func (*Key_PathElement_Name) isKey_PathElement_IdType() {} + +func (m *Key_PathElement) GetIdType() isKey_PathElement_IdType { + if m != nil { + return m.IdType + } + return nil +} + +func (m *Key_PathElement) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *Key_PathElement) GetId() int64 { + if x, ok := m.GetIdType().(*Key_PathElement_Id); ok { + return x.Id + } + return 0 +} + +func (m *Key_PathElement) GetName() string { + if x, ok := m.GetIdType().(*Key_PathElement_Name); ok { + return x.Name + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Key_PathElement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Key_PathElement_OneofMarshaler, _Key_PathElement_OneofUnmarshaler, _Key_PathElement_OneofSizer, []interface{}{ + (*Key_PathElement_Id)(nil), + (*Key_PathElement_Name)(nil), + } +} + +func _Key_PathElement_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case nil: + default: + return fmt.Errorf("Key_PathElement.IdType has unexpected type %T", x) + } + return nil +} + +func _Key_PathElement_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Key_PathElement) + switch tag { + case 2: // id_type.id + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.IdType = &Key_PathElement_Id{int64(x)} + return true, err + case 3: // id_type.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdType = &Key_PathElement_Name{x} + return true, err + default: + return false, nil + } +} + +func _Key_PathElement_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// An array value. 
+type ArrayValue struct { + // Values in the array. + // The order of this array may not be preserved if it contains a mix of + // indexed and unindexed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ArrayValue) Reset() { *m = ArrayValue{} } +func (m *ArrayValue) String() string { return proto.CompactTextString(m) } +func (*ArrayValue) ProtoMessage() {} +func (*ArrayValue) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ArrayValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// A message that can hold any of the supported value types and associated +// metadata. +type Value struct { + // Must have a value set. + // + // Types that are valid to be assigned to ValueType: + // *Value_NullValue + // *Value_BooleanValue + // *Value_IntegerValue + // *Value_DoubleValue + // *Value_TimestampValue + // *Value_KeyValue + // *Value_StringValue + // *Value_BlobValue + // *Value_GeoPointValue + // *Value_EntityValue + // *Value_ArrayValue + ValueType isValue_ValueType `protobuf_oneof:"value_type"` + // The `meaning` field should only be populated for backwards compatibility. + Meaning int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"` + // If the value should be excluded from all indexes including those defined + // explicitly. 
+ ExcludeFromIndexes bool `protobuf:"varint,19,opt,name=exclude_from_indexes,json=excludeFromIndexes" json:"exclude_from_indexes,omitempty"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +type isValue_ValueType interface { + isValue_ValueType() +} + +type Value_NullValue struct { + NullValue google_protobuf1.NullValue `protobuf:"varint,11,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_BooleanValue struct { + BooleanValue bool `protobuf:"varint,1,opt,name=boolean_value,json=booleanValue,oneof"` +} +type Value_IntegerValue struct { + IntegerValue int64 `protobuf:"varint,2,opt,name=integer_value,json=integerValue,oneof"` +} +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,oneof"` +} +type Value_TimestampValue struct { + TimestampValue *google_protobuf2.Timestamp `protobuf:"bytes,10,opt,name=timestamp_value,json=timestampValue,oneof"` +} +type Value_KeyValue struct { + KeyValue *Key `protobuf:"bytes,5,opt,name=key_value,json=keyValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,17,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BlobValue struct { + BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value,json=blobValue,proto3,oneof"` +} +type Value_GeoPointValue struct { + GeoPointValue *google_type.LatLng `protobuf:"bytes,8,opt,name=geo_point_value,json=geoPointValue,oneof"` +} +type Value_EntityValue struct { + EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value,json=entityValue,oneof"` +} +type Value_ArrayValue struct { + ArrayValue *ArrayValue `protobuf:"bytes,9,opt,name=array_value,json=arrayValue,oneof"` +} + +func (*Value_NullValue) isValue_ValueType() {} +func (*Value_BooleanValue) isValue_ValueType() {} +func 
(*Value_IntegerValue) isValue_ValueType() {} +func (*Value_DoubleValue) isValue_ValueType() {} +func (*Value_TimestampValue) isValue_ValueType() {} +func (*Value_KeyValue) isValue_ValueType() {} +func (*Value_StringValue) isValue_ValueType() {} +func (*Value_BlobValue) isValue_ValueType() {} +func (*Value_GeoPointValue) isValue_ValueType() {} +func (*Value_EntityValue) isValue_ValueType() {} +func (*Value_ArrayValue) isValue_ValueType() {} + +func (m *Value) GetValueType() isValue_ValueType { + if m != nil { + return m.ValueType + } + return nil +} + +func (m *Value) GetNullValue() google_protobuf1.NullValue { + if x, ok := m.GetValueType().(*Value_NullValue); ok { + return x.NullValue + } + return google_protobuf1.NullValue_NULL_VALUE +} + +func (m *Value) GetBooleanValue() bool { + if x, ok := m.GetValueType().(*Value_BooleanValue); ok { + return x.BooleanValue + } + return false +} + +func (m *Value) GetIntegerValue() int64 { + if x, ok := m.GetValueType().(*Value_IntegerValue); ok { + return x.IntegerValue + } + return 0 +} + +func (m *Value) GetDoubleValue() float64 { + if x, ok := m.GetValueType().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *Value) GetTimestampValue() *google_protobuf2.Timestamp { + if x, ok := m.GetValueType().(*Value_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +func (m *Value) GetKeyValue() *Key { + if x, ok := m.GetValueType().(*Value_KeyValue); ok { + return x.KeyValue + } + return nil +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetValueType().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBlobValue() []byte { + if x, ok := m.GetValueType().(*Value_BlobValue); ok { + return x.BlobValue + } + return nil +} + +func (m *Value) GetGeoPointValue() *google_type.LatLng { + if x, ok := m.GetValueType().(*Value_GeoPointValue); ok { + return x.GeoPointValue + } + return nil +} + +func (m *Value) GetEntityValue() *Entity { + 
if x, ok := m.GetValueType().(*Value_EntityValue); ok { + return x.EntityValue + } + return nil +} + +func (m *Value) GetArrayValue() *ArrayValue { + if x, ok := m.GetValueType().(*Value_ArrayValue); ok { + return x.ArrayValue + } + return nil +} + +func (m *Value) GetMeaning() int32 { + if m != nil { + return m.Meaning + } + return 0 +} + +func (m *Value) GetExcludeFromIndexes() bool { + if m != nil { + return m.ExcludeFromIndexes + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_BooleanValue)(nil), + (*Value_IntegerValue)(nil), + (*Value_DoubleValue)(nil), + (*Value_TimestampValue)(nil), + (*Value_KeyValue)(nil), + (*Value_StringValue)(nil), + (*Value_BlobValue)(nil), + (*Value_GeoPointValue)(nil), + (*Value_EntityValue)(nil), + (*Value_ArrayValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // value_type + switch x := m.ValueType.(type) { + case *Value_NullValue: + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_BooleanValue: + t := uint64(0) + if x.BooleanValue { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_IntegerValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntegerValue)) + case *Value_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *Value_TimestampValue: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimestampValue); err != nil { + return err + } + case *Value_KeyValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.KeyValue); err != nil { + return err + } + case *Value_StringValue: + b.EncodeVarint(17<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BlobValue: + b.EncodeVarint(18<<3 | proto.WireBytes) + b.EncodeRawBytes(x.BlobValue) + case *Value_GeoPointValue: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GeoPointValue); err != nil { + return err + } + case *Value_EntityValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EntityValue); err != nil { + return err + } + case *Value_ArrayValue: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ArrayValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.ValueType has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 11: // value_type.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_NullValue{google_protobuf1.NullValue(x)} + return true, err + case 1: // value_type.boolean_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_BooleanValue{x != 0} + return true, err + case 2: // value_type.integer_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_IntegerValue{int64(x)} + return true, err + case 3: // value_type.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.ValueType = &Value_DoubleValue{math.Float64frombits(x)} + return true, err + case 10: // value_type.timestamp_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Timestamp) + err := 
b.DecodeMessage(msg) + m.ValueType = &Value_TimestampValue{msg} + return true, err + case 5: // value_type.key_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Key) + err := b.DecodeMessage(msg) + m.ValueType = &Value_KeyValue{msg} + return true, err + case 17: // value_type.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ValueType = &Value_StringValue{x} + return true, err + case 18: // value_type.blob_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ValueType = &Value_BlobValue{x} + return true, err + case 8: // value_type.geo_point_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_type.LatLng) + err := b.DecodeMessage(msg) + m.ValueType = &Value_GeoPointValue{msg} + return true, err + case 6: // value_type.entity_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Entity) + err := b.DecodeMessage(msg) + m.ValueType = &Value_EntityValue{msg} + return true, err + case 9: // value_type.array_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ArrayValue) + err := b.DecodeMessage(msg) + m.ValueType = &Value_ArrayValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // value_type + switch x := m.ValueType.(type) { + case *Value_NullValue: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_BooleanValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *Value_IntegerValue: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.IntegerValue)) + case *Value_DoubleValue: + n += proto.SizeVarint(3<<3 | proto.WireFixed64) + n += 8 + case 
*Value_TimestampValue: + s := proto.Size(x.TimestampValue) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_KeyValue: + s := proto.Size(x.KeyValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_StringValue: + n += proto.SizeVarint(17<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BlobValue: + n += proto.SizeVarint(18<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.BlobValue))) + n += len(x.BlobValue) + case *Value_GeoPointValue: + s := proto.Size(x.GeoPointValue) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_EntityValue: + s := proto.Size(x.EntityValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ArrayValue: + s := proto.Size(x.ArrayValue) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Datastore data object. +// +// An entity is limited to 1 megabyte when stored. That _roughly_ +// corresponds to a limit of 1 megabyte for the serialized form of this +// message. +type Entity struct { + // The entity's key. + // + // An entity must have a key, unless otherwise documented (for example, + // an entity in `Value.entity_value` may have no key). + // An entity's kind is its key path's last element's kind, + // or null if it has no key. + Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + // The entity's properties. + // The map's keys are property names. + // A property name matching regex `__.*__` is reserved. + // A reserved property name is forbidden in certain documented contexts. + // The name must not contain more than 500 characters. + // The name cannot be `""`. 
+ Properties map[string]*Value `protobuf:"bytes,3,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *Entity) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +func (m *Entity) GetProperties() map[string]*Value { + if m != nil { + return m.Properties + } + return nil +} + +func init() { + proto.RegisterType((*PartitionId)(nil), "google.datastore.v1beta3.PartitionId") + proto.RegisterType((*Key)(nil), "google.datastore.v1beta3.Key") + proto.RegisterType((*Key_PathElement)(nil), "google.datastore.v1beta3.Key.PathElement") + proto.RegisterType((*ArrayValue)(nil), "google.datastore.v1beta3.ArrayValue") + proto.RegisterType((*Value)(nil), "google.datastore.v1beta3.Value") + proto.RegisterType((*Entity)(nil), "google.datastore.v1beta3.Entity") +} + +func init() { proto.RegisterFile("google/datastore/v1beta3/entity.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 776 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xcf, 0x8e, 0xdb, 0x36, + 0x10, 0xc6, 0x25, 0x7b, 0xed, 0xac, 0x46, 0xca, 0x6e, 0xca, 0xe4, 0x20, 0x18, 0x49, 0xd7, 0x75, + 0xbb, 0x80, 0x7b, 0x91, 0x92, 0x0d, 0x8a, 0x16, 0x4d, 0x73, 0x88, 0x5b, 0x37, 0x32, 0x12, 0xb4, + 0x06, 0x11, 0xe4, 0xd0, 0x43, 0x0d, 0xda, 0x62, 0x14, 0xd6, 0x12, 0x29, 0x48, 0x54, 0x10, 0x3d, + 0x46, 0x5f, 0xa3, 0x7d, 0xad, 0xde, 0xfa, 0x12, 0x05, 0xff, 0x48, 0x5e, 0x24, 0xf0, 0x36, 0x37, + 0x71, 0xe6, 0x37, 0x1f, 0x3f, 0x72, 0x86, 0x36, 0x5c, 0x66, 0x42, 0x64, 0x39, 0x8d, 0x53, 0x22, + 0x49, 0x2d, 0x45, 0x45, 0xe3, 0x77, 0x8f, 0xb6, 0x54, 0x92, 0xc7, 0x31, 0xe5, 0x92, 0xc9, 0x36, + 0x2a, 0x2b, 0x21, 0x05, 0x0a, 0x0d, 0x16, 
0xf5, 0x58, 0x64, 0xb1, 0xc9, 0x7d, 0x2b, 0x40, 0x4a, + 0x16, 0x13, 0xce, 0x85, 0x24, 0x92, 0x09, 0x5e, 0x9b, 0xba, 0x3e, 0xab, 0x57, 0xdb, 0xe6, 0x4d, + 0x5c, 0xcb, 0xaa, 0xd9, 0x49, 0x9b, 0xbd, 0xf8, 0x30, 0x2b, 0x59, 0x41, 0x6b, 0x49, 0x8a, 0xd2, + 0x02, 0x76, 0xdb, 0x58, 0xb6, 0x25, 0x8d, 0x73, 0x22, 0x73, 0x9e, 0x99, 0xcc, 0xec, 0x57, 0xf0, + 0xd7, 0xa4, 0x92, 0x4c, 0x6d, 0xb6, 0x4a, 0xd1, 0x03, 0x80, 0xb2, 0x12, 0x7f, 0xd0, 0x9d, 0xdc, + 0xb0, 0x34, 0x1c, 0x4c, 0xdd, 0xb9, 0x87, 0x3d, 0x1b, 0x59, 0xa5, 0xe8, 0x0b, 0x08, 0x38, 0x29, + 0x68, 0x5d, 0x92, 0x1d, 0x55, 0xc0, 0x89, 0x06, 0xfc, 0x3e, 0xb6, 0x4a, 0x67, 0xff, 0xb8, 0x30, + 0x7c, 0x41, 0x5b, 0x94, 0x40, 0x50, 0x76, 0xc2, 0x0a, 0x75, 0xa7, 0xee, 0xdc, 0xbf, 0xba, 0x8c, + 0x8e, 0x5d, 0x40, 0x74, 0xcd, 0x06, 0xf6, 0xcb, 0x6b, 0x9e, 0x9e, 0xc2, 0x49, 0x49, 0xe4, 0xdb, + 0x70, 0x30, 0x1d, 0xce, 0xfd, 0xab, 0xaf, 0x8f, 0x2b, 0xbc, 0xa0, 0x6d, 0xb4, 0x26, 0xf2, 0xed, + 0x32, 0xa7, 0x05, 0xe5, 0x12, 0xeb, 0xb2, 0xc9, 0x2b, 0x75, 0xc2, 0x3e, 0x88, 0x10, 0x9c, 0xec, + 0x19, 0x37, 0x7e, 0x3c, 0xac, 0xbf, 0xd1, 0x1d, 0x18, 0xd8, 0xd3, 0x0e, 0x13, 0x07, 0x0f, 0x58, + 0x8a, 0xee, 0xc1, 0x89, 0x3a, 0x54, 0x38, 0x54, 0x54, 0xe2, 0x60, 0xbd, 0x5a, 0x78, 0x70, 0x8b, + 0xa5, 0x1b, 0x75, 0x89, 0xb3, 0x25, 0xc0, 0xb3, 0xaa, 0x22, 0xed, 0x6b, 0x92, 0x37, 0x14, 0x7d, + 0x0b, 0xe3, 0x77, 0xea, 0xa3, 0x0e, 0x5d, 0x6d, 0xf2, 0xe2, 0xb8, 0x49, 0x5d, 0x80, 0x2d, 0x3e, + 0xfb, 0x7b, 0x04, 0x23, 0x23, 0xf1, 0x04, 0x80, 0x37, 0x79, 0xbe, 0xd1, 0x89, 0xd0, 0x9f, 0xba, + 0xf3, 0xb3, 0xab, 0x49, 0x27, 0xd3, 0x35, 0x36, 0xfa, 0xa5, 0xc9, 0x73, 0xcd, 0x27, 0x0e, 0xf6, + 0x78, 0xb7, 0x40, 0x97, 0x70, 0x7b, 0x2b, 0x44, 0x4e, 0x09, 0xb7, 0xf5, 0xea, 0x74, 0xa7, 0x89, + 0x83, 0x03, 0x1b, 0xee, 0x31, 0xc6, 0x25, 0xcd, 0x68, 0x65, 0xb1, 0xee, 0xc8, 0x81, 0x0d, 0x1b, + 0xec, 0x4b, 0x08, 0x52, 0xd1, 0x6c, 0x73, 0x6a, 0x29, 0x75, 0x09, 0x6e, 0xe2, 0x60, 0xdf, 0x44, + 0x0d, 0xb4, 0x84, 0xf3, 0x7e, 0xca, 0x2c, 0x07, 0xba, 0xc5, 0x1f, 0x9b, 0x7e, 
0xd5, 0x71, 0x89, + 0x83, 0xcf, 0xfa, 0x22, 0x23, 0xf3, 0x03, 0x78, 0x7b, 0xda, 0x5a, 0x81, 0x91, 0x16, 0x78, 0x70, + 0x63, 0x87, 0x13, 0x07, 0x9f, 0xee, 0x69, 0xdb, 0x3b, 0xad, 0x65, 0xc5, 0x78, 0x66, 0x05, 0x3e, + 0xb3, 0xed, 0xf2, 0x4d, 0xd4, 0x40, 0x17, 0x00, 0xdb, 0x5c, 0x6c, 0x2d, 0x82, 0xa6, 0xee, 0x3c, + 0x50, 0xb7, 0xa7, 0x62, 0x06, 0x78, 0x0a, 0xe7, 0x19, 0x15, 0x9b, 0x52, 0x30, 0x2e, 0x2d, 0x75, + 0xaa, 0x9d, 0xdc, 0xed, 0x9c, 0xa8, 0x96, 0x47, 0x2f, 0x89, 0x7c, 0xc9, 0xb3, 0xc4, 0xc1, 0xb7, + 0x33, 0x2a, 0xd6, 0x0a, 0xee, 0x6e, 0x22, 0x30, 0x6f, 0xdc, 0xd6, 0x8e, 0x75, 0xed, 0xf4, 0xf8, + 0x29, 0x96, 0x9a, 0x56, 0x36, 0x4d, 0x9d, 0x91, 0x79, 0x0e, 0x3e, 0x51, 0x13, 0x65, 0x55, 0x3c, + 0xad, 0xf2, 0xd5, 0x71, 0x95, 0xc3, 0xf8, 0x25, 0x0e, 0x06, 0x72, 0x18, 0xc6, 0x10, 0x6e, 0x15, + 0x94, 0x70, 0xc6, 0xb3, 0xf0, 0x6c, 0xea, 0xce, 0x47, 0xb8, 0x5b, 0xa2, 0x87, 0x70, 0x8f, 0xbe, + 0xdf, 0xe5, 0x4d, 0x4a, 0x37, 0x6f, 0x2a, 0x51, 0x6c, 0x18, 0x4f, 0xe9, 0x7b, 0x5a, 0x87, 0x77, + 0xd5, 0xb4, 0x60, 0x64, 0x73, 0x3f, 0x57, 0xa2, 0x58, 0x99, 0xcc, 0x22, 0x00, 0xd0, 0x76, 0xcc, + 0xd0, 0xff, 0xeb, 0xc2, 0xd8, 0x98, 0x47, 0x31, 0x0c, 0xf7, 0xb4, 0xb5, 0xaf, 0xfa, 0xe6, 0x8e, + 0x61, 0x45, 0xa2, 0xb5, 0xfe, 0x65, 0x29, 0x69, 0x25, 0x19, 0xad, 0xc3, 0xa1, 0x7e, 0x26, 0x0f, + 0xff, 0xef, 0x8e, 0xa2, 0x75, 0x5f, 0xb2, 0xe4, 0xb2, 0x6a, 0xf1, 0x35, 0x8d, 0xc9, 0xef, 0x70, + 0xfe, 0x41, 0x1a, 0xdd, 0x39, 0xb8, 0xf2, 0xcc, 0xb6, 0xdf, 0xc0, 0xe8, 0x30, 0xea, 0x9f, 0xf0, + 0x30, 0x0d, 0xfd, 0xfd, 0xe0, 0x3b, 0x77, 0xf1, 0xa7, 0x0b, 0xf7, 0x77, 0xa2, 0x38, 0x5a, 0xb1, + 0xf0, 0x8d, 0xc9, 0xb5, 0x9a, 0xf3, 0xb5, 0xfb, 0xdb, 0x33, 0x0b, 0x66, 0x22, 0x27, 0x3c, 0x8b, + 0x44, 0x95, 0xc5, 0x19, 0xe5, 0xfa, 0x15, 0xc4, 0x26, 0x45, 0x4a, 0x56, 0x7f, 0xfc, 0x0f, 0xf1, + 0xa4, 0x8f, 0xfc, 0x35, 0xf8, 0xfc, 0xb9, 0xd1, 0xf8, 0x31, 0x17, 0x4d, 0x1a, 0xfd, 0xd4, 0x6f, + 0xf9, 0xfa, 0xd1, 0x42, 0xa1, 0xdb, 0xb1, 0x96, 0x7b, 0xfc, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x7e, 0x53, 0x2d, 
0x57, 0x6f, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/query.pb.go b/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/query.pb.go new file mode 100644 index 000000000..42bc09e98 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/datastore/v1beta3/query.pb.go @@ -0,0 +1,969 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/datastore/v1beta3/query.proto + +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/wrappers" +import _ "google.golang.org/genproto/googleapis/type/latlng" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Specifies what data the 'entity' field contains. +// A `ResultType` is either implied (for example, in `LookupResponse.missing` +// from `datastore.proto`, it is always `KEY_ONLY`) or specified by context +// (for example, in message `QueryResultBatch`, field `entity_result_type` +// specifies a `ResultType` for all the values in field `entity_results`). +type EntityResult_ResultType int32 + +const ( + // Unspecified. This value is never used. + EntityResult_RESULT_TYPE_UNSPECIFIED EntityResult_ResultType = 0 + // The key and properties. + EntityResult_FULL EntityResult_ResultType = 1 + // A projected subset of properties. The entity may have no key. + EntityResult_PROJECTION EntityResult_ResultType = 2 + // Only the key. 
+ EntityResult_KEY_ONLY EntityResult_ResultType = 3 +) + +var EntityResult_ResultType_name = map[int32]string{ + 0: "RESULT_TYPE_UNSPECIFIED", + 1: "FULL", + 2: "PROJECTION", + 3: "KEY_ONLY", +} +var EntityResult_ResultType_value = map[string]int32{ + "RESULT_TYPE_UNSPECIFIED": 0, + "FULL": 1, + "PROJECTION": 2, + "KEY_ONLY": 3, +} + +func (x EntityResult_ResultType) String() string { + return proto.EnumName(EntityResult_ResultType_name, int32(x)) +} +func (EntityResult_ResultType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +// The sort direction. +type PropertyOrder_Direction int32 + +const ( + // Unspecified. This value must not be used. + PropertyOrder_DIRECTION_UNSPECIFIED PropertyOrder_Direction = 0 + // Ascending. + PropertyOrder_ASCENDING PropertyOrder_Direction = 1 + // Descending. + PropertyOrder_DESCENDING PropertyOrder_Direction = 2 +) + +var PropertyOrder_Direction_name = map[int32]string{ + 0: "DIRECTION_UNSPECIFIED", + 1: "ASCENDING", + 2: "DESCENDING", +} +var PropertyOrder_Direction_value = map[string]int32{ + "DIRECTION_UNSPECIFIED": 0, + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x PropertyOrder_Direction) String() string { + return proto.EnumName(PropertyOrder_Direction_name, int32(x)) +} +func (PropertyOrder_Direction) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{5, 0} } + +// A composite filter operator. +type CompositeFilter_Operator int32 + +const ( + // Unspecified. This value must not be used. + CompositeFilter_OPERATOR_UNSPECIFIED CompositeFilter_Operator = 0 + // The results are required to satisfy each of the combined filters. 
+ CompositeFilter_AND CompositeFilter_Operator = 1 +) + +var CompositeFilter_Operator_name = map[int32]string{ + 0: "OPERATOR_UNSPECIFIED", + 1: "AND", +} +var CompositeFilter_Operator_value = map[string]int32{ + "OPERATOR_UNSPECIFIED": 0, + "AND": 1, +} + +func (x CompositeFilter_Operator) String() string { + return proto.EnumName(CompositeFilter_Operator_name, int32(x)) +} +func (CompositeFilter_Operator) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{7, 0} } + +// A property filter operator. +type PropertyFilter_Operator int32 + +const ( + // Unspecified. This value must not be used. + PropertyFilter_OPERATOR_UNSPECIFIED PropertyFilter_Operator = 0 + // Less than. + PropertyFilter_LESS_THAN PropertyFilter_Operator = 1 + // Less than or equal. + PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2 + // Greater than. + PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3 + // Greater than or equal. + PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4 + // Equal. + PropertyFilter_EQUAL PropertyFilter_Operator = 5 + // Has ancestor. + PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11 +) + +var PropertyFilter_Operator_name = map[int32]string{ + 0: "OPERATOR_UNSPECIFIED", + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 11: "HAS_ANCESTOR", +} +var PropertyFilter_Operator_value = map[string]int32{ + "OPERATOR_UNSPECIFIED": 0, + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "HAS_ANCESTOR": 11, +} + +func (x PropertyFilter_Operator) String() string { + return proto.EnumName(PropertyFilter_Operator_name, int32(x)) +} +func (PropertyFilter_Operator) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{8, 0} } + +// The possible values for the `more_results` field. +type QueryResultBatch_MoreResultsType int32 + +const ( + // Unspecified. This value is never used. 
+ QueryResultBatch_MORE_RESULTS_TYPE_UNSPECIFIED QueryResultBatch_MoreResultsType = 0 + // There may be additional batches to fetch from this query. + QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1 + // The query is finished, but there may be more results after the limit. + QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2 + // The query is finished, but there may be more results after the end + // cursor. + QueryResultBatch_MORE_RESULTS_AFTER_CURSOR QueryResultBatch_MoreResultsType = 4 + // The query is finished, and there are no more results. + QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3 +) + +var QueryResultBatch_MoreResultsType_name = map[int32]string{ + 0: "MORE_RESULTS_TYPE_UNSPECIFIED", + 1: "NOT_FINISHED", + 2: "MORE_RESULTS_AFTER_LIMIT", + 4: "MORE_RESULTS_AFTER_CURSOR", + 3: "NO_MORE_RESULTS", +} +var QueryResultBatch_MoreResultsType_value = map[string]int32{ + "MORE_RESULTS_TYPE_UNSPECIFIED": 0, + "NOT_FINISHED": 1, + "MORE_RESULTS_AFTER_LIMIT": 2, + "MORE_RESULTS_AFTER_CURSOR": 4, + "NO_MORE_RESULTS": 3, +} + +func (x QueryResultBatch_MoreResultsType) String() string { + return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x)) +} +func (QueryResultBatch_MoreResultsType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{11, 0} +} + +// The result of fetching an entity from Datastore. +type EntityResult struct { + // The resulting entity. + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + // The version of the entity, a strictly positive number that monotonically + // increases with changes to the entity. + // + // This field is set for [`FULL`][google.datastore.v1beta3.EntityResult.ResultType.FULL] entity + // results. 
+ // + // For [missing][google.datastore.v1beta3.LookupResponse.missing] entities in `LookupResponse`, this + // is the version of the snapshot that was used to look up the entity, and it + // is always set except for eventually consistent reads. + Version int64 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + // A cursor that points to the position after the result entity. + // Set only when the `EntityResult` is part of a `QueryResultBatch` message. + Cursor []byte `protobuf:"bytes,3,opt,name=cursor,proto3" json:"cursor,omitempty"` +} + +func (m *EntityResult) Reset() { *m = EntityResult{} } +func (m *EntityResult) String() string { return proto.CompactTextString(m) } +func (*EntityResult) ProtoMessage() {} +func (*EntityResult) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *EntityResult) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *EntityResult) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *EntityResult) GetCursor() []byte { + if m != nil { + return m.Cursor + } + return nil +} + +// A query for entities. +type Query struct { + // The projection to return. Defaults to returning all properties. + Projection []*Projection `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"` + // The kinds to query (if empty, returns entities of all kinds). + // Currently at most 1 kind may be specified. + Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"` + // The filter to apply. + Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` + // The order to apply to the query results (if empty, order is unspecified). + Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"` + // The properties to make distinct. 
The query results will contain the first + // result for each distinct combination of values for the given properties + // (if empty, all results are returned). + DistinctOn []*PropertyReference `protobuf:"bytes,6,rep,name=distinct_on,json=distinctOn" json:"distinct_on,omitempty"` + // A starting point for the query results. Query cursors are + // returned in query result batches and + // [can only be used to continue the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets). + StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor,json=startCursor,proto3" json:"start_cursor,omitempty"` + // An ending point for the query results. Query cursors are + // returned in query result batches and + // [can only be used to limit the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets). + EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor,json=endCursor,proto3" json:"end_cursor,omitempty"` + // The number of results to skip. Applies before limit, but after all other + // constraints. Optional. Must be >= 0 if specified. + Offset int32 `protobuf:"varint,10,opt,name=offset" json:"offset,omitempty"` + // The maximum number of results to return. Applies after all other + // constraints. Optional. + // Unspecified is interpreted as no limit. + // Must be >= 0 if specified. 
+ Limit *google_protobuf3.Int32Value `protobuf:"bytes,12,opt,name=limit" json:"limit,omitempty"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *Query) GetProjection() []*Projection { + if m != nil { + return m.Projection + } + return nil +} + +func (m *Query) GetKind() []*KindExpression { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Query) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetOrder() []*PropertyOrder { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetDistinctOn() []*PropertyReference { + if m != nil { + return m.DistinctOn + } + return nil +} + +func (m *Query) GetStartCursor() []byte { + if m != nil { + return m.StartCursor + } + return nil +} + +func (m *Query) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *Query) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *Query) GetLimit() *google_protobuf3.Int32Value { + if m != nil { + return m.Limit + } + return nil +} + +// A representation of a kind. +type KindExpression struct { + // The name of the kind. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *KindExpression) Reset() { *m = KindExpression{} } +func (m *KindExpression) String() string { return proto.CompactTextString(m) } +func (*KindExpression) ProtoMessage() {} +func (*KindExpression) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *KindExpression) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A reference to a property relative to the kind expressions. +type PropertyReference struct { + // The name of the property. 
+ // If name includes "."s, it may be interpreted as a property name path. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *PropertyReference) Reset() { *m = PropertyReference{} } +func (m *PropertyReference) String() string { return proto.CompactTextString(m) } +func (*PropertyReference) ProtoMessage() {} +func (*PropertyReference) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *PropertyReference) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A representation of a property in a projection. +type Projection struct { + // The property to project. + Property *PropertyReference `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` +} + +func (m *Projection) Reset() { *m = Projection{} } +func (m *Projection) String() string { return proto.CompactTextString(m) } +func (*Projection) ProtoMessage() {} +func (*Projection) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *Projection) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +// The desired order for a specific property. +type PropertyOrder struct { + // The property to order by. + Property *PropertyReference `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The direction to order by. Defaults to `ASCENDING`. 
+ Direction PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=google.datastore.v1beta3.PropertyOrder_Direction" json:"direction,omitempty"` +} + +func (m *PropertyOrder) Reset() { *m = PropertyOrder{} } +func (m *PropertyOrder) String() string { return proto.CompactTextString(m) } +func (*PropertyOrder) ProtoMessage() {} +func (*PropertyOrder) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *PropertyOrder) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyOrder) GetDirection() PropertyOrder_Direction { + if m != nil { + return m.Direction + } + return PropertyOrder_DIRECTION_UNSPECIFIED +} + +// A holder for any type of filter. +type Filter struct { + // The type of filter. + // + // Types that are valid to be assigned to FilterType: + // *Filter_CompositeFilter + // *Filter_PropertyFilter + FilterType isFilter_FilterType `protobuf_oneof:"filter_type"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} +func (*Filter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +type isFilter_FilterType interface { + isFilter_FilterType() +} + +type Filter_CompositeFilter struct { + CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter,json=compositeFilter,oneof"` +} +type Filter_PropertyFilter struct { + PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter,json=propertyFilter,oneof"` +} + +func (*Filter_CompositeFilter) isFilter_FilterType() {} +func (*Filter_PropertyFilter) isFilter_FilterType() {} + +func (m *Filter) GetFilterType() isFilter_FilterType { + if m != nil { + return m.FilterType + } + return nil +} + +func (m *Filter) GetCompositeFilter() *CompositeFilter { + if x, ok := m.GetFilterType().(*Filter_CompositeFilter); ok { + return x.CompositeFilter + } + return nil +} + +func (m *Filter) 
GetPropertyFilter() *PropertyFilter { + if x, ok := m.GetFilterType().(*Filter_PropertyFilter); ok { + return x.PropertyFilter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Filter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Filter_OneofMarshaler, _Filter_OneofUnmarshaler, _Filter_OneofSizer, []interface{}{ + (*Filter_CompositeFilter)(nil), + (*Filter_PropertyFilter)(nil), + } +} + +func _Filter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Filter) + // filter_type + switch x := m.FilterType.(type) { + case *Filter_CompositeFilter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CompositeFilter); err != nil { + return err + } + case *Filter_PropertyFilter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PropertyFilter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Filter.FilterType has unexpected type %T", x) + } + return nil +} + +func _Filter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Filter) + switch tag { + case 1: // filter_type.composite_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CompositeFilter) + err := b.DecodeMessage(msg) + m.FilterType = &Filter_CompositeFilter{msg} + return true, err + case 2: // filter_type.property_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PropertyFilter) + err := b.DecodeMessage(msg) + m.FilterType = &Filter_PropertyFilter{msg} + return true, err + default: + return false, nil + } +} + +func _Filter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Filter) + // filter_type + switch x := m.FilterType.(type) { + case *Filter_CompositeFilter: + s := 
proto.Size(x.CompositeFilter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Filter_PropertyFilter: + s := proto.Size(x.PropertyFilter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A filter that merges multiple other filters using the given operator. +type CompositeFilter struct { + // The operator for combining multiple filters. + Op CompositeFilter_Operator `protobuf:"varint,1,opt,name=op,enum=google.datastore.v1beta3.CompositeFilter_Operator" json:"op,omitempty"` + // The list of filters to combine. + // Must contain at least one filter. + Filters []*Filter `protobuf:"bytes,2,rep,name=filters" json:"filters,omitempty"` +} + +func (m *CompositeFilter) Reset() { *m = CompositeFilter{} } +func (m *CompositeFilter) String() string { return proto.CompactTextString(m) } +func (*CompositeFilter) ProtoMessage() {} +func (*CompositeFilter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *CompositeFilter) GetOp() CompositeFilter_Operator { + if m != nil { + return m.Op + } + return CompositeFilter_OPERATOR_UNSPECIFIED +} + +func (m *CompositeFilter) GetFilters() []*Filter { + if m != nil { + return m.Filters + } + return nil +} + +// A filter on a specific property. +type PropertyFilter struct { + // The property to filter by. + Property *PropertyReference `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` + // The operator to filter by. + Op PropertyFilter_Operator `protobuf:"varint,2,opt,name=op,enum=google.datastore.v1beta3.PropertyFilter_Operator" json:"op,omitempty"` + // The value to compare the property to. 
+ Value *Value `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *PropertyFilter) Reset() { *m = PropertyFilter{} } +func (m *PropertyFilter) String() string { return proto.CompactTextString(m) } +func (*PropertyFilter) ProtoMessage() {} +func (*PropertyFilter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +func (m *PropertyFilter) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +func (m *PropertyFilter) GetOp() PropertyFilter_Operator { + if m != nil { + return m.Op + } + return PropertyFilter_OPERATOR_UNSPECIFIED +} + +func (m *PropertyFilter) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// A [GQL query](https://cloud.google.com/datastore/docs/apis/gql/gql_reference). +type GqlQuery struct { + // A string of the format described + // [here](https://cloud.google.com/datastore/docs/apis/gql/gql_reference). + QueryString string `protobuf:"bytes,1,opt,name=query_string,json=queryString" json:"query_string,omitempty"` + // When false, the query string must not contain any literals and instead must + // bind all values. For example, + // `SELECT * FROM Kind WHERE a = 'string literal'` is not allowed, while + // `SELECT * FROM Kind WHERE a = @value` is. + AllowLiterals bool `protobuf:"varint,2,opt,name=allow_literals,json=allowLiterals" json:"allow_literals,omitempty"` + // For each non-reserved named binding site in the query string, there must be + // a named parameter with that name, but not necessarily the inverse. + // + // Key must match regex `[A-Za-z_$][A-Za-z_$0-9]*`, must not match regex + // `__.*__`, and must not be `""`. 
+ NamedBindings map[string]*GqlQueryParameter `protobuf:"bytes,5,rep,name=named_bindings,json=namedBindings" json:"named_bindings,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Numbered binding site @1 references the first numbered parameter, + // effectively using 1-based indexing, rather than the usual 0. + // + // For each binding site numbered i in `query_string`, there must be an i-th + // numbered parameter. The inverse must also be true. + PositionalBindings []*GqlQueryParameter `protobuf:"bytes,4,rep,name=positional_bindings,json=positionalBindings" json:"positional_bindings,omitempty"` +} + +func (m *GqlQuery) Reset() { *m = GqlQuery{} } +func (m *GqlQuery) String() string { return proto.CompactTextString(m) } +func (*GqlQuery) ProtoMessage() {} +func (*GqlQuery) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +func (m *GqlQuery) GetQueryString() string { + if m != nil { + return m.QueryString + } + return "" +} + +func (m *GqlQuery) GetAllowLiterals() bool { + if m != nil { + return m.AllowLiterals + } + return false +} + +func (m *GqlQuery) GetNamedBindings() map[string]*GqlQueryParameter { + if m != nil { + return m.NamedBindings + } + return nil +} + +func (m *GqlQuery) GetPositionalBindings() []*GqlQueryParameter { + if m != nil { + return m.PositionalBindings + } + return nil +} + +// A binding parameter for a GQL query. +type GqlQueryParameter struct { + // The type of parameter. 
+ // + // Types that are valid to be assigned to ParameterType: + // *GqlQueryParameter_Value + // *GqlQueryParameter_Cursor + ParameterType isGqlQueryParameter_ParameterType `protobuf_oneof:"parameter_type"` +} + +func (m *GqlQueryParameter) Reset() { *m = GqlQueryParameter{} } +func (m *GqlQueryParameter) String() string { return proto.CompactTextString(m) } +func (*GqlQueryParameter) ProtoMessage() {} +func (*GqlQueryParameter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{10} } + +type isGqlQueryParameter_ParameterType interface { + isGqlQueryParameter_ParameterType() +} + +type GqlQueryParameter_Value struct { + Value *Value `protobuf:"bytes,2,opt,name=value,oneof"` +} +type GqlQueryParameter_Cursor struct { + Cursor []byte `protobuf:"bytes,3,opt,name=cursor,proto3,oneof"` +} + +func (*GqlQueryParameter_Value) isGqlQueryParameter_ParameterType() {} +func (*GqlQueryParameter_Cursor) isGqlQueryParameter_ParameterType() {} + +func (m *GqlQueryParameter) GetParameterType() isGqlQueryParameter_ParameterType { + if m != nil { + return m.ParameterType + } + return nil +} + +func (m *GqlQueryParameter) GetValue() *Value { + if x, ok := m.GetParameterType().(*GqlQueryParameter_Value); ok { + return x.Value + } + return nil +} + +func (m *GqlQueryParameter) GetCursor() []byte { + if x, ok := m.GetParameterType().(*GqlQueryParameter_Cursor); ok { + return x.Cursor + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GqlQueryParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GqlQueryParameter_OneofMarshaler, _GqlQueryParameter_OneofUnmarshaler, _GqlQueryParameter_OneofSizer, []interface{}{ + (*GqlQueryParameter_Value)(nil), + (*GqlQueryParameter_Cursor)(nil), + } +} + +func _GqlQueryParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GqlQueryParameter) + // parameter_type + switch x := m.ParameterType.(type) { + case *GqlQueryParameter_Value: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Value); err != nil { + return err + } + case *GqlQueryParameter_Cursor: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Cursor) + case nil: + default: + return fmt.Errorf("GqlQueryParameter.ParameterType has unexpected type %T", x) + } + return nil +} + +func _GqlQueryParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GqlQueryParameter) + switch tag { + case 2: // parameter_type.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Value) + err := b.DecodeMessage(msg) + m.ParameterType = &GqlQueryParameter_Value{msg} + return true, err + case 3: // parameter_type.cursor + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ParameterType = &GqlQueryParameter_Cursor{x} + return true, err + default: + return false, nil + } +} + +func _GqlQueryParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GqlQueryParameter) + // parameter_type + switch x := m.ParameterType.(type) { + case *GqlQueryParameter_Value: + s := proto.Size(x.Value) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GqlQueryParameter_Cursor: + n += proto.SizeVarint(3<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Cursor))) + n += len(x.Cursor) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A batch of results produced by a query. +type QueryResultBatch struct { + // The number of results skipped, typically because of an offset. + SkippedResults int32 `protobuf:"varint,6,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"` + // A cursor that points to the position after the last skipped result. + // Will be set when `skipped_results` != 0. + SkippedCursor []byte `protobuf:"bytes,3,opt,name=skipped_cursor,json=skippedCursor,proto3" json:"skipped_cursor,omitempty"` + // The result type for every entity in `entity_results`. + EntityResultType EntityResult_ResultType `protobuf:"varint,1,opt,name=entity_result_type,json=entityResultType,enum=google.datastore.v1beta3.EntityResult_ResultType" json:"entity_result_type,omitempty"` + // The results for this batch. + EntityResults []*EntityResult `protobuf:"bytes,2,rep,name=entity_results,json=entityResults" json:"entity_results,omitempty"` + // A cursor that points to the position after the last result in the batch. + EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor,json=endCursor,proto3" json:"end_cursor,omitempty"` + // The state of the query after the current batch. + MoreResults QueryResultBatch_MoreResultsType `protobuf:"varint,5,opt,name=more_results,json=moreResults,enum=google.datastore.v1beta3.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"` + // The version number of the snapshot this batch was returned from. + // This applies to the range of results from the query's `start_cursor` (or + // the beginning of the query if no cursor was given) to this batch's + // `end_cursor` (not the query's `end_cursor`). + // + // In a single transaction, subsequent query result batches for the same query + // can have a greater snapshot version number. 
Each batch's snapshot version + // is valid for all preceding batches. + // The value will be zero for eventually consistent queries. + SnapshotVersion int64 `protobuf:"varint,7,opt,name=snapshot_version,json=snapshotVersion" json:"snapshot_version,omitempty"` +} + +func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} } +func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) } +func (*QueryResultBatch) ProtoMessage() {} +func (*QueryResultBatch) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } + +func (m *QueryResultBatch) GetSkippedResults() int32 { + if m != nil { + return m.SkippedResults + } + return 0 +} + +func (m *QueryResultBatch) GetSkippedCursor() []byte { + if m != nil { + return m.SkippedCursor + } + return nil +} + +func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType { + if m != nil { + return m.EntityResultType + } + return EntityResult_RESULT_TYPE_UNSPECIFIED +} + +func (m *QueryResultBatch) GetEntityResults() []*EntityResult { + if m != nil { + return m.EntityResults + } + return nil +} + +func (m *QueryResultBatch) GetEndCursor() []byte { + if m != nil { + return m.EndCursor + } + return nil +} + +func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType { + if m != nil { + return m.MoreResults + } + return QueryResultBatch_MORE_RESULTS_TYPE_UNSPECIFIED +} + +func (m *QueryResultBatch) GetSnapshotVersion() int64 { + if m != nil { + return m.SnapshotVersion + } + return 0 +} + +func init() { + proto.RegisterType((*EntityResult)(nil), "google.datastore.v1beta3.EntityResult") + proto.RegisterType((*Query)(nil), "google.datastore.v1beta3.Query") + proto.RegisterType((*KindExpression)(nil), "google.datastore.v1beta3.KindExpression") + proto.RegisterType((*PropertyReference)(nil), "google.datastore.v1beta3.PropertyReference") + proto.RegisterType((*Projection)(nil), "google.datastore.v1beta3.Projection") + proto.RegisterType((*PropertyOrder)(nil), 
"google.datastore.v1beta3.PropertyOrder") + proto.RegisterType((*Filter)(nil), "google.datastore.v1beta3.Filter") + proto.RegisterType((*CompositeFilter)(nil), "google.datastore.v1beta3.CompositeFilter") + proto.RegisterType((*PropertyFilter)(nil), "google.datastore.v1beta3.PropertyFilter") + proto.RegisterType((*GqlQuery)(nil), "google.datastore.v1beta3.GqlQuery") + proto.RegisterType((*GqlQueryParameter)(nil), "google.datastore.v1beta3.GqlQueryParameter") + proto.RegisterType((*QueryResultBatch)(nil), "google.datastore.v1beta3.QueryResultBatch") + proto.RegisterEnum("google.datastore.v1beta3.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value) + proto.RegisterEnum("google.datastore.v1beta3.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value) + proto.RegisterEnum("google.datastore.v1beta3.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value) + proto.RegisterEnum("google.datastore.v1beta3.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value) + proto.RegisterEnum("google.datastore.v1beta3.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value) +} + +func init() { proto.RegisterFile("google/datastore/v1beta3/query.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 1312 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcb, 0x6e, 0xdb, 0x46, + 0x17, 0x36, 0x29, 0xc9, 0x97, 0xa3, 0x1b, 0x33, 0xf9, 0xff, 0x94, 0x71, 0x2e, 0x75, 0x88, 0xa4, + 0x51, 0x50, 0x54, 0x82, 0x15, 0x04, 0x0d, 0xd2, 0x76, 0xa1, 0x0b, 0x6d, 0xab, 0x91, 0x45, 0x65, + 0x24, 0x1b, 0x48, 0x91, 0x82, 0xa0, 0xa5, 0xb1, 0xc2, 0x86, 0x22, 0x99, 0xe1, 0x38, 0x89, 0xdf, + 0xa2, 0x9b, 0x02, 0x7d, 0x86, 0x3e, 0x45, 0x17, 0xdd, 0x76, 0xdb, 0x6d, 0x1f, 0xa0, 0x9b, 0xbe, + 0x41, 0x0b, 0xce, 0x0c, 0x75, 0x73, 0x14, 0xb9, 0x40, 0x76, 0x9a, 0x33, 
0xdf, 0xf7, 0x9d, 0x99, + 0x8f, 0x67, 0x66, 0x8e, 0xe0, 0xee, 0x28, 0x08, 0x46, 0x1e, 0xa9, 0x0c, 0x1d, 0xe6, 0x44, 0x2c, + 0xa0, 0xa4, 0xf2, 0x66, 0xf7, 0x84, 0x30, 0xe7, 0x61, 0xe5, 0xf5, 0x19, 0xa1, 0xe7, 0xe5, 0x90, + 0x06, 0x2c, 0x40, 0xba, 0x40, 0x95, 0x27, 0xa8, 0xb2, 0x44, 0x6d, 0xdf, 0x94, 0x7c, 0x27, 0x74, + 0x2b, 0x8e, 0xef, 0x07, 0xcc, 0x61, 0x6e, 0xe0, 0x47, 0x82, 0xb7, 0x7d, 0x6f, 0xa9, 0x3a, 0xf1, + 0x99, 0xcb, 0xa4, 0xfc, 0xf6, 0x6d, 0x09, 0xe3, 0xa3, 0x93, 0xb3, 0xd3, 0xca, 0x5b, 0xea, 0x84, + 0x21, 0xa1, 0x89, 0x8c, 0x4c, 0x5f, 0x61, 0xe7, 0x21, 0xa9, 0x78, 0x0e, 0xf3, 0xfc, 0x91, 0x98, + 0x31, 0x7e, 0x57, 0x20, 0x67, 0x72, 0x29, 0x4c, 0xa2, 0x33, 0x8f, 0xa1, 0xc7, 0xb0, 0x2e, 0xa4, + 0x75, 0x65, 0x47, 0x29, 0x65, 0xab, 0x3b, 0xe5, 0x65, 0x4b, 0x2f, 0x4b, 0x9e, 0xc4, 0x23, 0x1d, + 0x36, 0xde, 0x10, 0x1a, 0xb9, 0x81, 0xaf, 0xa7, 0x77, 0x94, 0x52, 0x0a, 0x27, 0x43, 0x74, 0x0d, + 0xd6, 0x07, 0x67, 0x34, 0x0a, 0xa8, 0x9e, 0xda, 0x51, 0x4a, 0x39, 0x2c, 0x47, 0xc6, 0x33, 0x00, + 0x91, 0xb5, 0x7f, 0x1e, 0x12, 0x74, 0x03, 0x3e, 0xc1, 0x66, 0xef, 0xa8, 0xdd, 0xb7, 0xfb, 0xcf, + 0xbb, 0xa6, 0x7d, 0xd4, 0xe9, 0x75, 0xcd, 0x46, 0x6b, 0xaf, 0x65, 0x36, 0xb5, 0x35, 0xb4, 0x09, + 0xe9, 0xbd, 0xa3, 0x76, 0x5b, 0x53, 0x50, 0x01, 0xa0, 0x8b, 0xad, 0x6f, 0xcd, 0x46, 0xbf, 0x65, + 0x75, 0x34, 0x15, 0xe5, 0x60, 0xf3, 0xa9, 0xf9, 0xdc, 0xb6, 0x3a, 0xed, 0xe7, 0x5a, 0xca, 0xf8, + 0x33, 0x05, 0x99, 0x67, 0xb1, 0xf1, 0xa8, 0x09, 0x10, 0xd2, 0xe0, 0x07, 0x32, 0x88, 0xfd, 0xd4, + 0xd5, 0x9d, 0x54, 0x29, 0x5b, 0xbd, 0xbb, 0x7c, 0x33, 0xdd, 0x09, 0x16, 0xcf, 0xf0, 0xd0, 0xd7, + 0x90, 0x7e, 0xe5, 0xfa, 0x43, 0x3d, 0xc5, 0xf9, 0xa5, 0xe5, 0xfc, 0xa7, 0xae, 0x3f, 0x34, 0xdf, + 0x85, 0x94, 0x44, 0xf1, 0x96, 0x31, 0x67, 0xc5, 0x66, 0x9e, 0xba, 0x1e, 0x23, 0x94, 0x3b, 0xf2, + 0x41, 0x33, 0xf7, 0x38, 0x0e, 0x4b, 0x3c, 0xfa, 0x06, 0x32, 0x01, 0x1d, 0x12, 0xaa, 0x67, 0x78, + 0xe2, 0xfb, 0x1f, 0x5c, 0x78, 0x48, 0x28, 0x3b, 0xb7, 0x62, 0x38, 0x16, 0x2c, 0xd4, 0x86, 0xec, + 0xd0, 0x8d, 
0x98, 0xeb, 0x0f, 0x98, 0x1d, 0xf8, 0xfa, 0x3a, 0x17, 0xf9, 0x7c, 0xb5, 0x08, 0x26, + 0xa7, 0x84, 0x12, 0x7f, 0x40, 0x30, 0x24, 0x7c, 0xcb, 0x47, 0x77, 0x20, 0x17, 0x31, 0x87, 0x32, + 0x5b, 0x7e, 0xc5, 0x0d, 0xfe, 0x15, 0xb3, 0x3c, 0xd6, 0xe0, 0x21, 0x74, 0x0b, 0x80, 0xf8, 0xc3, + 0x04, 0xb0, 0xc9, 0x01, 0x5b, 0xc4, 0x1f, 0xca, 0xe9, 0x6b, 0xb0, 0x1e, 0x9c, 0x9e, 0x46, 0x84, + 0xe9, 0xb0, 0xa3, 0x94, 0x32, 0x58, 0x8e, 0xd0, 0x2e, 0x64, 0x3c, 0x77, 0xec, 0x32, 0x3d, 0xc7, + 0xfd, 0xb9, 0x91, 0xac, 0x30, 0x29, 0xe4, 0x72, 0xcb, 0x67, 0x0f, 0xab, 0xc7, 0x8e, 0x77, 0x46, + 0xb0, 0x40, 0x1a, 0x77, 0xa1, 0x30, 0xef, 0x35, 0x42, 0x90, 0xf6, 0x9d, 0x31, 0xe1, 0x05, 0xbb, + 0x85, 0xf9, 0x6f, 0xe3, 0x3e, 0x5c, 0xb9, 0xb0, 0xa7, 0x09, 0x50, 0x9d, 0x01, 0x1e, 0x01, 0x4c, + 0x3f, 0x3d, 0xda, 0x87, 0xcd, 0x50, 0xd2, 0x64, 0xfd, 0xff, 0x27, 0xd3, 0x26, 0x64, 0xe3, 0x6f, + 0x05, 0xf2, 0x73, 0x5f, 0xe6, 0xa3, 0x49, 0x23, 0x0b, 0xb6, 0x86, 0x2e, 0x9d, 0xd4, 0xb5, 0x52, + 0x2a, 0x54, 0x77, 0x2f, 0x59, 0x1e, 0xe5, 0x66, 0x42, 0xc4, 0x53, 0x0d, 0xc3, 0x84, 0xad, 0x49, + 0x1c, 0x5d, 0x87, 0xff, 0x37, 0x5b, 0x58, 0x9c, 0xae, 0x85, 0x33, 0x98, 0x87, 0xad, 0x5a, 0xaf, + 0x61, 0x76, 0x9a, 0xad, 0xce, 0xbe, 0x38, 0x88, 0x4d, 0x73, 0x32, 0x56, 0x8d, 0xdf, 0x14, 0x58, + 0x17, 0x55, 0x8c, 0x8e, 0x41, 0x1b, 0x04, 0xe3, 0x30, 0x88, 0x5c, 0x46, 0x6c, 0x79, 0x02, 0xc4, + 0x9e, 0x1f, 0x2c, 0x5f, 0x69, 0x23, 0x61, 0x08, 0x91, 0x83, 0x35, 0x5c, 0x1c, 0xcc, 0x87, 0x50, + 0x0f, 0x8a, 0x89, 0x0d, 0x89, 0xac, 0xca, 0x65, 0x4b, 0xab, 0x0d, 0x98, 0xa8, 0x16, 0xc2, 0xb9, + 0x48, 0x3d, 0x0f, 0x59, 0xa1, 0x65, 0xc7, 0xd7, 0xa3, 0xf1, 0xab, 0x02, 0xc5, 0x85, 0xa5, 0xa0, + 0x3a, 0xa8, 0x41, 0xc8, 0x77, 0x50, 0xa8, 0x56, 0x2f, 0xbd, 0x83, 0xb2, 0x15, 0x12, 0xea, 0xb0, + 0x80, 0x62, 0x35, 0x08, 0xd1, 0x13, 0xd8, 0x10, 0x69, 0x22, 0x79, 0x19, 0xad, 0xbe, 0x0c, 0x12, + 0x82, 0xf1, 0x05, 0x6c, 0x26, 0x5a, 0x48, 0x87, 0xff, 0x59, 0x5d, 0x13, 0xd7, 0xfa, 0x16, 0x5e, + 0xf8, 0x3e, 0x1b, 0x90, 0xaa, 0x75, 0x9a, 0x9a, 
0x62, 0xfc, 0xa5, 0x42, 0x61, 0x7e, 0xdb, 0x1f, + 0xaf, 0xfa, 0x6a, 0xdc, 0x8a, 0x4b, 0x97, 0xdd, 0xfb, 0x9c, 0x78, 0x04, 0x99, 0x37, 0xf1, 0x89, + 0xe6, 0xaf, 0x41, 0xb6, 0xfa, 0xe9, 0x72, 0x15, 0x79, 0xf0, 0x39, 0xda, 0xf8, 0x49, 0xb9, 0x94, + 0x0b, 0x79, 0xd8, 0x6a, 0x9b, 0xbd, 0x9e, 0xdd, 0x3f, 0xa8, 0x75, 0x34, 0x05, 0x5d, 0x03, 0x34, + 0x19, 0xda, 0x16, 0xb6, 0xcd, 0x67, 0x47, 0xb5, 0xb6, 0xa6, 0x22, 0x0d, 0x72, 0xfb, 0xd8, 0xac, + 0xf5, 0x4d, 0x2c, 0x90, 0xa9, 0xb8, 0xf2, 0x67, 0x23, 0x53, 0x70, 0x1a, 0x6d, 0x41, 0x46, 0xfc, + 0xcc, 0xc4, 0xbc, 0x83, 0x5a, 0xcf, 0xae, 0x75, 0x1a, 0x66, 0xaf, 0x6f, 0x61, 0x2d, 0x6b, 0xfc, + 0xa3, 0xc2, 0xe6, 0xfe, 0x6b, 0x4f, 0xbc, 0x3a, 0x77, 0x20, 0xc7, 0xdf, 0x7d, 0x3b, 0x62, 0xd4, + 0xf5, 0x47, 0xf2, 0x4e, 0xca, 0xf2, 0x58, 0x8f, 0x87, 0xd0, 0x3d, 0x28, 0x38, 0x9e, 0x17, 0xbc, + 0xb5, 0x3d, 0x97, 0x11, 0xea, 0x78, 0x11, 0x77, 0x73, 0x13, 0xe7, 0x79, 0xb4, 0x2d, 0x83, 0xe8, + 0x05, 0x14, 0xe2, 0x0b, 0x6a, 0x68, 0x9f, 0xb8, 0xfe, 0xd0, 0xf5, 0x47, 0x91, 0x7c, 0x0a, 0x1e, + 0x2d, 0xb7, 0x2b, 0x59, 0x45, 0xb9, 0x13, 0x13, 0xeb, 0x92, 0x67, 0xfa, 0x8c, 0x9e, 0xe3, 0xbc, + 0x3f, 0x1b, 0x43, 0x2f, 0xe0, 0x2a, 0x2f, 0x55, 0x37, 0xf0, 0x1d, 0x6f, 0x9a, 0x22, 0xbd, 0xea, + 0xa1, 0x48, 0x52, 0x74, 0x1d, 0xea, 0x8c, 0x49, 0x5c, 0xa4, 0x68, 0xaa, 0x93, 0xa8, 0x6f, 0x8f, + 0x01, 0x5d, 0x5c, 0x02, 0xd2, 0x20, 0xf5, 0x8a, 0x9c, 0x4b, 0x4b, 0xe2, 0x9f, 0xa8, 0x96, 0x54, + 0x82, 0xba, 0xaa, 0x24, 0x2f, 0xe6, 0x15, 0xcc, 0x27, 0xea, 0x63, 0xc5, 0x78, 0x07, 0x57, 0x2e, + 0xcc, 0xa3, 0x2f, 0xe7, 0xb5, 0x57, 0x55, 0xd9, 0xc1, 0x9a, 0x54, 0x44, 0xfa, 0x7c, 0xb7, 0x72, + 0xb0, 0x96, 0xf4, 0x2b, 0x75, 0x0d, 0x0a, 0x61, 0xa2, 0x2f, 0x2e, 0x8b, 0x3f, 0xd2, 0xa0, 0xf1, + 0xbc, 0xa2, 0x8f, 0xa9, 0x3b, 0x6c, 0xf0, 0x12, 0xdd, 0x87, 0x62, 0xf4, 0xca, 0x0d, 0x43, 0x32, + 0xb4, 0x29, 0x0f, 0x47, 0xfa, 0x3a, 0x7f, 0xf5, 0x0a, 0x32, 0x2c, 0xc0, 0x51, 0x5c, 0x09, 0x09, + 0x70, 0xae, 0x3f, 0xca, 0xcb, 0xa8, 0x7c, 0x3c, 0x6d, 0x40, 0xa2, 0xc5, 0x92, 0x72, 
0x3c, 0xb5, + 0xbc, 0x8d, 0x76, 0x57, 0xb6, 0x67, 0x9c, 0x52, 0x9e, 0xf6, 0x59, 0x58, 0x23, 0x33, 0x13, 0xbc, + 0xf3, 0x3a, 0x84, 0xc2, 0x5c, 0x82, 0xe4, 0x86, 0xfa, 0xec, 0x72, 0xe2, 0x38, 0x3f, 0xab, 0x18, + 0x2d, 0xf4, 0x02, 0xe9, 0xc5, 0x5e, 0xe0, 0x7b, 0xc8, 0x8d, 0x03, 0x4a, 0x26, 0xb9, 0x32, 0x7c, + 0x23, 0x4f, 0x96, 0xe7, 0x5a, 0x34, 0xb8, 0x7c, 0x18, 0x50, 0x22, 0x93, 0xf1, 0x1d, 0x65, 0xc7, + 0xd3, 0x00, 0x7a, 0x00, 0x5a, 0xe4, 0x3b, 0x61, 0xf4, 0x32, 0x60, 0x76, 0xd2, 0x8f, 0x6e, 0xf0, + 0x7e, 0xb4, 0x98, 0xc4, 0x8f, 0x45, 0xd8, 0xf8, 0x59, 0x81, 0xe2, 0x82, 0x16, 0xba, 0x03, 0xb7, + 0x0e, 0x2d, 0x6c, 0xda, 0xa2, 0x15, 0xed, 0xbd, 0xaf, 0x17, 0xd5, 0x20, 0xd7, 0xb1, 0xfa, 0xf6, + 0x5e, 0xab, 0xd3, 0xea, 0x1d, 0x98, 0x4d, 0x4d, 0x41, 0x37, 0x41, 0x9f, 0x23, 0xd5, 0xf6, 0xe2, + 0x5b, 0xa4, 0xdd, 0x3a, 0x6c, 0xf5, 0x35, 0x15, 0xdd, 0x82, 0xeb, 0xef, 0x99, 0x6d, 0x1c, 0xe1, + 0x9e, 0x85, 0xb5, 0x34, 0xba, 0x0a, 0xc5, 0x8e, 0x65, 0xcf, 0x22, 0xb4, 0x54, 0xfd, 0x47, 0x05, + 0x6e, 0x0e, 0x82, 0xf1, 0x52, 0x53, 0xea, 0x20, 0xca, 0x3d, 0xee, 0x93, 0xba, 0xca, 0x77, 0x35, + 0x89, 0x1b, 0x05, 0x9e, 0xe3, 0x8f, 0xca, 0x01, 0x1d, 0x55, 0x46, 0xc4, 0xe7, 0x5d, 0x54, 0x45, + 0x4c, 0x39, 0xa1, 0x1b, 0x5d, 0xfc, 0x1b, 0xf1, 0xd5, 0x24, 0xf2, 0x8b, 0x7a, 0x7b, 0x5f, 0x68, + 0x34, 0xbc, 0xe0, 0x6c, 0x58, 0x6e, 0x4e, 0x32, 0x1e, 0xef, 0xd6, 0x63, 0xe8, 0xc9, 0x3a, 0x97, + 0x7b, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xc8, 0xc3, 0x34, 0xf2, 0x0c, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/build_events.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/build_events.pb.go new file mode 100644 index 000000000..dc5d7a502 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/build_events.pb.go @@ -0,0 +1,834 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/build/v1/build_events.proto + +/* +Package build is a generated protocol buffer package. 
+ +It is generated from these files: + google/devtools/build/v1/build_events.proto + google/devtools/build/v1/build_status.proto + google/devtools/build/v1/publish_build_event.proto + +It has these top-level messages: + BuildEvent + StreamId + BuildStatus + PublishLifecycleEventRequest + PublishBuildToolEventStreamResponse + OrderedBuildEvent + PublishBuildToolEventStreamRequest +*/ +package build + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf3 "github.com/golang/protobuf/ptypes/wrappers" +import _ "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The type of console output stream. +type ConsoleOutputStream int32 + +const ( + // Unspecified or unknown. + ConsoleOutputStream_UNKNOWN ConsoleOutputStream = 0 + // Normal output stream. + ConsoleOutputStream_STDOUT ConsoleOutputStream = 1 + // Error output stream. 
+ ConsoleOutputStream_STDERR ConsoleOutputStream = 2 +) + +var ConsoleOutputStream_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STDOUT", + 2: "STDERR", +} +var ConsoleOutputStream_value = map[string]int32{ + "UNKNOWN": 0, + "STDOUT": 1, + "STDERR": 2, +} + +func (x ConsoleOutputStream) String() string { + return proto.EnumName(ConsoleOutputStream_name, int32(x)) +} +func (ConsoleOutputStream) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// How did the event stream finish. +type BuildEvent_BuildComponentStreamFinished_FinishType int32 + +const ( + // Unknown or unspecified; callers should never set this value. + BuildEvent_BuildComponentStreamFinished_FINISH_TYPE_UNSPECIFIED BuildEvent_BuildComponentStreamFinished_FinishType = 0 + // Set by the event publisher to indicate a build event stream is + // finished. + BuildEvent_BuildComponentStreamFinished_FINISHED BuildEvent_BuildComponentStreamFinished_FinishType = 1 + // Set by the WatchBuild RPC server when the publisher of a build event + // stream stops publishing events without publishing a + // BuildComponentStreamFinished event whose type equals FINISHED. + BuildEvent_BuildComponentStreamFinished_EXPIRED BuildEvent_BuildComponentStreamFinished_FinishType = 2 +) + +var BuildEvent_BuildComponentStreamFinished_FinishType_name = map[int32]string{ + 0: "FINISH_TYPE_UNSPECIFIED", + 1: "FINISHED", + 2: "EXPIRED", +} +var BuildEvent_BuildComponentStreamFinished_FinishType_value = map[string]int32{ + "FINISH_TYPE_UNSPECIFIED": 0, + "FINISHED": 1, + "EXPIRED": 2, +} + +func (x BuildEvent_BuildComponentStreamFinished_FinishType) String() string { + return proto.EnumName(BuildEvent_BuildComponentStreamFinished_FinishType_name, int32(x)) +} +func (BuildEvent_BuildComponentStreamFinished_FinishType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 5, 0} +} + +// Which build component generates this event stream. Each build component +// may generate one event stream. 
+type StreamId_BuildComponent int32 + +const ( + // Unknown or unspecified; callers should never set this value. + StreamId_UNKNOWN_COMPONENT StreamId_BuildComponent = 0 + // A component that coordinates builds. + StreamId_CONTROLLER StreamId_BuildComponent = 1 + // A component that runs executables needed to complete a build. + StreamId_WORKER StreamId_BuildComponent = 2 + // A component that builds something. + StreamId_TOOL StreamId_BuildComponent = 3 +) + +var StreamId_BuildComponent_name = map[int32]string{ + 0: "UNKNOWN_COMPONENT", + 1: "CONTROLLER", + 2: "WORKER", + 3: "TOOL", +} +var StreamId_BuildComponent_value = map[string]int32{ + "UNKNOWN_COMPONENT": 0, + "CONTROLLER": 1, + "WORKER": 2, + "TOOL": 3, +} + +func (x StreamId_BuildComponent) String() string { + return proto.EnumName(StreamId_BuildComponent_name, int32(x)) +} +func (StreamId_BuildComponent) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// An event representing some state change that occured in the build. This +// message does not include field for uniquely identifying an event. +type BuildEvent struct { + // The timestamp of this event. + EventTime *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"` + // ////////////////////////////////////////////////////////////////////////// + // Events that indicate a state change of a build request in the build + // queue. 
+ // + // Types that are valid to be assigned to Event: + // *BuildEvent_InvocationAttemptStarted_ + // *BuildEvent_InvocationAttemptFinished_ + // *BuildEvent_BuildEnqueued_ + // *BuildEvent_BuildFinished_ + // *BuildEvent_ConsoleOutput_ + // *BuildEvent_ComponentStreamFinished + // *BuildEvent_BazelEvent + // *BuildEvent_BuildExecutionEvent + // *BuildEvent_SourceFetchEvent + Event isBuildEvent_Event `protobuf_oneof:"event"` +} + +func (m *BuildEvent) Reset() { *m = BuildEvent{} } +func (m *BuildEvent) String() string { return proto.CompactTextString(m) } +func (*BuildEvent) ProtoMessage() {} +func (*BuildEvent) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isBuildEvent_Event interface { + isBuildEvent_Event() +} + +type BuildEvent_InvocationAttemptStarted_ struct { + InvocationAttemptStarted *BuildEvent_InvocationAttemptStarted `protobuf:"bytes,51,opt,name=invocation_attempt_started,json=invocationAttemptStarted,oneof"` +} +type BuildEvent_InvocationAttemptFinished_ struct { + InvocationAttemptFinished *BuildEvent_InvocationAttemptFinished `protobuf:"bytes,52,opt,name=invocation_attempt_finished,json=invocationAttemptFinished,oneof"` +} +type BuildEvent_BuildEnqueued_ struct { + BuildEnqueued *BuildEvent_BuildEnqueued `protobuf:"bytes,53,opt,name=build_enqueued,json=buildEnqueued,oneof"` +} +type BuildEvent_BuildFinished_ struct { + BuildFinished *BuildEvent_BuildFinished `protobuf:"bytes,55,opt,name=build_finished,json=buildFinished,oneof"` +} +type BuildEvent_ConsoleOutput_ struct { + ConsoleOutput *BuildEvent_ConsoleOutput `protobuf:"bytes,56,opt,name=console_output,json=consoleOutput,oneof"` +} +type BuildEvent_ComponentStreamFinished struct { + ComponentStreamFinished *BuildEvent_BuildComponentStreamFinished `protobuf:"bytes,59,opt,name=component_stream_finished,json=componentStreamFinished,oneof"` +} +type BuildEvent_BazelEvent struct { + BazelEvent *google_protobuf1.Any 
`protobuf:"bytes,60,opt,name=bazel_event,json=bazelEvent,oneof"` +} +type BuildEvent_BuildExecutionEvent struct { + BuildExecutionEvent *google_protobuf1.Any `protobuf:"bytes,61,opt,name=build_execution_event,json=buildExecutionEvent,oneof"` +} +type BuildEvent_SourceFetchEvent struct { + SourceFetchEvent *google_protobuf1.Any `protobuf:"bytes,62,opt,name=source_fetch_event,json=sourceFetchEvent,oneof"` +} + +func (*BuildEvent_InvocationAttemptStarted_) isBuildEvent_Event() {} +func (*BuildEvent_InvocationAttemptFinished_) isBuildEvent_Event() {} +func (*BuildEvent_BuildEnqueued_) isBuildEvent_Event() {} +func (*BuildEvent_BuildFinished_) isBuildEvent_Event() {} +func (*BuildEvent_ConsoleOutput_) isBuildEvent_Event() {} +func (*BuildEvent_ComponentStreamFinished) isBuildEvent_Event() {} +func (*BuildEvent_BazelEvent) isBuildEvent_Event() {} +func (*BuildEvent_BuildExecutionEvent) isBuildEvent_Event() {} +func (*BuildEvent_SourceFetchEvent) isBuildEvent_Event() {} + +func (m *BuildEvent) GetEvent() isBuildEvent_Event { + if m != nil { + return m.Event + } + return nil +} + +func (m *BuildEvent) GetEventTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EventTime + } + return nil +} + +func (m *BuildEvent) GetInvocationAttemptStarted() *BuildEvent_InvocationAttemptStarted { + if x, ok := m.GetEvent().(*BuildEvent_InvocationAttemptStarted_); ok { + return x.InvocationAttemptStarted + } + return nil +} + +func (m *BuildEvent) GetInvocationAttemptFinished() *BuildEvent_InvocationAttemptFinished { + if x, ok := m.GetEvent().(*BuildEvent_InvocationAttemptFinished_); ok { + return x.InvocationAttemptFinished + } + return nil +} + +func (m *BuildEvent) GetBuildEnqueued() *BuildEvent_BuildEnqueued { + if x, ok := m.GetEvent().(*BuildEvent_BuildEnqueued_); ok { + return x.BuildEnqueued + } + return nil +} + +func (m *BuildEvent) GetBuildFinished() *BuildEvent_BuildFinished { + if x, ok := m.GetEvent().(*BuildEvent_BuildFinished_); ok { + return x.BuildFinished + 
} + return nil +} + +func (m *BuildEvent) GetConsoleOutput() *BuildEvent_ConsoleOutput { + if x, ok := m.GetEvent().(*BuildEvent_ConsoleOutput_); ok { + return x.ConsoleOutput + } + return nil +} + +func (m *BuildEvent) GetComponentStreamFinished() *BuildEvent_BuildComponentStreamFinished { + if x, ok := m.GetEvent().(*BuildEvent_ComponentStreamFinished); ok { + return x.ComponentStreamFinished + } + return nil +} + +func (m *BuildEvent) GetBazelEvent() *google_protobuf1.Any { + if x, ok := m.GetEvent().(*BuildEvent_BazelEvent); ok { + return x.BazelEvent + } + return nil +} + +func (m *BuildEvent) GetBuildExecutionEvent() *google_protobuf1.Any { + if x, ok := m.GetEvent().(*BuildEvent_BuildExecutionEvent); ok { + return x.BuildExecutionEvent + } + return nil +} + +func (m *BuildEvent) GetSourceFetchEvent() *google_protobuf1.Any { + if x, ok := m.GetEvent().(*BuildEvent_SourceFetchEvent); ok { + return x.SourceFetchEvent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*BuildEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BuildEvent_OneofMarshaler, _BuildEvent_OneofUnmarshaler, _BuildEvent_OneofSizer, []interface{}{ + (*BuildEvent_InvocationAttemptStarted_)(nil), + (*BuildEvent_InvocationAttemptFinished_)(nil), + (*BuildEvent_BuildEnqueued_)(nil), + (*BuildEvent_BuildFinished_)(nil), + (*BuildEvent_ConsoleOutput_)(nil), + (*BuildEvent_ComponentStreamFinished)(nil), + (*BuildEvent_BazelEvent)(nil), + (*BuildEvent_BuildExecutionEvent)(nil), + (*BuildEvent_SourceFetchEvent)(nil), + } +} + +func _BuildEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*BuildEvent) + // event + switch x := m.Event.(type) { + case *BuildEvent_InvocationAttemptStarted_: + b.EncodeVarint(51<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InvocationAttemptStarted); err != nil { + return err + } + case *BuildEvent_InvocationAttemptFinished_: + b.EncodeVarint(52<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InvocationAttemptFinished); err != nil { + return err + } + case *BuildEvent_BuildEnqueued_: + b.EncodeVarint(53<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BuildEnqueued); err != nil { + return err + } + case *BuildEvent_BuildFinished_: + b.EncodeVarint(55<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BuildFinished); err != nil { + return err + } + case *BuildEvent_ConsoleOutput_: + b.EncodeVarint(56<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConsoleOutput); err != nil { + return err + } + case *BuildEvent_ComponentStreamFinished: + b.EncodeVarint(59<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ComponentStreamFinished); err != nil { + return err + } + case *BuildEvent_BazelEvent: + b.EncodeVarint(60<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BazelEvent); err != nil { + return err + } + case 
*BuildEvent_BuildExecutionEvent: + b.EncodeVarint(61<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BuildExecutionEvent); err != nil { + return err + } + case *BuildEvent_SourceFetchEvent: + b.EncodeVarint(62<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SourceFetchEvent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("BuildEvent.Event has unexpected type %T", x) + } + return nil +} + +func _BuildEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*BuildEvent) + switch tag { + case 51: // event.invocation_attempt_started + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BuildEvent_InvocationAttemptStarted) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_InvocationAttemptStarted_{msg} + return true, err + case 52: // event.invocation_attempt_finished + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BuildEvent_InvocationAttemptFinished) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_InvocationAttemptFinished_{msg} + return true, err + case 53: // event.build_enqueued + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BuildEvent_BuildEnqueued) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_BuildEnqueued_{msg} + return true, err + case 55: // event.build_finished + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BuildEvent_BuildFinished) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_BuildFinished_{msg} + return true, err + case 56: // event.console_output + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BuildEvent_ConsoleOutput) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_ConsoleOutput_{msg} + return true, err + case 59: // event.component_stream_finished + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg 
:= new(BuildEvent_BuildComponentStreamFinished) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_ComponentStreamFinished{msg} + return true, err + case 60: // event.bazel_event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Any) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_BazelEvent{msg} + return true, err + case 61: // event.build_execution_event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Any) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_BuildExecutionEvent{msg} + return true, err + case 62: // event.source_fetch_event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Any) + err := b.DecodeMessage(msg) + m.Event = &BuildEvent_SourceFetchEvent{msg} + return true, err + default: + return false, nil + } +} + +func _BuildEvent_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BuildEvent) + // event + switch x := m.Event.(type) { + case *BuildEvent_InvocationAttemptStarted_: + s := proto.Size(x.InvocationAttemptStarted) + n += proto.SizeVarint(51<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BuildEvent_InvocationAttemptFinished_: + s := proto.Size(x.InvocationAttemptFinished) + n += proto.SizeVarint(52<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BuildEvent_BuildEnqueued_: + s := proto.Size(x.BuildEnqueued) + n += proto.SizeVarint(53<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BuildEvent_BuildFinished_: + s := proto.Size(x.BuildFinished) + n += proto.SizeVarint(55<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BuildEvent_ConsoleOutput_: + s := proto.Size(x.ConsoleOutput) + n += proto.SizeVarint(56<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BuildEvent_ComponentStreamFinished: + s := 
proto.Size(x.ComponentStreamFinished) + n += proto.SizeVarint(59<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BuildEvent_BazelEvent: + s := proto.Size(x.BazelEvent) + n += proto.SizeVarint(60<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BuildEvent_BuildExecutionEvent: + s := proto.Size(x.BuildExecutionEvent) + n += proto.SizeVarint(61<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BuildEvent_SourceFetchEvent: + s := proto.Size(x.SourceFetchEvent) + n += proto.SizeVarint(62<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Notification that the build system has attempted to run the build tool. +type BuildEvent_InvocationAttemptStarted struct { + // The number of the invocation attempt, starting at 1 and increasing by 1 + // for each new attempt. Can be used to determine if there is a later + // invocation attempt replacing the current one a client is processing. + AttemptNumber int64 `protobuf:"varint,1,opt,name=attempt_number,json=attemptNumber" json:"attempt_number,omitempty"` +} + +func (m *BuildEvent_InvocationAttemptStarted) Reset() { *m = BuildEvent_InvocationAttemptStarted{} } +func (m *BuildEvent_InvocationAttemptStarted) String() string { return proto.CompactTextString(m) } +func (*BuildEvent_InvocationAttemptStarted) ProtoMessage() {} +func (*BuildEvent_InvocationAttemptStarted) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 0} +} + +func (m *BuildEvent_InvocationAttemptStarted) GetAttemptNumber() int64 { + if m != nil { + return m.AttemptNumber + } + return 0 +} + +// Notification that an invocation attempt has finished. +type BuildEvent_InvocationAttemptFinished struct { + // The exit code of the build tool. 
+ ExitCode *google_protobuf3.Int32Value `protobuf:"bytes,2,opt,name=exit_code,json=exitCode" json:"exit_code,omitempty"` + // Final status of the invocation. + InvocationStatus *BuildStatus `protobuf:"bytes,3,opt,name=invocation_status,json=invocationStatus" json:"invocation_status,omitempty"` +} + +func (m *BuildEvent_InvocationAttemptFinished) Reset() { *m = BuildEvent_InvocationAttemptFinished{} } +func (m *BuildEvent_InvocationAttemptFinished) String() string { return proto.CompactTextString(m) } +func (*BuildEvent_InvocationAttemptFinished) ProtoMessage() {} +func (*BuildEvent_InvocationAttemptFinished) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 1} +} + +func (m *BuildEvent_InvocationAttemptFinished) GetExitCode() *google_protobuf3.Int32Value { + if m != nil { + return m.ExitCode + } + return nil +} + +func (m *BuildEvent_InvocationAttemptFinished) GetInvocationStatus() *BuildStatus { + if m != nil { + return m.InvocationStatus + } + return nil +} + +// Notification that the build request is enqueued. It could happen when +// a new build request is inserted into the build queue, or when a +// build request is put back into the build queue due to a previous build +// failure. +type BuildEvent_BuildEnqueued struct { +} + +func (m *BuildEvent_BuildEnqueued) Reset() { *m = BuildEvent_BuildEnqueued{} } +func (m *BuildEvent_BuildEnqueued) String() string { return proto.CompactTextString(m) } +func (*BuildEvent_BuildEnqueued) ProtoMessage() {} +func (*BuildEvent_BuildEnqueued) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 2} } + +// Notification that the build request has finished, and no further +// invocations will occur. Note that this applies to the entire Build. +// Individual invocations trigger InvocationFinished when they finish. +type BuildEvent_BuildFinished struct { + // Final status of the build. 
+ Status *BuildStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` +} + +func (m *BuildEvent_BuildFinished) Reset() { *m = BuildEvent_BuildFinished{} } +func (m *BuildEvent_BuildFinished) String() string { return proto.CompactTextString(m) } +func (*BuildEvent_BuildFinished) ProtoMessage() {} +func (*BuildEvent_BuildFinished) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 3} } + +func (m *BuildEvent_BuildFinished) GetStatus() *BuildStatus { + if m != nil { + return m.Status + } + return nil +} + +// Textual output written to standard output or standard error. +type BuildEvent_ConsoleOutput struct { + // The output stream type. + Type ConsoleOutputStream `protobuf:"varint,1,opt,name=type,enum=google.devtools.build.v1.ConsoleOutputStream" json:"type,omitempty"` + // The output stream content. + // + // Types that are valid to be assigned to Output: + // *BuildEvent_ConsoleOutput_TextOutput + // *BuildEvent_ConsoleOutput_BinaryOutput + Output isBuildEvent_ConsoleOutput_Output `protobuf_oneof:"output"` +} + +func (m *BuildEvent_ConsoleOutput) Reset() { *m = BuildEvent_ConsoleOutput{} } +func (m *BuildEvent_ConsoleOutput) String() string { return proto.CompactTextString(m) } +func (*BuildEvent_ConsoleOutput) ProtoMessage() {} +func (*BuildEvent_ConsoleOutput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 4} } + +type isBuildEvent_ConsoleOutput_Output interface { + isBuildEvent_ConsoleOutput_Output() +} + +type BuildEvent_ConsoleOutput_TextOutput struct { + TextOutput string `protobuf:"bytes,2,opt,name=text_output,json=textOutput,oneof"` +} +type BuildEvent_ConsoleOutput_BinaryOutput struct { + BinaryOutput []byte `protobuf:"bytes,3,opt,name=binary_output,json=binaryOutput,proto3,oneof"` +} + +func (*BuildEvent_ConsoleOutput_TextOutput) isBuildEvent_ConsoleOutput_Output() {} +func (*BuildEvent_ConsoleOutput_BinaryOutput) isBuildEvent_ConsoleOutput_Output() {} + +func (m *BuildEvent_ConsoleOutput) GetOutput() 
isBuildEvent_ConsoleOutput_Output { + if m != nil { + return m.Output + } + return nil +} + +func (m *BuildEvent_ConsoleOutput) GetType() ConsoleOutputStream { + if m != nil { + return m.Type + } + return ConsoleOutputStream_UNKNOWN +} + +func (m *BuildEvent_ConsoleOutput) GetTextOutput() string { + if x, ok := m.GetOutput().(*BuildEvent_ConsoleOutput_TextOutput); ok { + return x.TextOutput + } + return "" +} + +func (m *BuildEvent_ConsoleOutput) GetBinaryOutput() []byte { + if x, ok := m.GetOutput().(*BuildEvent_ConsoleOutput_BinaryOutput); ok { + return x.BinaryOutput + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*BuildEvent_ConsoleOutput) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BuildEvent_ConsoleOutput_OneofMarshaler, _BuildEvent_ConsoleOutput_OneofUnmarshaler, _BuildEvent_ConsoleOutput_OneofSizer, []interface{}{ + (*BuildEvent_ConsoleOutput_TextOutput)(nil), + (*BuildEvent_ConsoleOutput_BinaryOutput)(nil), + } +} + +func _BuildEvent_ConsoleOutput_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*BuildEvent_ConsoleOutput) + // output + switch x := m.Output.(type) { + case *BuildEvent_ConsoleOutput_TextOutput: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.TextOutput) + case *BuildEvent_ConsoleOutput_BinaryOutput: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.BinaryOutput) + case nil: + default: + return fmt.Errorf("BuildEvent_ConsoleOutput.Output has unexpected type %T", x) + } + return nil +} + +func _BuildEvent_ConsoleOutput_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*BuildEvent_ConsoleOutput) + switch tag { + case 2: // output.text_output + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + 
m.Output = &BuildEvent_ConsoleOutput_TextOutput{x} + return true, err + case 3: // output.binary_output + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Output = &BuildEvent_ConsoleOutput_BinaryOutput{x} + return true, err + default: + return false, nil + } +} + +func _BuildEvent_ConsoleOutput_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BuildEvent_ConsoleOutput) + // output + switch x := m.Output.(type) { + case *BuildEvent_ConsoleOutput_TextOutput: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.TextOutput))) + n += len(x.TextOutput) + case *BuildEvent_ConsoleOutput_BinaryOutput: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.BinaryOutput))) + n += len(x.BinaryOutput) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Notification of the end of a build event stream published by a build +// component other than CONTROLLER (See StreamId.BuildComponents). +type BuildEvent_BuildComponentStreamFinished struct { + // How the event stream finished. 
+ Type BuildEvent_BuildComponentStreamFinished_FinishType `protobuf:"varint,1,opt,name=type,enum=google.devtools.build.v1.BuildEvent_BuildComponentStreamFinished_FinishType" json:"type,omitempty"` +} + +func (m *BuildEvent_BuildComponentStreamFinished) Reset() { + *m = BuildEvent_BuildComponentStreamFinished{} +} +func (m *BuildEvent_BuildComponentStreamFinished) String() string { return proto.CompactTextString(m) } +func (*BuildEvent_BuildComponentStreamFinished) ProtoMessage() {} +func (*BuildEvent_BuildComponentStreamFinished) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 5} +} + +func (m *BuildEvent_BuildComponentStreamFinished) GetType() BuildEvent_BuildComponentStreamFinished_FinishType { + if m != nil { + return m.Type + } + return BuildEvent_BuildComponentStreamFinished_FINISH_TYPE_UNSPECIFIED +} + +// Unique identifier for a build event stream. +type StreamId struct { + // The id of a Build message. + BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId" json:"build_id,omitempty"` + // The unique invocation ID within this build. + // It should be the same as {invocation} (below) during the migration. + InvocationId string `protobuf:"bytes,6,opt,name=invocation_id,json=invocationId" json:"invocation_id,omitempty"` + // The component that emitted this event. 
+ Component StreamId_BuildComponent `protobuf:"varint,3,opt,name=component,enum=google.devtools.build.v1.StreamId_BuildComponent" json:"component,omitempty"` +} + +func (m *StreamId) Reset() { *m = StreamId{} } +func (m *StreamId) String() string { return proto.CompactTextString(m) } +func (*StreamId) ProtoMessage() {} +func (*StreamId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *StreamId) GetBuildId() string { + if m != nil { + return m.BuildId + } + return "" +} + +func (m *StreamId) GetInvocationId() string { + if m != nil { + return m.InvocationId + } + return "" +} + +func (m *StreamId) GetComponent() StreamId_BuildComponent { + if m != nil { + return m.Component + } + return StreamId_UNKNOWN_COMPONENT +} + +func init() { + proto.RegisterType((*BuildEvent)(nil), "google.devtools.build.v1.BuildEvent") + proto.RegisterType((*BuildEvent_InvocationAttemptStarted)(nil), "google.devtools.build.v1.BuildEvent.InvocationAttemptStarted") + proto.RegisterType((*BuildEvent_InvocationAttemptFinished)(nil), "google.devtools.build.v1.BuildEvent.InvocationAttemptFinished") + proto.RegisterType((*BuildEvent_BuildEnqueued)(nil), "google.devtools.build.v1.BuildEvent.BuildEnqueued") + proto.RegisterType((*BuildEvent_BuildFinished)(nil), "google.devtools.build.v1.BuildEvent.BuildFinished") + proto.RegisterType((*BuildEvent_ConsoleOutput)(nil), "google.devtools.build.v1.BuildEvent.ConsoleOutput") + proto.RegisterType((*BuildEvent_BuildComponentStreamFinished)(nil), "google.devtools.build.v1.BuildEvent.BuildComponentStreamFinished") + proto.RegisterType((*StreamId)(nil), "google.devtools.build.v1.StreamId") + proto.RegisterEnum("google.devtools.build.v1.ConsoleOutputStream", ConsoleOutputStream_name, ConsoleOutputStream_value) + proto.RegisterEnum("google.devtools.build.v1.BuildEvent_BuildComponentStreamFinished_FinishType", BuildEvent_BuildComponentStreamFinished_FinishType_name, BuildEvent_BuildComponentStreamFinished_FinishType_value) + 
proto.RegisterEnum("google.devtools.build.v1.StreamId_BuildComponent", StreamId_BuildComponent_name, StreamId_BuildComponent_value) +} + +func init() { proto.RegisterFile("google/devtools/build/v1/build_events.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 927 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x6d, 0x6f, 0xe3, 0x44, + 0x10, 0x8e, 0xdb, 0xa3, 0x4d, 0xa7, 0x49, 0xce, 0xb7, 0xc7, 0xa9, 0x8e, 0x5b, 0xf1, 0x52, 0x54, + 0x09, 0x81, 0x70, 0xd4, 0x14, 0x74, 0x07, 0x47, 0x4f, 0xca, 0x8b, 0xab, 0x98, 0xeb, 0xd9, 0xd1, + 0x26, 0xe5, 0x78, 0xf9, 0x10, 0x1c, 0x7b, 0x9b, 0xb3, 0x94, 0x78, 0x8d, 0xbd, 0x0e, 0x0d, 0x12, + 0x82, 0x5f, 0x83, 0xc4, 0x1f, 0xe1, 0xc7, 0xf0, 0x07, 0xe0, 0x23, 0xf2, 0xee, 0xba, 0x49, 0xda, + 0xa6, 0x77, 0x85, 0x6f, 0xbb, 0x33, 0xcf, 0x3c, 0xcf, 0xcc, 0xec, 0x8c, 0x65, 0xf8, 0x78, 0x44, + 0xe9, 0x68, 0x4c, 0x6a, 0x3e, 0x99, 0x32, 0x4a, 0xc7, 0x49, 0x6d, 0x98, 0x06, 0x63, 0xbf, 0x36, + 0x3d, 0x14, 0x87, 0x01, 0x99, 0x92, 0x90, 0x25, 0x46, 0x14, 0x53, 0x46, 0x91, 0x26, 0xc0, 0x46, + 0x0e, 0x36, 0x38, 0xc6, 0x98, 0x1e, 0xea, 0x7b, 0x92, 0xc6, 0x8d, 0x82, 0x9a, 0x1b, 0x86, 0x94, + 0xb9, 0x2c, 0xa0, 0xa1, 0x8c, 0xd3, 0x5f, 0x27, 0x92, 0x30, 0x97, 0xa5, 0x39, 0xb8, 0x2a, 0xc1, + 0xfc, 0x36, 0x4c, 0xcf, 0x6b, 0x6e, 0x38, 0x93, 0xae, 0x77, 0xaf, 0xba, 0x58, 0x30, 0x21, 0x09, + 0x73, 0x27, 0x91, 0x04, 0xbc, 0x73, 0x15, 0xf0, 0x53, 0xec, 0x46, 0x11, 0x89, 0x73, 0xee, 0x1d, + 0xe9, 0x8f, 0x23, 0xaf, 0xb6, 0x28, 0xba, 0xff, 0x77, 0x09, 0xa0, 0x99, 0xe5, 0x62, 0x66, 0xf5, + 0xa2, 0xcf, 0x01, 0x78, 0xe1, 0x83, 0x4c, 0x40, 0x53, 0xde, 0x53, 0x3e, 0xdc, 0xae, 0xeb, 0x86, + 0xac, 0x3e, 0x27, 0x37, 0xfa, 0xb9, 0x3a, 0xde, 0xe2, 0xe8, 0xec, 0x8e, 0x7e, 0x01, 0x3d, 0x08, + 0xa7, 0xd4, 0xe3, 0x0d, 0x18, 0xb8, 0x8c, 0x91, 0x49, 0xc4, 0xb2, 0x0a, 0x63, 0x46, 0x7c, 0xed, + 0x88, 0x53, 0x1d, 0x1b, 0xab, 0x1a, 0x69, 0xcc, 0x93, 0x30, 0xac, 0x4b, 0x9a, 0x86, 0x60, 0xe9, + 0x09, 0x92, 
0x4e, 0x01, 0x6b, 0xc1, 0x0a, 0x1f, 0xfa, 0x4d, 0x81, 0xdd, 0x1b, 0xf4, 0xcf, 0x83, + 0x30, 0x48, 0x5e, 0x11, 0x5f, 0xfb, 0x94, 0x27, 0xf0, 0xec, 0xbf, 0x25, 0x70, 0x22, 0x59, 0x3a, + 0x05, 0x5c, 0x0d, 0x56, 0x39, 0xd1, 0xf7, 0x50, 0x91, 0xb3, 0x13, 0xfe, 0x98, 0x92, 0x94, 0xf8, + 0xda, 0x67, 0x5c, 0xb4, 0xfe, 0x46, 0xa2, 0xe2, 0x28, 0x23, 0x3b, 0x05, 0x5c, 0x1e, 0x2e, 0x1a, + 0xe6, 0xe4, 0x97, 0x15, 0x3d, 0xbe, 0x2b, 0xf9, 0x42, 0x15, 0x82, 0x7c, 0x31, 0x73, 0x8f, 0x86, + 0x09, 0x1d, 0x93, 0x01, 0x4d, 0x59, 0x94, 0x32, 0xed, 0xc9, 0x1d, 0xc8, 0x5b, 0x22, 0xd4, 0xe1, + 0x91, 0x19, 0xb9, 0xb7, 0x68, 0x40, 0xbf, 0x42, 0xd5, 0xa3, 0x93, 0x88, 0x86, 0xd9, 0x5c, 0x25, + 0x2c, 0x26, 0xee, 0x64, 0x5e, 0xc4, 0x53, 0xae, 0xd3, 0x78, 0xf3, 0x22, 0x5a, 0x39, 0x55, 0x8f, + 0x33, 0x2d, 0xd4, 0xb4, 0xe3, 0xdd, 0xec, 0x42, 0x8f, 0x61, 0x7b, 0xe8, 0xfe, 0x4c, 0xc6, 0x62, + 0xa7, 0xb5, 0x2f, 0xb9, 0xe4, 0xdb, 0xd7, 0xa6, 0xba, 0x11, 0xce, 0x3a, 0x05, 0x0c, 0x1c, 0x2a, + 0xb6, 0xe1, 0x2b, 0x78, 0x24, 0x1f, 0xf4, 0x82, 0x78, 0x29, 0x9f, 0x2b, 0x41, 0x71, 0x7c, 0x2b, + 0xc5, 0x43, 0xf1, 0x72, 0x79, 0x8c, 0xe0, 0x6a, 0x03, 0x4a, 0x68, 0x1a, 0x7b, 0x64, 0x70, 0x4e, + 0x98, 0xf7, 0x4a, 0x12, 0x3d, 0xbb, 0x95, 0x48, 0x15, 0x11, 0x27, 0x59, 0x00, 0x67, 0xd1, 0x1b, + 0xa0, 0xad, 0xda, 0x0e, 0x74, 0x00, 0x95, 0x7c, 0xea, 0xc3, 0x74, 0x32, 0x24, 0x31, 0xdf, 0xdf, + 0x75, 0x5c, 0x96, 0x56, 0x9b, 0x1b, 0xf5, 0x3f, 0x14, 0xa8, 0xae, 0x1c, 0x70, 0xf4, 0x04, 0xb6, + 0xc8, 0x45, 0xc0, 0x06, 0x1e, 0xf5, 0x89, 0xb6, 0xc6, 0xb3, 0xdb, 0xbd, 0x96, 0x9d, 0x15, 0xb2, + 0xa3, 0xfa, 0xd7, 0xee, 0x38, 0x25, 0xb8, 0x98, 0xa1, 0x5b, 0xd4, 0x27, 0x08, 0xc3, 0x83, 0x85, + 0xfd, 0x13, 0x1f, 0x19, 0x6d, 0x9d, 0x33, 0x1c, 0xbc, 0xe6, 0x79, 0x7b, 0x1c, 0x8c, 0xd5, 0x79, + 0xbc, 0xb0, 0xe8, 0xf7, 0xa1, 0xbc, 0xb4, 0x16, 0xba, 0x2d, 0x0d, 0x97, 0xf9, 0x1e, 0xc3, 0x86, + 0x94, 0x52, 0xee, 0x22, 0x25, 0x83, 0xf4, 0xdf, 0x15, 0x28, 0x2f, 0x8d, 0x2f, 0x6a, 0xc0, 0x3d, + 0x36, 0x8b, 0xc4, 0xb7, 0xaf, 0x52, 0xff, 0x64, 
0x35, 0xdd, 0x52, 0x98, 0x98, 0x38, 0xcc, 0x43, + 0xd1, 0xfb, 0xb0, 0xcd, 0xc8, 0x05, 0xcb, 0x57, 0x29, 0xeb, 0xe2, 0x56, 0x36, 0x59, 0x99, 0x51, + 0xaa, 0x1c, 0x40, 0x79, 0x18, 0x84, 0x6e, 0x3c, 0xcb, 0x41, 0x59, 0xa3, 0x4a, 0x9d, 0x02, 0x2e, + 0x09, 0xb3, 0x80, 0x35, 0x8b, 0xb0, 0x21, 0xfc, 0xfa, 0x9f, 0x0a, 0xec, 0xdd, 0x36, 0xff, 0xe8, + 0x87, 0xa5, 0xbc, 0x4f, 0xff, 0xf7, 0x42, 0x19, 0xe2, 0xd0, 0x9f, 0x45, 0x44, 0x94, 0xb5, 0xdf, + 0x06, 0x98, 0xdb, 0xd0, 0x2e, 0xec, 0x9c, 0x58, 0xb6, 0xd5, 0xeb, 0x0c, 0xfa, 0xdf, 0x76, 0xcd, + 0xc1, 0x99, 0xdd, 0xeb, 0x9a, 0x2d, 0xeb, 0xc4, 0x32, 0xdb, 0x6a, 0x01, 0x95, 0xa0, 0x28, 0x9c, + 0x66, 0x5b, 0x55, 0xd0, 0x36, 0x6c, 0x9a, 0xdf, 0x74, 0x2d, 0x6c, 0xb6, 0xd5, 0xb5, 0xe6, 0x26, + 0xbc, 0xc5, 0x47, 0x7f, 0xff, 0x2f, 0x05, 0x8a, 0x42, 0xd2, 0xf2, 0x51, 0x15, 0x8a, 0x62, 0xd3, + 0x02, 0x9f, 0x57, 0xb0, 0x85, 0x37, 0xf9, 0xdd, 0xf2, 0xd1, 0x07, 0x50, 0x5e, 0x98, 0xab, 0xc0, + 0xd7, 0x36, 0xb8, 0xbf, 0x34, 0x37, 0x5a, 0x3e, 0x72, 0x60, 0xeb, 0x72, 0xfb, 0x79, 0x2f, 0x2b, + 0xf5, 0xc3, 0xd5, 0x2d, 0xc8, 0x65, 0xaf, 0x34, 0x00, 0xcf, 0x39, 0xf6, 0x5f, 0x40, 0x65, 0xd9, + 0x89, 0x1e, 0xc1, 0x83, 0x33, 0xfb, 0xb9, 0xed, 0xbc, 0xb4, 0x07, 0x2d, 0xe7, 0x45, 0xd7, 0xb1, + 0x4d, 0xbb, 0xaf, 0x16, 0x50, 0x05, 0xa0, 0xe5, 0xd8, 0x7d, 0xec, 0x9c, 0x9e, 0x9a, 0x58, 0x55, + 0x10, 0xc0, 0xc6, 0x4b, 0x07, 0x3f, 0x37, 0xb1, 0xba, 0x86, 0x8a, 0x70, 0xaf, 0xef, 0x38, 0xa7, + 0xea, 0xfa, 0x47, 0x5f, 0xc0, 0xc3, 0x1b, 0xe6, 0x25, 0xeb, 0x8c, 0xe4, 0x54, 0x0b, 0x59, 0x64, + 0xaf, 0xdf, 0x76, 0xce, 0xfa, 0x82, 0xa5, 0xd7, 0x6f, 0x9b, 0x18, 0xab, 0x6b, 0xcd, 0x04, 0xf6, + 0x3c, 0x3a, 0x59, 0x59, 0x4d, 0xf3, 0xfe, 0xfc, 0x45, 0xbb, 0xd9, 0x86, 0x76, 0x95, 0xef, 0x8e, + 0x25, 0x78, 0x44, 0xc7, 0x6e, 0x38, 0x32, 0x68, 0x3c, 0xaa, 0x8d, 0x48, 0xc8, 0xf7, 0xb7, 0x26, + 0x5c, 0x6e, 0x14, 0x24, 0xd7, 0x7f, 0x4b, 0x9e, 0xf2, 0xc3, 0x3f, 0x8a, 0x32, 0xdc, 0xe0, 0xe0, + 0xa3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x84, 0x0f, 0x0f, 0xdf, 0x27, 0x09, 0x00, 
0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/build_status.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/build_status.pb.go new file mode 100644 index 000000000..940ddd69e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/build_status.pb.go @@ -0,0 +1,130 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/build/v1/build_status.proto + +package build + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The end result of the Build. +type BuildStatus_Result int32 + +const ( + // Unspecified or unknown. + BuildStatus_UNKNOWN_STATUS BuildStatus_Result = 0 + // Build was successful and tests (if requested) all pass. + BuildStatus_COMMAND_SUCCEEDED BuildStatus_Result = 1 + // Build error and/or test failure. + BuildStatus_COMMAND_FAILED BuildStatus_Result = 2 + // Unable to obtain a result due to input provided by the user. + BuildStatus_USER_ERROR BuildStatus_Result = 3 + // Unable to obtain a result due to a failure within the build system. + BuildStatus_SYSTEM_ERROR BuildStatus_Result = 4 + // Build required too many resources, such as build tool RAM. + BuildStatus_RESOURCE_EXHAUSTED BuildStatus_Result = 5 + // An invocation attempt time exceeded its deadline. + BuildStatus_INVOCATION_DEADLINE_EXCEEDED BuildStatus_Result = 6 + // Build request time exceeded the request_deadline + BuildStatus_REQUEST_DEADLINE_EXCEEDED BuildStatus_Result = 8 + // The build was cancelled by a call to CancelBuild. 
+ BuildStatus_CANCELLED BuildStatus_Result = 7 +) + +var BuildStatus_Result_name = map[int32]string{ + 0: "UNKNOWN_STATUS", + 1: "COMMAND_SUCCEEDED", + 2: "COMMAND_FAILED", + 3: "USER_ERROR", + 4: "SYSTEM_ERROR", + 5: "RESOURCE_EXHAUSTED", + 6: "INVOCATION_DEADLINE_EXCEEDED", + 8: "REQUEST_DEADLINE_EXCEEDED", + 7: "CANCELLED", +} +var BuildStatus_Result_value = map[string]int32{ + "UNKNOWN_STATUS": 0, + "COMMAND_SUCCEEDED": 1, + "COMMAND_FAILED": 2, + "USER_ERROR": 3, + "SYSTEM_ERROR": 4, + "RESOURCE_EXHAUSTED": 5, + "INVOCATION_DEADLINE_EXCEEDED": 6, + "REQUEST_DEADLINE_EXCEEDED": 8, + "CANCELLED": 7, +} + +func (x BuildStatus_Result) String() string { + return proto.EnumName(BuildStatus_Result_name, int32(x)) +} +func (BuildStatus_Result) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +// Status used for both invocation attempt and overall build completion. +type BuildStatus struct { + // The end result. + Result BuildStatus_Result `protobuf:"varint,1,opt,name=result,enum=google.devtools.build.v1.BuildStatus_Result" json:"result,omitempty"` + // Fine-grained diagnostic information to complement the status. 
+ Details *google_protobuf1.Any `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"` +} + +func (m *BuildStatus) Reset() { *m = BuildStatus{} } +func (m *BuildStatus) String() string { return proto.CompactTextString(m) } +func (*BuildStatus) ProtoMessage() {} +func (*BuildStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *BuildStatus) GetResult() BuildStatus_Result { + if m != nil { + return m.Result + } + return BuildStatus_UNKNOWN_STATUS +} + +func (m *BuildStatus) GetDetails() *google_protobuf1.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*BuildStatus)(nil), "google.devtools.build.v1.BuildStatus") + proto.RegisterEnum("google.devtools.build.v1.BuildStatus_Result", BuildStatus_Result_name, BuildStatus_Result_value) +} + +func init() { proto.RegisterFile("google/devtools/build/v1/build_status.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 390 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x0b, 0xd3, 0x30, + 0x18, 0xc6, 0xcd, 0xd4, 0x4e, 0x33, 0x1d, 0x35, 0xa8, 0x6c, 0x63, 0xc2, 0xd8, 0x69, 0xa0, 0xa4, + 0x6c, 0x1e, 0xc5, 0x43, 0xd6, 0x44, 0x2c, 0x6e, 0xe9, 0x4c, 0x5a, 0xff, 0x5d, 0x4a, 0xe6, 0x6a, + 0x29, 0xd4, 0x66, 0xac, 0xe9, 0x60, 0x1f, 0xd3, 0x93, 0x5f, 0xc5, 0xa3, 0xf4, 0x1f, 0x0c, 0x74, + 0xb7, 0xf4, 0x7d, 0x7e, 0xcf, 0xf3, 0xbe, 0x3c, 0x14, 0xbe, 0x4c, 0xb4, 0x4e, 0xb2, 0xd8, 0x39, + 0xc4, 0x67, 0xa3, 0x75, 0x56, 0x38, 0xfb, 0x32, 0xcd, 0x0e, 0xce, 0x79, 0xd9, 0x3c, 0xa2, 0xc2, + 0x28, 0x53, 0x16, 0xf8, 0x78, 0xd2, 0x46, 0xa3, 0x51, 0x03, 0xe3, 0x0e, 0xc6, 0x35, 0x83, 0xcf, + 0xcb, 0xc9, 0xb4, 0x8d, 0x51, 0xc7, 0xd4, 0x51, 0x79, 0xae, 0x8d, 0x32, 0xa9, 0xce, 0x5b, 0xdf, + 0x64, 0xdc, 0xaa, 0xf5, 0xd7, 0xbe, 0xfc, 0xe1, 0xa8, 0xfc, 0xd2, 0x48, 0xf3, 0xdf, 0x3d, 0x38, + 0x58, 0x57, 0x29, 0xb2, 0x5e, 0x84, 0x28, 0xb4, 0x4e, 0x71, 0x51, 0x66, 0x66, 0x04, 0x66, 0x60, + 
0x31, 0x5c, 0xbd, 0xc2, 0xb7, 0x76, 0xe2, 0x2b, 0x1b, 0x16, 0xb5, 0x47, 0xb4, 0x5e, 0x84, 0x61, + 0xff, 0x10, 0x1b, 0x95, 0x66, 0xc5, 0xa8, 0x37, 0x03, 0x8b, 0xc1, 0xea, 0x69, 0x17, 0xd3, 0x9d, + 0x80, 0x49, 0x7e, 0x11, 0x1d, 0x34, 0xff, 0x05, 0xa0, 0xd5, 0x44, 0x20, 0x04, 0x87, 0x21, 0xff, + 0xc0, 0xfd, 0xcf, 0x3c, 0x92, 0x01, 0x09, 0x42, 0x69, 0xdf, 0x41, 0xcf, 0xe0, 0x13, 0xd7, 0xdf, + 0x6e, 0x09, 0xa7, 0x91, 0x0c, 0x5d, 0x97, 0x31, 0xca, 0xa8, 0x0d, 0x2a, 0xb4, 0x1b, 0xbf, 0x23, + 0xde, 0x86, 0x51, 0xbb, 0x87, 0x86, 0x10, 0x86, 0x92, 0x89, 0x88, 0x09, 0xe1, 0x0b, 0xfb, 0x2e, + 0xb2, 0xe1, 0x23, 0xf9, 0x55, 0x06, 0x6c, 0xdb, 0x4e, 0xee, 0xa1, 0xe7, 0x10, 0x09, 0x26, 0xfd, + 0x50, 0xb8, 0x2c, 0x62, 0x5f, 0xde, 0x93, 0x50, 0x06, 0x8c, 0xda, 0xf7, 0xd1, 0x0c, 0x4e, 0x3d, + 0xfe, 0xc9, 0x77, 0x49, 0xe0, 0xf9, 0x3c, 0xa2, 0x8c, 0xd0, 0x8d, 0xc7, 0x2b, 0xa4, 0xdd, 0x67, + 0xa1, 0x17, 0x70, 0x2c, 0xd8, 0xc7, 0x90, 0xc9, 0xe0, 0x3f, 0xf2, 0x03, 0xf4, 0x18, 0x3e, 0x74, + 0x09, 0x77, 0xd9, 0xa6, 0xba, 0xa4, 0xbf, 0x36, 0x70, 0xfa, 0x5d, 0xff, 0xbc, 0x59, 0xdf, 0xda, + 0xbe, 0xea, 0x6f, 0x57, 0xb5, 0xb2, 0x03, 0xdf, 0xde, 0xb6, 0x74, 0xa2, 0x33, 0x95, 0x27, 0x58, + 0x9f, 0x12, 0x27, 0x89, 0xf3, 0xba, 0x33, 0xa7, 0x91, 0xd4, 0x31, 0x2d, 0xfe, 0xfd, 0x59, 0xde, + 0xd4, 0x8f, 0x3f, 0x00, 0xec, 0xad, 0x1a, 0x7e, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x3d, + 0xf5, 0x87, 0x58, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/publish_build_event.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/publish_build_event.pb.go new file mode 100644 index 000000000..ee9fd227b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/build/v1/publish_build_event.pb.go @@ -0,0 +1,465 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/devtools/build/v1/publish_build_event.proto + +package build + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf4 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf5 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The service level of the build request. Backends only uses this value when +// the BuildEnqueued event is published to determine what level of service +// this build should receive. +type PublishLifecycleEventRequest_ServiceLevel int32 + +const ( + // Non-interactive builds can tolerate longer event latencies. This is the + // default ServiceLevel if callers do not specify one. + PublishLifecycleEventRequest_NONINTERACTIVE PublishLifecycleEventRequest_ServiceLevel = 0 + // The events of an interactive build should be delivered with low latency. + PublishLifecycleEventRequest_INTERACTIVE PublishLifecycleEventRequest_ServiceLevel = 1 +) + +var PublishLifecycleEventRequest_ServiceLevel_name = map[int32]string{ + 0: "NONINTERACTIVE", + 1: "INTERACTIVE", +} +var PublishLifecycleEventRequest_ServiceLevel_value = map[string]int32{ + "NONINTERACTIVE": 0, + "INTERACTIVE": 1, +} + +func (x PublishLifecycleEventRequest_ServiceLevel) String() string { + return proto.EnumName(PublishLifecycleEventRequest_ServiceLevel_name, int32(x)) +} +func (PublishLifecycleEventRequest_ServiceLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 0} +} + +// Publishes 'lifecycle events' that update the high-level state of a build: +// - BuildEnqueued: When a build is scheduled. 
+// - InvocationAttemptStarted: When work for a build starts; there can be +// multiple invocations for a build (e.g. retries). +// - InvocationAttemptCompleted: When work for a build finishes. +// - BuildFinished: When a build is finished. +type PublishLifecycleEventRequest struct { + // The interactivity of this build. + ServiceLevel PublishLifecycleEventRequest_ServiceLevel `protobuf:"varint,1,opt,name=service_level,json=serviceLevel,enum=google.devtools.build.v1.PublishLifecycleEventRequest_ServiceLevel" json:"service_level,omitempty"` + // The lifecycle build event. If this is a build tool event, the RPC will fail + // with INVALID_REQUEST. + BuildEvent *OrderedBuildEvent `protobuf:"bytes,2,opt,name=build_event,json=buildEvent" json:"build_event,omitempty"` + // If the next event for this build or invocation (depending on the event + // type) hasn't been published after this duration from when {build_event} + // is written to BES, consider this stream expired. If this field is not set, + // BES backend will use its own default value. + StreamTimeout *google_protobuf4.Duration `protobuf:"bytes,3,opt,name=stream_timeout,json=streamTimeout" json:"stream_timeout,omitempty"` + // Additional information about a build request. These are define by the event + // publishers, and the Build Event Service does not validate or interpret + // them. They are used while notifying internal systems of new builds and + // invocations if the OrderedBuildEvent.event type is + // BuildEnqueued/InvocationAttemptStarted. + NotificationKeywords []string `protobuf:"bytes,4,rep,name=notification_keywords,json=notificationKeywords" json:"notification_keywords,omitempty"` + // This field identifies which project (if any) the build is associated with. 
+ ProjectId string `protobuf:"bytes,6,opt,name=project_id,json=projectId" json:"project_id,omitempty"` +} + +func (m *PublishLifecycleEventRequest) Reset() { *m = PublishLifecycleEventRequest{} } +func (m *PublishLifecycleEventRequest) String() string { return proto.CompactTextString(m) } +func (*PublishLifecycleEventRequest) ProtoMessage() {} +func (*PublishLifecycleEventRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *PublishLifecycleEventRequest) GetServiceLevel() PublishLifecycleEventRequest_ServiceLevel { + if m != nil { + return m.ServiceLevel + } + return PublishLifecycleEventRequest_NONINTERACTIVE +} + +func (m *PublishLifecycleEventRequest) GetBuildEvent() *OrderedBuildEvent { + if m != nil { + return m.BuildEvent + } + return nil +} + +func (m *PublishLifecycleEventRequest) GetStreamTimeout() *google_protobuf4.Duration { + if m != nil { + return m.StreamTimeout + } + return nil +} + +func (m *PublishLifecycleEventRequest) GetNotificationKeywords() []string { + if m != nil { + return m.NotificationKeywords + } + return nil +} + +func (m *PublishLifecycleEventRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// States which event has been committed. Any failure to commit will cause +// RPC errors, hence not recorded by this proto. +type PublishBuildToolEventStreamResponse struct { + // The stream that contains this event. + StreamId *StreamId `protobuf:"bytes,1,opt,name=stream_id,json=streamId" json:"stream_id,omitempty"` + // The sequence number of this event that has been committed. 
+ SequenceNumber int64 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` +} + +func (m *PublishBuildToolEventStreamResponse) Reset() { *m = PublishBuildToolEventStreamResponse{} } +func (m *PublishBuildToolEventStreamResponse) String() string { return proto.CompactTextString(m) } +func (*PublishBuildToolEventStreamResponse) ProtoMessage() {} +func (*PublishBuildToolEventStreamResponse) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{1} +} + +func (m *PublishBuildToolEventStreamResponse) GetStreamId() *StreamId { + if m != nil { + return m.StreamId + } + return nil +} + +func (m *PublishBuildToolEventStreamResponse) GetSequenceNumber() int64 { + if m != nil { + return m.SequenceNumber + } + return 0 +} + +// Build event with contextual information about the stream it belongs to and +// its position in that stream. +type OrderedBuildEvent struct { + // Which build event stream this event belongs to. + StreamId *StreamId `protobuf:"bytes,1,opt,name=stream_id,json=streamId" json:"stream_id,omitempty"` + // The position of this event in the stream. The sequence numbers for a build + // event stream should be a sequence of consecutive natural numbers starting + // from one. (1, 2, 3, ...) + SequenceNumber int64 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + // The actual event. 
+ Event *BuildEvent `protobuf:"bytes,3,opt,name=event" json:"event,omitempty"` +} + +func (m *OrderedBuildEvent) Reset() { *m = OrderedBuildEvent{} } +func (m *OrderedBuildEvent) String() string { return proto.CompactTextString(m) } +func (*OrderedBuildEvent) ProtoMessage() {} +func (*OrderedBuildEvent) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *OrderedBuildEvent) GetStreamId() *StreamId { + if m != nil { + return m.StreamId + } + return nil +} + +func (m *OrderedBuildEvent) GetSequenceNumber() int64 { + if m != nil { + return m.SequenceNumber + } + return 0 +} + +func (m *OrderedBuildEvent) GetEvent() *BuildEvent { + if m != nil { + return m.Event + } + return nil +} + +type PublishBuildToolEventStreamRequest struct { + // Which build event stream this event belongs to. + StreamId *StreamId `protobuf:"bytes,1,opt,name=stream_id,json=streamId" json:"stream_id,omitempty"` + // The position of this event in the stream. The sequence numbers for a build + // event stream should be a sequence of consecutive natural numbers starting + // from one. (1, 2, 3, ...) + SequenceNumber int64 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + // The actual event. + Event *BuildEvent `protobuf:"bytes,3,opt,name=event" json:"event,omitempty"` + // The build event with position info. + // New publishing clients should use this field rather than the 3 above. + OrderedBuildEvent *OrderedBuildEvent `protobuf:"bytes,4,opt,name=ordered_build_event,json=orderedBuildEvent" json:"ordered_build_event,omitempty"` + // The keywords to be attached to the notification which notifies the start + // of a new build event stream. BES only reads this field when sequence_number + // or ordered_build_event.sequence_number is 1 in this message. If this field + // is empty, BES will not publish notification messages for this stream. 
+ NotificationKeywords []string `protobuf:"bytes,5,rep,name=notification_keywords,json=notificationKeywords" json:"notification_keywords,omitempty"` +} + +func (m *PublishBuildToolEventStreamRequest) Reset() { *m = PublishBuildToolEventStreamRequest{} } +func (m *PublishBuildToolEventStreamRequest) String() string { return proto.CompactTextString(m) } +func (*PublishBuildToolEventStreamRequest) ProtoMessage() {} +func (*PublishBuildToolEventStreamRequest) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{3} +} + +func (m *PublishBuildToolEventStreamRequest) GetStreamId() *StreamId { + if m != nil { + return m.StreamId + } + return nil +} + +func (m *PublishBuildToolEventStreamRequest) GetSequenceNumber() int64 { + if m != nil { + return m.SequenceNumber + } + return 0 +} + +func (m *PublishBuildToolEventStreamRequest) GetEvent() *BuildEvent { + if m != nil { + return m.Event + } + return nil +} + +func (m *PublishBuildToolEventStreamRequest) GetOrderedBuildEvent() *OrderedBuildEvent { + if m != nil { + return m.OrderedBuildEvent + } + return nil +} + +func (m *PublishBuildToolEventStreamRequest) GetNotificationKeywords() []string { + if m != nil { + return m.NotificationKeywords + } + return nil +} + +func init() { + proto.RegisterType((*PublishLifecycleEventRequest)(nil), "google.devtools.build.v1.PublishLifecycleEventRequest") + proto.RegisterType((*PublishBuildToolEventStreamResponse)(nil), "google.devtools.build.v1.PublishBuildToolEventStreamResponse") + proto.RegisterType((*OrderedBuildEvent)(nil), "google.devtools.build.v1.OrderedBuildEvent") + proto.RegisterType((*PublishBuildToolEventStreamRequest)(nil), "google.devtools.build.v1.PublishBuildToolEventStreamRequest") + proto.RegisterEnum("google.devtools.build.v1.PublishLifecycleEventRequest_ServiceLevel", PublishLifecycleEventRequest_ServiceLevel_name, PublishLifecycleEventRequest_ServiceLevel_value) +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for PublishBuildEvent service + +type PublishBuildEventClient interface { + // Publish a build event stating the new state of a build (typically from the + // build queue). If the event is a BuildEnqueued event, also register the new + // build request ID and its build type to BES. + // + // The backend will persist the event and deliver it to registered frontend + // jobs immediately without batching. + // + // The commit status of the request is reported by the RPC's util_status() + // function. The error code is the canoncial error code defined in + // //util/task/codes.proto. + PublishLifecycleEvent(ctx context.Context, in *PublishLifecycleEventRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) + // Publish build tool events belonging to the same stream to a backend job + // using bidirectional streaming. + PublishBuildToolEventStream(ctx context.Context, opts ...grpc.CallOption) (PublishBuildEvent_PublishBuildToolEventStreamClient, error) +} + +type publishBuildEventClient struct { + cc *grpc.ClientConn +} + +func NewPublishBuildEventClient(cc *grpc.ClientConn) PublishBuildEventClient { + return &publishBuildEventClient{cc} +} + +func (c *publishBuildEventClient) PublishLifecycleEvent(ctx context.Context, in *PublishLifecycleEventRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) + err := grpc.Invoke(ctx, "/google.devtools.build.v1.PublishBuildEvent/PublishLifecycleEvent", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *publishBuildEventClient) PublishBuildToolEventStream(ctx context.Context, opts ...grpc.CallOption) (PublishBuildEvent_PublishBuildToolEventStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_PublishBuildEvent_serviceDesc.Streams[0], c.cc, "/google.devtools.build.v1.PublishBuildEvent/PublishBuildToolEventStream", opts...) + if err != nil { + return nil, err + } + x := &publishBuildEventPublishBuildToolEventStreamClient{stream} + return x, nil +} + +type PublishBuildEvent_PublishBuildToolEventStreamClient interface { + Send(*OrderedBuildEvent) error + Recv() (*PublishBuildToolEventStreamResponse, error) + grpc.ClientStream +} + +type publishBuildEventPublishBuildToolEventStreamClient struct { + grpc.ClientStream +} + +func (x *publishBuildEventPublishBuildToolEventStreamClient) Send(m *OrderedBuildEvent) error { + return x.ClientStream.SendMsg(m) +} + +func (x *publishBuildEventPublishBuildToolEventStreamClient) Recv() (*PublishBuildToolEventStreamResponse, error) { + m := new(PublishBuildToolEventStreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for PublishBuildEvent service + +type PublishBuildEventServer interface { + // Publish a build event stating the new state of a build (typically from the + // build queue). If the event is a BuildEnqueued event, also register the new + // build request ID and its build type to BES. + // + // The backend will persist the event and deliver it to registered frontend + // jobs immediately without batching. + // + // The commit status of the request is reported by the RPC's util_status() + // function. The error code is the canoncial error code defined in + // //util/task/codes.proto. 
+ PublishLifecycleEvent(context.Context, *PublishLifecycleEventRequest) (*google_protobuf5.Empty, error) + // Publish build tool events belonging to the same stream to a backend job + // using bidirectional streaming. + PublishBuildToolEventStream(PublishBuildEvent_PublishBuildToolEventStreamServer) error +} + +func RegisterPublishBuildEventServer(s *grpc.Server, srv PublishBuildEventServer) { + s.RegisterService(&_PublishBuildEvent_serviceDesc, srv) +} + +func _PublishBuildEvent_PublishLifecycleEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishLifecycleEventRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublishBuildEventServer).PublishLifecycleEvent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.build.v1.PublishBuildEvent/PublishLifecycleEvent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublishBuildEventServer).PublishLifecycleEvent(ctx, req.(*PublishLifecycleEventRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PublishBuildEvent_PublishBuildToolEventStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(PublishBuildEventServer).PublishBuildToolEventStream(&publishBuildEventPublishBuildToolEventStreamServer{stream}) +} + +type PublishBuildEvent_PublishBuildToolEventStreamServer interface { + Send(*PublishBuildToolEventStreamResponse) error + Recv() (*OrderedBuildEvent, error) + grpc.ServerStream +} + +type publishBuildEventPublishBuildToolEventStreamServer struct { + grpc.ServerStream +} + +func (x *publishBuildEventPublishBuildToolEventStreamServer) Send(m *PublishBuildToolEventStreamResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *publishBuildEventPublishBuildToolEventStreamServer) Recv() (*OrderedBuildEvent, error) { + m := 
new(OrderedBuildEvent) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _PublishBuildEvent_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.build.v1.PublishBuildEvent", + HandlerType: (*PublishBuildEventServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "PublishLifecycleEvent", + Handler: _PublishBuildEvent_PublishLifecycleEvent_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "PublishBuildToolEventStream", + Handler: _PublishBuildEvent_PublishBuildToolEventStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/devtools/build/v1/publish_build_event.proto", +} + +func init() { proto.RegisterFile("google/devtools/build/v1/publish_build_event.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xfe, 0x6d, 0xd2, 0x56, 0xbf, 0x6c, 0xda, 0x94, 0x2e, 0x14, 0x4c, 0xda, 0xa2, 0xc8, 0x20, + 0x88, 0xa8, 0x64, 0xd3, 0x54, 0xe2, 0x50, 0x54, 0xfe, 0xa4, 0xe4, 0x10, 0x51, 0xa5, 0x95, 0x1b, + 0x71, 0x80, 0x83, 0xe5, 0xd8, 0xd3, 0x74, 0xa9, 0xe3, 0x35, 0xde, 0xb5, 0x51, 0xaf, 0xbc, 0x40, + 0x0f, 0x3c, 0x01, 0x77, 0x5e, 0x80, 0xe7, 0x80, 0x47, 0xe0, 0xc0, 0x23, 0x70, 0x44, 0xde, 0x75, + 0x90, 0x69, 0x70, 0x50, 0x73, 0xe0, 0xe6, 0xdd, 0x99, 0xf9, 0x66, 0xbe, 0x6f, 0x76, 0xc6, 0xb8, + 0x35, 0x64, 0x6c, 0xe8, 0x83, 0xe9, 0x41, 0x22, 0x18, 0xf3, 0xb9, 0x39, 0x88, 0xa9, 0xef, 0x99, + 0xc9, 0x96, 0x19, 0xc6, 0x03, 0x9f, 0xf2, 0x13, 0x5b, 0x5e, 0xd8, 0x90, 0x40, 0x20, 0x8c, 0x30, + 0x62, 0x82, 0x11, 0x4d, 0xc5, 0x18, 0xe3, 0x18, 0x43, 0xba, 0x18, 0xc9, 0x56, 0x7d, 0x3d, 0x43, + 0x73, 0x42, 0x6a, 0x3a, 0x41, 0xc0, 0x84, 0x23, 0x28, 0x0b, 0xb8, 0x8a, 0xab, 0x6f, 0x16, 0xe6, + 0xca, 0xe5, 0x18, 0x3b, 0xdf, 0xca, 0x9c, 0xe5, 0x69, 0x10, 0x1f, 0x9b, 0x5e, 0x1c, 0x49, 0xb4, + 0xcc, 0xbe, 0x76, 0xd1, 0x0e, 
0xa3, 0x50, 0x9c, 0x29, 0xa3, 0xfe, 0xb1, 0x8c, 0xd7, 0x0f, 0x55, + 0xfd, 0xfb, 0xf4, 0x18, 0xdc, 0x33, 0xd7, 0x87, 0x4e, 0x8a, 0x6e, 0xc1, 0xdb, 0x18, 0xb8, 0x20, + 0x27, 0x78, 0x89, 0x43, 0x94, 0x50, 0x17, 0x6c, 0x1f, 0x12, 0xf0, 0x35, 0xd4, 0x40, 0xcd, 0x5a, + 0x6b, 0xcf, 0x28, 0xa2, 0x66, 0x4c, 0x83, 0x33, 0x8e, 0x14, 0xd6, 0x7e, 0x0a, 0x65, 0x2d, 0xf2, + 0xdc, 0x89, 0xec, 0xe3, 0x6a, 0x8e, 0x9d, 0x56, 0x6a, 0xa0, 0x66, 0xb5, 0xb5, 0x59, 0x9c, 0xe7, + 0x20, 0xf2, 0x20, 0x02, 0xaf, 0x9d, 0x9e, 0x55, 0x0e, 0x3c, 0xf8, 0xf5, 0x4d, 0x9e, 0xe2, 0x1a, + 0x17, 0x11, 0x38, 0x23, 0x5b, 0xd0, 0x11, 0xb0, 0x58, 0x68, 0x65, 0x09, 0x78, 0x73, 0x0c, 0x38, + 0x96, 0xc3, 0x78, 0x9e, 0xc9, 0x65, 0x2d, 0xa9, 0x80, 0xbe, 0xf2, 0x27, 0xdb, 0x78, 0x35, 0x60, + 0x82, 0x1e, 0x53, 0x57, 0x9a, 0xed, 0x53, 0x38, 0x7b, 0xc7, 0x22, 0x8f, 0x6b, 0x73, 0x8d, 0x72, + 0xb3, 0x62, 0x5d, 0xcb, 0x1b, 0x5f, 0x64, 0x36, 0xb2, 0x81, 0x71, 0x18, 0xb1, 0x37, 0xe0, 0x0a, + 0x9b, 0x7a, 0xda, 0x42, 0x03, 0x35, 0x2b, 0x56, 0x25, 0xbb, 0xe9, 0x7a, 0xfa, 0x36, 0x5e, 0xcc, + 0x2b, 0x40, 0x08, 0xae, 0xf5, 0x0e, 0x7a, 0xdd, 0x5e, 0xbf, 0x63, 0x3d, 0xdb, 0xeb, 0x77, 0x5f, + 0x76, 0xae, 0xfc, 0x47, 0x96, 0x71, 0x35, 0x7f, 0x81, 0xf4, 0x73, 0x84, 0x6f, 0x67, 0xa2, 0x4a, + 0xb2, 0x7d, 0xc6, 0x7c, 0x49, 0xf2, 0x48, 0xd6, 0x6b, 0x01, 0x0f, 0x59, 0xc0, 0x81, 0x3c, 0xc1, + 0x95, 0x8c, 0x32, 0xf5, 0x64, 0x9b, 0xaa, 0x2d, 0xbd, 0x58, 0x3e, 0x15, 0xdc, 0xf5, 0xac, 0xff, + 0x79, 0xf6, 0x45, 0xee, 0xe1, 0x65, 0x9e, 0xf6, 0x29, 0x70, 0xc1, 0x0e, 0xe2, 0xd1, 0x00, 0x22, + 0xd9, 0x85, 0xb2, 0x55, 0x1b, 0x5f, 0xf7, 0xe4, 0xad, 0xfe, 0x19, 0xe1, 0x95, 0x09, 0xf9, 0xff, + 0x5d, 0x7e, 0xb2, 0x83, 0xe7, 0xd5, 0x23, 0x51, 0x3d, 0xbd, 0x53, 0x9c, 0x25, 0xf7, 0x3a, 0x54, + 0x88, 0xfe, 0xbd, 0x84, 0xf5, 0xa9, 0x6a, 0xaa, 0x77, 0xbf, 0x37, 0x13, 0x99, 0x76, 0x49, 0x43, + 0x39, 0x42, 0x9b, 0x05, 0x84, 0xa4, 0xdb, 0x45, 0x52, 0x8f, 0x67, 0x20, 0x25, 0x81, 0x54, 0x18, + 0x79, 0x8d, 0xaf, 0x32, 0xd5, 0x93, 0xfc, 0x26, 0xd2, 0xe6, 0x2e, 
0x3f, 0x47, 0x2b, 0x6c, 0xa2, + 0xb7, 0x85, 0xc3, 0x30, 0x5f, 0x3c, 0x0c, 0xad, 0xaf, 0x25, 0xbc, 0x92, 0x97, 0x5a, 0x41, 0x9d, + 0x23, 0xbc, 0xfa, 0xc7, 0x1d, 0x41, 0x1e, 0xce, 0xb6, 0x54, 0xea, 0xd7, 0x27, 0x66, 0xba, 0x93, + 0xae, 0x38, 0xfd, 0xee, 0xfb, 0x2f, 0xdf, 0x3e, 0x94, 0x1a, 0xfa, 0x5a, 0xba, 0x39, 0xfd, 0xdf, + 0x42, 0xf9, 0x4e, 0xb6, 0xb5, 0x77, 0xd0, 0x7d, 0xf2, 0x09, 0xe1, 0xb5, 0x29, 0x4f, 0x82, 0x5c, + 0x46, 0xbc, 0xfa, 0xee, 0x5f, 0x49, 0x4c, 0x1b, 0x62, 0x7d, 0x43, 0xd6, 0x7c, 0x43, 0x27, 0x69, + 0xcd, 0x70, 0xb1, 0xd4, 0x26, 0x7a, 0x80, 0xda, 0x21, 0x5e, 0x77, 0xd9, 0xa8, 0x30, 0x4d, 0x7b, + 0xb1, 0xed, 0xb8, 0xa7, 0x10, 0x78, 0x87, 0xa9, 0x1a, 0x87, 0xe8, 0xd5, 0x6e, 0xe6, 0x39, 0x64, + 0xbe, 0x13, 0x0c, 0x0d, 0x16, 0x0d, 0xcd, 0x21, 0x04, 0x52, 0x2b, 0x53, 0x99, 0x9c, 0x90, 0xf2, + 0xc9, 0x9f, 0xcd, 0x23, 0xf9, 0xf1, 0x03, 0xa1, 0xc1, 0x82, 0x74, 0xde, 0xfe, 0x19, 0x00, 0x00, + 0xff, 0xff, 0x85, 0x4c, 0x16, 0x72, 0x04, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudbuild/v1/cloudbuild.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudbuild/v1/cloudbuild.pb.go new file mode 100644 index 000000000..0ec93f0f1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudbuild/v1/cloudbuild.pb.go @@ -0,0 +1,2213 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/cloudbuild/v1/cloudbuild.proto + +/* +Package cloudbuild is a generated protocol buffer package. 
+ +It is generated from these files: + google/devtools/cloudbuild/v1/cloudbuild.proto + +It has these top-level messages: + StorageSource + RepoSource + Source + BuiltImage + BuildStep + Volume + Results + Build + BuildOperationMetadata + SourceProvenance + FileHashes + Hash + Secret + CreateBuildRequest + GetBuildRequest + ListBuildsRequest + ListBuildsResponse + CancelBuildRequest + BuildTrigger + CreateBuildTriggerRequest + GetBuildTriggerRequest + ListBuildTriggersRequest + ListBuildTriggersResponse + DeleteBuildTriggerRequest + UpdateBuildTriggerRequest + BuildOptions +*/ +package cloudbuild + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Possible status of a build. +type Build_Status int32 + +const ( + // Status of the build is unknown. + Build_STATUS_UNKNOWN Build_Status = 0 + // Build is queued; work has not yet begun. + Build_QUEUED Build_Status = 1 + // Build is being executed. + Build_WORKING Build_Status = 2 + // Build finished successfully. + Build_SUCCESS Build_Status = 3 + // Build failed to complete successfully. 
+ Build_FAILURE Build_Status = 4 + // Build failed due to an internal cause. + Build_INTERNAL_ERROR Build_Status = 5 + // Build took longer than was allowed. + Build_TIMEOUT Build_Status = 6 + // Build was canceled by a user. + Build_CANCELLED Build_Status = 7 +) + +var Build_Status_name = map[int32]string{ + 0: "STATUS_UNKNOWN", + 1: "QUEUED", + 2: "WORKING", + 3: "SUCCESS", + 4: "FAILURE", + 5: "INTERNAL_ERROR", + 6: "TIMEOUT", + 7: "CANCELLED", +} +var Build_Status_value = map[string]int32{ + "STATUS_UNKNOWN": 0, + "QUEUED": 1, + "WORKING": 2, + "SUCCESS": 3, + "FAILURE": 4, + "INTERNAL_ERROR": 5, + "TIMEOUT": 6, + "CANCELLED": 7, +} + +func (x Build_Status) String() string { + return proto.EnumName(Build_Status_name, int32(x)) +} +func (Build_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// Specifies the hash algorithm, if any. +type Hash_HashType int32 + +const ( + // No hash requested. + Hash_NONE Hash_HashType = 0 + // Use a sha256 hash. + Hash_SHA256 Hash_HashType = 1 +) + +var Hash_HashType_name = map[int32]string{ + 0: "NONE", + 1: "SHA256", +} +var Hash_HashType_value = map[string]int32{ + "NONE": 0, + "SHA256": 1, +} + +func (x Hash_HashType) String() string { + return proto.EnumName(Hash_HashType_name, int32(x)) +} +func (Hash_HashType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } + +// Specifies the manner in which the build should be verified, if at all. +type BuildOptions_VerifyOption int32 + +const ( + // Not a verifiable build. (default) + BuildOptions_NOT_VERIFIED BuildOptions_VerifyOption = 0 + // Verified build. 
+ BuildOptions_VERIFIED BuildOptions_VerifyOption = 1 +) + +var BuildOptions_VerifyOption_name = map[int32]string{ + 0: "NOT_VERIFIED", + 1: "VERIFIED", +} +var BuildOptions_VerifyOption_value = map[string]int32{ + "NOT_VERIFIED": 0, + "VERIFIED": 1, +} + +func (x BuildOptions_VerifyOption) String() string { + return proto.EnumName(BuildOptions_VerifyOption_name, int32(x)) +} +func (BuildOptions_VerifyOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{25, 0} +} + +// Specifies the behavior when there is an error in the substitution checks. +type BuildOptions_SubstitutionOption int32 + +const ( + // Fails the build if error in substitutions checks, like missing + // a substitution in the template or in the map. + BuildOptions_MUST_MATCH BuildOptions_SubstitutionOption = 0 + // Do not fail the build if error in substitutions checks. + BuildOptions_ALLOW_LOOSE BuildOptions_SubstitutionOption = 1 +) + +var BuildOptions_SubstitutionOption_name = map[int32]string{ + 0: "MUST_MATCH", + 1: "ALLOW_LOOSE", +} +var BuildOptions_SubstitutionOption_value = map[string]int32{ + "MUST_MATCH": 0, + "ALLOW_LOOSE": 1, +} + +func (x BuildOptions_SubstitutionOption) String() string { + return proto.EnumName(BuildOptions_SubstitutionOption_name, int32(x)) +} +func (BuildOptions_SubstitutionOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{25, 1} +} + +// StorageSource describes the location of the source in an archive file in +// Google Cloud Storage. +type StorageSource struct { + // Google Cloud Storage bucket containing source (see + // [Bucket Name + // Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). + Bucket string `protobuf:"bytes,1,opt,name=bucket" json:"bucket,omitempty"` + // Google Cloud Storage object containing source. + // + // This object must be a gzipped archive file (.tar.gz) containing source to + // build. 
+ Object string `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` + // Google Cloud Storage generation for the object. If the generation is + // omitted, the latest generation will be used. + Generation int64 `protobuf:"varint,3,opt,name=generation" json:"generation,omitempty"` +} + +func (m *StorageSource) Reset() { *m = StorageSource{} } +func (m *StorageSource) String() string { return proto.CompactTextString(m) } +func (*StorageSource) ProtoMessage() {} +func (*StorageSource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *StorageSource) GetBucket() string { + if m != nil { + return m.Bucket + } + return "" +} + +func (m *StorageSource) GetObject() string { + if m != nil { + return m.Object + } + return "" +} + +func (m *StorageSource) GetGeneration() int64 { + if m != nil { + return m.Generation + } + return 0 +} + +// RepoSource describes the location of the source in a Google Cloud Source +// Repository. +type RepoSource struct { + // ID of the project that owns the repo. If omitted, the project ID requesting + // the build is assumed. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Name of the repo. If omitted, the name "default" is assumed. + RepoName string `protobuf:"bytes,2,opt,name=repo_name,json=repoName" json:"repo_name,omitempty"` + // A revision within the source repository must be specified in + // one of these ways. 
+ // + // Types that are valid to be assigned to Revision: + // *RepoSource_BranchName + // *RepoSource_TagName + // *RepoSource_CommitSha + Revision isRepoSource_Revision `protobuf_oneof:"revision"` +} + +func (m *RepoSource) Reset() { *m = RepoSource{} } +func (m *RepoSource) String() string { return proto.CompactTextString(m) } +func (*RepoSource) ProtoMessage() {} +func (*RepoSource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type isRepoSource_Revision interface { + isRepoSource_Revision() +} + +type RepoSource_BranchName struct { + BranchName string `protobuf:"bytes,3,opt,name=branch_name,json=branchName,oneof"` +} +type RepoSource_TagName struct { + TagName string `protobuf:"bytes,4,opt,name=tag_name,json=tagName,oneof"` +} +type RepoSource_CommitSha struct { + CommitSha string `protobuf:"bytes,5,opt,name=commit_sha,json=commitSha,oneof"` +} + +func (*RepoSource_BranchName) isRepoSource_Revision() {} +func (*RepoSource_TagName) isRepoSource_Revision() {} +func (*RepoSource_CommitSha) isRepoSource_Revision() {} + +func (m *RepoSource) GetRevision() isRepoSource_Revision { + if m != nil { + return m.Revision + } + return nil +} + +func (m *RepoSource) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *RepoSource) GetRepoName() string { + if m != nil { + return m.RepoName + } + return "" +} + +func (m *RepoSource) GetBranchName() string { + if x, ok := m.GetRevision().(*RepoSource_BranchName); ok { + return x.BranchName + } + return "" +} + +func (m *RepoSource) GetTagName() string { + if x, ok := m.GetRevision().(*RepoSource_TagName); ok { + return x.TagName + } + return "" +} + +func (m *RepoSource) GetCommitSha() string { + if x, ok := m.GetRevision().(*RepoSource_CommitSha); ok { + return x.CommitSha + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RepoSource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RepoSource_OneofMarshaler, _RepoSource_OneofUnmarshaler, _RepoSource_OneofSizer, []interface{}{ + (*RepoSource_BranchName)(nil), + (*RepoSource_TagName)(nil), + (*RepoSource_CommitSha)(nil), + } +} + +func _RepoSource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RepoSource) + // revision + switch x := m.Revision.(type) { + case *RepoSource_BranchName: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.BranchName) + case *RepoSource_TagName: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.TagName) + case *RepoSource_CommitSha: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.CommitSha) + case nil: + default: + return fmt.Errorf("RepoSource.Revision has unexpected type %T", x) + } + return nil +} + +func _RepoSource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RepoSource) + switch tag { + case 3: // revision.branch_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Revision = &RepoSource_BranchName{x} + return true, err + case 4: // revision.tag_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Revision = &RepoSource_TagName{x} + return true, err + case 5: // revision.commit_sha + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Revision = &RepoSource_CommitSha{x} + return true, err + default: + return false, nil + } +} + +func _RepoSource_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RepoSource) + // revision + switch x := m.Revision.(type) { + case *RepoSource_BranchName: + n += proto.SizeVarint(3<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.BranchName))) + n += len(x.BranchName) + case *RepoSource_TagName: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.TagName))) + n += len(x.TagName) + case *RepoSource_CommitSha: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.CommitSha))) + n += len(x.CommitSha) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Source describes the location of the source in a supported storage +// service. +type Source struct { + // Describes location of source. + // + // Types that are valid to be assigned to Source: + // *Source_StorageSource + // *Source_RepoSource + Source isSource_Source `protobuf_oneof:"source"` +} + +func (m *Source) Reset() { *m = Source{} } +func (m *Source) String() string { return proto.CompactTextString(m) } +func (*Source) ProtoMessage() {} +func (*Source) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isSource_Source interface { + isSource_Source() +} + +type Source_StorageSource struct { + StorageSource *StorageSource `protobuf:"bytes,2,opt,name=storage_source,json=storageSource,oneof"` +} +type Source_RepoSource struct { + RepoSource *RepoSource `protobuf:"bytes,3,opt,name=repo_source,json=repoSource,oneof"` +} + +func (*Source_StorageSource) isSource_Source() {} +func (*Source_RepoSource) isSource_Source() {} + +func (m *Source) GetSource() isSource_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Source) GetStorageSource() *StorageSource { + if x, ok := m.GetSource().(*Source_StorageSource); ok { + return x.StorageSource + } + return nil +} + +func (m *Source) GetRepoSource() *RepoSource { + if x, ok := m.GetSource().(*Source_RepoSource); ok { + return x.RepoSource + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Source) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Source_OneofMarshaler, _Source_OneofUnmarshaler, _Source_OneofSizer, []interface{}{ + (*Source_StorageSource)(nil), + (*Source_RepoSource)(nil), + } +} + +func _Source_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Source) + // source + switch x := m.Source.(type) { + case *Source_StorageSource: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StorageSource); err != nil { + return err + } + case *Source_RepoSource: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RepoSource); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Source.Source has unexpected type %T", x) + } + return nil +} + +func _Source_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Source) + switch tag { + case 2: // source.storage_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StorageSource) + err := b.DecodeMessage(msg) + m.Source = &Source_StorageSource{msg} + return true, err + case 3: // source.repo_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RepoSource) + err := b.DecodeMessage(msg) + m.Source = &Source_RepoSource{msg} + return true, err + default: + return false, nil + } +} + +func _Source_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Source) + // source + switch x := m.Source.(type) { + case *Source_StorageSource: + s := proto.Size(x.StorageSource) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Source_RepoSource: + s := proto.Size(x.RepoSource) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + 
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// BuiltImage describes an image built by the pipeline. +type BuiltImage struct { + // Name used to push the container image to Google Container Registry, as + // presented to `docker push`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Docker Registry 2.0 digest. + Digest string `protobuf:"bytes,3,opt,name=digest" json:"digest,omitempty"` +} + +func (m *BuiltImage) Reset() { *m = BuiltImage{} } +func (m *BuiltImage) String() string { return proto.CompactTextString(m) } +func (*BuiltImage) ProtoMessage() {} +func (*BuiltImage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *BuiltImage) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BuiltImage) GetDigest() string { + if m != nil { + return m.Digest + } + return "" +} + +// BuildStep describes a step to perform in the build pipeline. +type BuildStep struct { + // The name of the container image that will run this particular build step. + // + // If the image is already available in the host's Docker daemon's cache, it + // will be run directly. If not, the host will attempt to pull the image + // first, using the builder service account's credentials if necessary. + // + // The Docker daemon's cache will already have the latest versions of all of + // the officially supported build steps + // ([https://github.com/GoogleCloudPlatform/cloud-builders](https://github.com/GoogleCloudPlatform/cloud-builders)). + // The Docker daemon will also have cached many of the layers for some popular + // images, like "ubuntu", "debian", but they will be refreshed at the time you + // attempt to use them. + // + // If you built an image in a previous build step, it will be stored in the + // host's Docker daemon's cache and is available to use as the name for a + // later build step. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A list of environment variable definitions to be used when running a step. + // + // The elements are of the form "KEY=VALUE" for the environment variable "KEY" + // being given the value "VALUE". + Env []string `protobuf:"bytes,2,rep,name=env" json:"env,omitempty"` + // A list of arguments that will be presented to the step when it is started. + // + // If the image used to run the step's container has an entrypoint, these args + // will be used as arguments to that entrypoint. If the image does not define + // an entrypoint, the first element in args will be used as the entrypoint, + // and the remainder will be used as arguments. + Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` + // Working directory (relative to project source root) to use when running + // this operation's container. + Dir string `protobuf:"bytes,4,opt,name=dir" json:"dir,omitempty"` + // Optional unique identifier for this build step, used in wait_for to + // reference this build step as a dependency. + Id string `protobuf:"bytes,5,opt,name=id" json:"id,omitempty"` + // The ID(s) of the step(s) that this build step depends on. + // This build step will not start until all the build steps in wait_for + // have completed successfully. If wait_for is empty, this build step will + // start when all previous build steps in the Build.Steps list have completed + // successfully. + WaitFor []string `protobuf:"bytes,6,rep,name=wait_for,json=waitFor" json:"wait_for,omitempty"` + // Optional entrypoint to be used instead of the build step image's default + // If unset, the image's default will be used. + Entrypoint string `protobuf:"bytes,7,opt,name=entrypoint" json:"entrypoint,omitempty"` + // A list of environment variables which are encrypted using a Cloud KMS + // crypto key. These values must be specified in the build's secrets. 
+ SecretEnv []string `protobuf:"bytes,8,rep,name=secret_env,json=secretEnv" json:"secret_env,omitempty"` + // List of volumes to mount into the build step. + // + // Each volume will be created as an empty volume prior to execution of the + // build step. Upon completion of the build, volumes and their contents will + // be discarded. + // + // Using a named volume in only one step is not valid as it is indicative + // of a mis-configured build request. + Volumes []*Volume `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` +} + +func (m *BuildStep) Reset() { *m = BuildStep{} } +func (m *BuildStep) String() string { return proto.CompactTextString(m) } +func (*BuildStep) ProtoMessage() {} +func (*BuildStep) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BuildStep) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BuildStep) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +func (m *BuildStep) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *BuildStep) GetDir() string { + if m != nil { + return m.Dir + } + return "" +} + +func (m *BuildStep) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *BuildStep) GetWaitFor() []string { + if m != nil { + return m.WaitFor + } + return nil +} + +func (m *BuildStep) GetEntrypoint() string { + if m != nil { + return m.Entrypoint + } + return "" +} + +func (m *BuildStep) GetSecretEnv() []string { + if m != nil { + return m.SecretEnv + } + return nil +} + +func (m *BuildStep) GetVolumes() []*Volume { + if m != nil { + return m.Volumes + } + return nil +} + +// Volume describes a Docker container volume which is mounted into build steps +// in order to persist files across build step execution. +type Volume struct { + // Name of the volume to mount. + // + // Volume names must be unique per build step and must be valid names for + // Docker volumes. 
Each named volume must be used by at least two build steps. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Path at which to mount the volume. + // + // Paths must be absolute and cannot conflict with other volume paths on the + // same build step or with certain reserved volume paths. + Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Volume) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Volume) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +// Results describes the artifacts created by the build pipeline. +type Results struct { + // Images that were built as a part of the build. + Images []*BuiltImage `protobuf:"bytes,2,rep,name=images" json:"images,omitempty"` + // List of build step digests, in order corresponding to build step indices. + BuildStepImages []string `protobuf:"bytes,3,rep,name=build_step_images,json=buildStepImages" json:"build_step_images,omitempty"` +} + +func (m *Results) Reset() { *m = Results{} } +func (m *Results) String() string { return proto.CompactTextString(m) } +func (*Results) ProtoMessage() {} +func (*Results) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Results) GetImages() []*BuiltImage { + if m != nil { + return m.Images + } + return nil +} + +func (m *Results) GetBuildStepImages() []string { + if m != nil { + return m.BuildStepImages + } + return nil +} + +// A build resource in the Container Builder API. +// +// At a high level, a Build describes where to find source code, how to build +// it (for example, the builder image to run on the source), and what tag to +// apply to the built image when it is pushed to Google Container Registry. 
+// +// Fields can include the following variables which will be expanded when the +// build is created: +// +// - $PROJECT_ID: the project ID of the build. +// - $BUILD_ID: the autogenerated ID of the build. +// - $REPO_NAME: the source repository name specified by RepoSource. +// - $BRANCH_NAME: the branch name specified by RepoSource. +// - $TAG_NAME: the tag name specified by RepoSource. +// - $REVISION_ID or $COMMIT_SHA: the commit SHA specified by RepoSource or +// resolved from the specified branch or tag. +type Build struct { + // Unique identifier of the build. + // @OutputOnly + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // ID of the project. + // @OutputOnly. + ProjectId string `protobuf:"bytes,16,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Status of the build. + // @OutputOnly + Status Build_Status `protobuf:"varint,2,opt,name=status,enum=google.devtools.cloudbuild.v1.Build_Status" json:"status,omitempty"` + // Customer-readable message about the current status. + // @OutputOnly + StatusDetail string `protobuf:"bytes,24,opt,name=status_detail,json=statusDetail" json:"status_detail,omitempty"` + // Describes where to find the source files to build. + Source *Source `protobuf:"bytes,3,opt,name=source" json:"source,omitempty"` + // Describes the operations to be performed on the workspace. + Steps []*BuildStep `protobuf:"bytes,11,rep,name=steps" json:"steps,omitempty"` + // Results of the build. + // @OutputOnly + Results *Results `protobuf:"bytes,10,opt,name=results" json:"results,omitempty"` + // Time at which the request to create the build was received. + // @OutputOnly + CreateTime *google_protobuf4.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Time at which execution of the build was started. 
+ // @OutputOnly + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,7,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time at which execution of the build was finished. + // + // The difference between finish_time and start_time is the duration of the + // build's execution. + // @OutputOnly + FinishTime *google_protobuf4.Timestamp `protobuf:"bytes,8,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"` + // Amount of time that this build should be allowed to run, to second + // granularity. If this amount of time elapses, work on the build will cease + // and the build status will be TIMEOUT. + // + // Default time is ten minutes. + Timeout *google_protobuf3.Duration `protobuf:"bytes,12,opt,name=timeout" json:"timeout,omitempty"` + // A list of images to be pushed upon the successful completion of all build + // steps. + // + // The images will be pushed using the builder service account's credentials. + // + // The digests of the pushed images will be stored in the Build resource's + // results field. + // + // If any of the images fail to be pushed, the build is marked FAILURE. + Images []string `protobuf:"bytes,13,rep,name=images" json:"images,omitempty"` + // Google Cloud Storage bucket where logs should be written (see + // [Bucket Name + // Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). + // Logs file names will be of the format `${logs_bucket}/log-${build_id}.txt`. + LogsBucket string `protobuf:"bytes,19,opt,name=logs_bucket,json=logsBucket" json:"logs_bucket,omitempty"` + // A permanent fixed identifier for source. + // @OutputOnly + SourceProvenance *SourceProvenance `protobuf:"bytes,21,opt,name=source_provenance,json=sourceProvenance" json:"source_provenance,omitempty"` + // The ID of the BuildTrigger that triggered this build, if it was + // triggered automatically. 
+ // @OutputOnly + BuildTriggerId string `protobuf:"bytes,22,opt,name=build_trigger_id,json=buildTriggerId" json:"build_trigger_id,omitempty"` + // Special options for this build. + Options *BuildOptions `protobuf:"bytes,23,opt,name=options" json:"options,omitempty"` + // URL to logs for this build in Google Cloud Logging. + // @OutputOnly + LogUrl string `protobuf:"bytes,25,opt,name=log_url,json=logUrl" json:"log_url,omitempty"` + // Substitutions data for Build resource. + Substitutions map[string]string `protobuf:"bytes,29,rep,name=substitutions" json:"substitutions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Tags for annotation of a Build. These are not docker tags. + Tags []string `protobuf:"bytes,31,rep,name=tags" json:"tags,omitempty"` + // Secrets to decrypt using Cloud KMS. + Secrets []*Secret `protobuf:"bytes,32,rep,name=secrets" json:"secrets,omitempty"` +} + +func (m *Build) Reset() { *m = Build{} } +func (m *Build) String() string { return proto.CompactTextString(m) } +func (*Build) ProtoMessage() {} +func (*Build) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Build) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Build) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *Build) GetStatus() Build_Status { + if m != nil { + return m.Status + } + return Build_STATUS_UNKNOWN +} + +func (m *Build) GetStatusDetail() string { + if m != nil { + return m.StatusDetail + } + return "" +} + +func (m *Build) GetSource() *Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Build) GetSteps() []*BuildStep { + if m != nil { + return m.Steps + } + return nil +} + +func (m *Build) GetResults() *Results { + if m != nil { + return m.Results + } + return nil +} + +func (m *Build) GetCreateTime() *google_protobuf4.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Build) 
GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Build) GetFinishTime() *google_protobuf4.Timestamp { + if m != nil { + return m.FinishTime + } + return nil +} + +func (m *Build) GetTimeout() *google_protobuf3.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *Build) GetImages() []string { + if m != nil { + return m.Images + } + return nil +} + +func (m *Build) GetLogsBucket() string { + if m != nil { + return m.LogsBucket + } + return "" +} + +func (m *Build) GetSourceProvenance() *SourceProvenance { + if m != nil { + return m.SourceProvenance + } + return nil +} + +func (m *Build) GetBuildTriggerId() string { + if m != nil { + return m.BuildTriggerId + } + return "" +} + +func (m *Build) GetOptions() *BuildOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *Build) GetLogUrl() string { + if m != nil { + return m.LogUrl + } + return "" +} + +func (m *Build) GetSubstitutions() map[string]string { + if m != nil { + return m.Substitutions + } + return nil +} + +func (m *Build) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Build) GetSecrets() []*Secret { + if m != nil { + return m.Secrets + } + return nil +} + +// Metadata for build operations. +type BuildOperationMetadata struct { + // The build that the operation is tracking. + Build *Build `protobuf:"bytes,1,opt,name=build" json:"build,omitempty"` +} + +func (m *BuildOperationMetadata) Reset() { *m = BuildOperationMetadata{} } +func (m *BuildOperationMetadata) String() string { return proto.CompactTextString(m) } +func (*BuildOperationMetadata) ProtoMessage() {} +func (*BuildOperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *BuildOperationMetadata) GetBuild() *Build { + if m != nil { + return m.Build + } + return nil +} + +// Provenance of the source. 
Ways to find the original source, or verify that +// some source was used for this build. +type SourceProvenance struct { + // A copy of the build's source.storage_source, if exists, with any + // generations resolved. + ResolvedStorageSource *StorageSource `protobuf:"bytes,3,opt,name=resolved_storage_source,json=resolvedStorageSource" json:"resolved_storage_source,omitempty"` + // A copy of the build's source.repo_source, if exists, with any + // revisions resolved. + ResolvedRepoSource *RepoSource `protobuf:"bytes,6,opt,name=resolved_repo_source,json=resolvedRepoSource" json:"resolved_repo_source,omitempty"` + // Hash(es) of the build source, which can be used to verify that the original + // source integrity was maintained in the build. Note that FileHashes will + // only be populated if BuildOptions has requested a SourceProvenanceHash. + // + // The keys to this map are file paths used as build source and the values + // contain the hash values for those files. + // + // If the build source came in a single package such as a gzipped tarfile + // (.tar.gz), the FileHash will be for the single path to that file. 
+ // @OutputOnly + FileHashes map[string]*FileHashes `protobuf:"bytes,4,rep,name=file_hashes,json=fileHashes" json:"file_hashes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *SourceProvenance) Reset() { *m = SourceProvenance{} } +func (m *SourceProvenance) String() string { return proto.CompactTextString(m) } +func (*SourceProvenance) ProtoMessage() {} +func (*SourceProvenance) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *SourceProvenance) GetResolvedStorageSource() *StorageSource { + if m != nil { + return m.ResolvedStorageSource + } + return nil +} + +func (m *SourceProvenance) GetResolvedRepoSource() *RepoSource { + if m != nil { + return m.ResolvedRepoSource + } + return nil +} + +func (m *SourceProvenance) GetFileHashes() map[string]*FileHashes { + if m != nil { + return m.FileHashes + } + return nil +} + +// Container message for hashes of byte content of files, used in +// SourceProvenance messages to verify integrity of source input to the build. +type FileHashes struct { + // Collection of file hashes. + FileHash []*Hash `protobuf:"bytes,1,rep,name=file_hash,json=fileHash" json:"file_hash,omitempty"` +} + +func (m *FileHashes) Reset() { *m = FileHashes{} } +func (m *FileHashes) String() string { return proto.CompactTextString(m) } +func (*FileHashes) ProtoMessage() {} +func (*FileHashes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *FileHashes) GetFileHash() []*Hash { + if m != nil { + return m.FileHash + } + return nil +} + +// Container message for hash values. +type Hash struct { + // The type of hash that was performed. + Type Hash_HashType `protobuf:"varint,1,opt,name=type,enum=google.devtools.cloudbuild.v1.Hash_HashType" json:"type,omitempty"` + // The hash value. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Hash) Reset() { *m = Hash{} } +func (m *Hash) String() string { return proto.CompactTextString(m) } +func (*Hash) ProtoMessage() {} +func (*Hash) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Hash) GetType() Hash_HashType { + if m != nil { + return m.Type + } + return Hash_NONE +} + +func (m *Hash) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Secret pairs a set of secret environment variables containing encrypted +// values with the Cloud KMS key to use to decrypt the value. +type Secret struct { + // Cloud KMS key name to use to decrypt these envs. + KmsKeyName string `protobuf:"bytes,1,opt,name=kms_key_name,json=kmsKeyName" json:"kms_key_name,omitempty"` + // Map of environment variable name to its encrypted value. + // + // Secret environment variables must be unique across all of a build's + // secrets, and must be used by at least one build step. Values can be at most + // 1 KB in size. There can be at most ten secret values across all of a + // build's secrets. + SecretEnv map[string][]byte `protobuf:"bytes,3,rep,name=secret_env,json=secretEnv" json:"secret_env,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Secret) Reset() { *m = Secret{} } +func (m *Secret) String() string { return proto.CompactTextString(m) } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *Secret) GetKmsKeyName() string { + if m != nil { + return m.KmsKeyName + } + return "" +} + +func (m *Secret) GetSecretEnv() map[string][]byte { + if m != nil { + return m.SecretEnv + } + return nil +} + +// Request to create a new build. +type CreateBuildRequest struct { + // ID of the project. 
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Build resource to create. + Build *Build `protobuf:"bytes,2,opt,name=build" json:"build,omitempty"` +} + +func (m *CreateBuildRequest) Reset() { *m = CreateBuildRequest{} } +func (m *CreateBuildRequest) String() string { return proto.CompactTextString(m) } +func (*CreateBuildRequest) ProtoMessage() {} +func (*CreateBuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *CreateBuildRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CreateBuildRequest) GetBuild() *Build { + if m != nil { + return m.Build + } + return nil +} + +// Request to get a build. +type GetBuildRequest struct { + // ID of the project. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // ID of the build. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` +} + +func (m *GetBuildRequest) Reset() { *m = GetBuildRequest{} } +func (m *GetBuildRequest) String() string { return proto.CompactTextString(m) } +func (*GetBuildRequest) ProtoMessage() {} +func (*GetBuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *GetBuildRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetBuildRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +// Request to list builds. +type ListBuildsRequest struct { + // ID of the project. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Number of results to return in the list. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Token to provide to skip to a particular spot in the list. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The raw filter text to constrain the results. + Filter string `protobuf:"bytes,8,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListBuildsRequest) Reset() { *m = ListBuildsRequest{} } +func (m *ListBuildsRequest) String() string { return proto.CompactTextString(m) } +func (*ListBuildsRequest) ProtoMessage() {} +func (*ListBuildsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *ListBuildsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListBuildsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListBuildsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListBuildsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// Response including listed builds. +type ListBuildsResponse struct { + // Builds will be sorted by create_time, descending. + Builds []*Build `protobuf:"bytes,1,rep,name=builds" json:"builds,omitempty"` + // Token to receive the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListBuildsResponse) Reset() { *m = ListBuildsResponse{} } +func (m *ListBuildsResponse) String() string { return proto.CompactTextString(m) } +func (*ListBuildsResponse) ProtoMessage() {} +func (*ListBuildsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *ListBuildsResponse) GetBuilds() []*Build { + if m != nil { + return m.Builds + } + return nil +} + +func (m *ListBuildsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request to cancel an ongoing build. +type CancelBuildRequest struct { + // ID of the project. 
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // ID of the build. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` +} + +func (m *CancelBuildRequest) Reset() { *m = CancelBuildRequest{} } +func (m *CancelBuildRequest) String() string { return proto.CompactTextString(m) } +func (*CancelBuildRequest) ProtoMessage() {} +func (*CancelBuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *CancelBuildRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CancelBuildRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +// Configuration for an automated build in response to source repository +// changes. +type BuildTrigger struct { + // Unique identifier of the trigger. + // + // @OutputOnly + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // Human-readable description of this trigger. + Description string `protobuf:"bytes,10,opt,name=description" json:"description,omitempty"` + // Template describing the types of source changes to trigger a build. + // + // Branch and tag names in trigger templates are interpreted as regular + // expressions. Any branch or tag change that matches that regular expression + // will trigger a build. + TriggerTemplate *RepoSource `protobuf:"bytes,7,opt,name=trigger_template,json=triggerTemplate" json:"trigger_template,omitempty"` + // Template describing the Build request to make when the trigger is matched. + // + // Types that are valid to be assigned to BuildTemplate: + // *BuildTrigger_Build + // *BuildTrigger_Filename + BuildTemplate isBuildTrigger_BuildTemplate `protobuf_oneof:"build_template"` + // Time when the trigger was created. + // + // @OutputOnly + CreateTime *google_protobuf4.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // If true, the trigger will never result in a build. 
+ Disabled bool `protobuf:"varint,9,opt,name=disabled" json:"disabled,omitempty"` + // Substitutions data for Build resource. + Substitutions map[string]string `protobuf:"bytes,11,rep,name=substitutions" json:"substitutions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *BuildTrigger) Reset() { *m = BuildTrigger{} } +func (m *BuildTrigger) String() string { return proto.CompactTextString(m) } +func (*BuildTrigger) ProtoMessage() {} +func (*BuildTrigger) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +type isBuildTrigger_BuildTemplate interface { + isBuildTrigger_BuildTemplate() +} + +type BuildTrigger_Build struct { + Build *Build `protobuf:"bytes,4,opt,name=build,oneof"` +} +type BuildTrigger_Filename struct { + Filename string `protobuf:"bytes,8,opt,name=filename,oneof"` +} + +func (*BuildTrigger_Build) isBuildTrigger_BuildTemplate() {} +func (*BuildTrigger_Filename) isBuildTrigger_BuildTemplate() {} + +func (m *BuildTrigger) GetBuildTemplate() isBuildTrigger_BuildTemplate { + if m != nil { + return m.BuildTemplate + } + return nil +} + +func (m *BuildTrigger) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *BuildTrigger) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BuildTrigger) GetTriggerTemplate() *RepoSource { + if m != nil { + return m.TriggerTemplate + } + return nil +} + +func (m *BuildTrigger) GetBuild() *Build { + if x, ok := m.GetBuildTemplate().(*BuildTrigger_Build); ok { + return x.Build + } + return nil +} + +func (m *BuildTrigger) GetFilename() string { + if x, ok := m.GetBuildTemplate().(*BuildTrigger_Filename); ok { + return x.Filename + } + return "" +} + +func (m *BuildTrigger) GetCreateTime() *google_protobuf4.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *BuildTrigger) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m 
*BuildTrigger) GetSubstitutions() map[string]string { + if m != nil { + return m.Substitutions + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*BuildTrigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BuildTrigger_OneofMarshaler, _BuildTrigger_OneofUnmarshaler, _BuildTrigger_OneofSizer, []interface{}{ + (*BuildTrigger_Build)(nil), + (*BuildTrigger_Filename)(nil), + } +} + +func _BuildTrigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*BuildTrigger) + // build_template + switch x := m.BuildTemplate.(type) { + case *BuildTrigger_Build: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Build); err != nil { + return err + } + case *BuildTrigger_Filename: + b.EncodeVarint(8<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Filename) + case nil: + default: + return fmt.Errorf("BuildTrigger.BuildTemplate has unexpected type %T", x) + } + return nil +} + +func _BuildTrigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*BuildTrigger) + switch tag { + case 4: // build_template.build + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Build) + err := b.DecodeMessage(msg) + m.BuildTemplate = &BuildTrigger_Build{msg} + return true, err + case 8: // build_template.filename + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.BuildTemplate = &BuildTrigger_Filename{x} + return true, err + default: + return false, nil + } +} + +func _BuildTrigger_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BuildTrigger) + // build_template + switch x := m.BuildTemplate.(type) { + case *BuildTrigger_Build: + s := proto.Size(x.Build) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *BuildTrigger_Filename: + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Filename))) + n += len(x.Filename) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request to create a new BuildTrigger. +type CreateBuildTriggerRequest struct { + // ID of the project for which to configure automatic builds. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // BuildTrigger to create. + Trigger *BuildTrigger `protobuf:"bytes,2,opt,name=trigger" json:"trigger,omitempty"` +} + +func (m *CreateBuildTriggerRequest) Reset() { *m = CreateBuildTriggerRequest{} } +func (m *CreateBuildTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*CreateBuildTriggerRequest) ProtoMessage() {} +func (*CreateBuildTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *CreateBuildTriggerRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CreateBuildTriggerRequest) GetTrigger() *BuildTrigger { + if m != nil { + return m.Trigger + } + return nil +} + +// Returns the BuildTrigger with the specified ID. +type GetBuildTriggerRequest struct { + // ID of the project that owns the trigger. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // ID of the BuildTrigger to get. 
+ TriggerId string `protobuf:"bytes,2,opt,name=trigger_id,json=triggerId" json:"trigger_id,omitempty"` +} + +func (m *GetBuildTriggerRequest) Reset() { *m = GetBuildTriggerRequest{} } +func (m *GetBuildTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*GetBuildTriggerRequest) ProtoMessage() {} +func (*GetBuildTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *GetBuildTriggerRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetBuildTriggerRequest) GetTriggerId() string { + if m != nil { + return m.TriggerId + } + return "" +} + +// Request to list existing BuildTriggers. +type ListBuildTriggersRequest struct { + // ID of the project for which to list BuildTriggers. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` +} + +func (m *ListBuildTriggersRequest) Reset() { *m = ListBuildTriggersRequest{} } +func (m *ListBuildTriggersRequest) String() string { return proto.CompactTextString(m) } +func (*ListBuildTriggersRequest) ProtoMessage() {} +func (*ListBuildTriggersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *ListBuildTriggersRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Response containing existing BuildTriggers. +type ListBuildTriggersResponse struct { + // BuildTriggers for the project, sorted by create_time descending. 
+ Triggers []*BuildTrigger `protobuf:"bytes,1,rep,name=triggers" json:"triggers,omitempty"` +} + +func (m *ListBuildTriggersResponse) Reset() { *m = ListBuildTriggersResponse{} } +func (m *ListBuildTriggersResponse) String() string { return proto.CompactTextString(m) } +func (*ListBuildTriggersResponse) ProtoMessage() {} +func (*ListBuildTriggersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *ListBuildTriggersResponse) GetTriggers() []*BuildTrigger { + if m != nil { + return m.Triggers + } + return nil +} + +// Request to delete a BuildTrigger. +type DeleteBuildTriggerRequest struct { + // ID of the project that owns the trigger. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // ID of the BuildTrigger to delete. + TriggerId string `protobuf:"bytes,2,opt,name=trigger_id,json=triggerId" json:"trigger_id,omitempty"` +} + +func (m *DeleteBuildTriggerRequest) Reset() { *m = DeleteBuildTriggerRequest{} } +func (m *DeleteBuildTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteBuildTriggerRequest) ProtoMessage() {} +func (*DeleteBuildTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *DeleteBuildTriggerRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DeleteBuildTriggerRequest) GetTriggerId() string { + if m != nil { + return m.TriggerId + } + return "" +} + +// Request to update an existing BuildTrigger. +type UpdateBuildTriggerRequest struct { + // ID of the project that owns the trigger. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // ID of the BuildTrigger to update. + TriggerId string `protobuf:"bytes,2,opt,name=trigger_id,json=triggerId" json:"trigger_id,omitempty"` + // BuildTrigger to update. 
+ Trigger *BuildTrigger `protobuf:"bytes,3,opt,name=trigger" json:"trigger,omitempty"` +} + +func (m *UpdateBuildTriggerRequest) Reset() { *m = UpdateBuildTriggerRequest{} } +func (m *UpdateBuildTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateBuildTriggerRequest) ProtoMessage() {} +func (*UpdateBuildTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *UpdateBuildTriggerRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateBuildTriggerRequest) GetTriggerId() string { + if m != nil { + return m.TriggerId + } + return "" +} + +func (m *UpdateBuildTriggerRequest) GetTrigger() *BuildTrigger { + if m != nil { + return m.Trigger + } + return nil +} + +// Optional arguments to enable specific features of builds. +type BuildOptions struct { + // Requested hash for SourceProvenance. + SourceProvenanceHash []Hash_HashType `protobuf:"varint,1,rep,packed,name=source_provenance_hash,json=sourceProvenanceHash,enum=google.devtools.cloudbuild.v1.Hash_HashType" json:"source_provenance_hash,omitempty"` + // Requested verifiability options. + RequestedVerifyOption BuildOptions_VerifyOption `protobuf:"varint,2,opt,name=requested_verify_option,json=requestedVerifyOption,enum=google.devtools.cloudbuild.v1.BuildOptions_VerifyOption" json:"requested_verify_option,omitempty"` + // SubstitutionOption to allow unmatch substitutions. 
+ SubstitutionOption BuildOptions_SubstitutionOption `protobuf:"varint,4,opt,name=substitution_option,json=substitutionOption,enum=google.devtools.cloudbuild.v1.BuildOptions_SubstitutionOption" json:"substitution_option,omitempty"` +} + +func (m *BuildOptions) Reset() { *m = BuildOptions{} } +func (m *BuildOptions) String() string { return proto.CompactTextString(m) } +func (*BuildOptions) ProtoMessage() {} +func (*BuildOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *BuildOptions) GetSourceProvenanceHash() []Hash_HashType { + if m != nil { + return m.SourceProvenanceHash + } + return nil +} + +func (m *BuildOptions) GetRequestedVerifyOption() BuildOptions_VerifyOption { + if m != nil { + return m.RequestedVerifyOption + } + return BuildOptions_NOT_VERIFIED +} + +func (m *BuildOptions) GetSubstitutionOption() BuildOptions_SubstitutionOption { + if m != nil { + return m.SubstitutionOption + } + return BuildOptions_MUST_MATCH +} + +func init() { + proto.RegisterType((*StorageSource)(nil), "google.devtools.cloudbuild.v1.StorageSource") + proto.RegisterType((*RepoSource)(nil), "google.devtools.cloudbuild.v1.RepoSource") + proto.RegisterType((*Source)(nil), "google.devtools.cloudbuild.v1.Source") + proto.RegisterType((*BuiltImage)(nil), "google.devtools.cloudbuild.v1.BuiltImage") + proto.RegisterType((*BuildStep)(nil), "google.devtools.cloudbuild.v1.BuildStep") + proto.RegisterType((*Volume)(nil), "google.devtools.cloudbuild.v1.Volume") + proto.RegisterType((*Results)(nil), "google.devtools.cloudbuild.v1.Results") + proto.RegisterType((*Build)(nil), "google.devtools.cloudbuild.v1.Build") + proto.RegisterType((*BuildOperationMetadata)(nil), "google.devtools.cloudbuild.v1.BuildOperationMetadata") + proto.RegisterType((*SourceProvenance)(nil), "google.devtools.cloudbuild.v1.SourceProvenance") + proto.RegisterType((*FileHashes)(nil), "google.devtools.cloudbuild.v1.FileHashes") + proto.RegisterType((*Hash)(nil), 
"google.devtools.cloudbuild.v1.Hash") + proto.RegisterType((*Secret)(nil), "google.devtools.cloudbuild.v1.Secret") + proto.RegisterType((*CreateBuildRequest)(nil), "google.devtools.cloudbuild.v1.CreateBuildRequest") + proto.RegisterType((*GetBuildRequest)(nil), "google.devtools.cloudbuild.v1.GetBuildRequest") + proto.RegisterType((*ListBuildsRequest)(nil), "google.devtools.cloudbuild.v1.ListBuildsRequest") + proto.RegisterType((*ListBuildsResponse)(nil), "google.devtools.cloudbuild.v1.ListBuildsResponse") + proto.RegisterType((*CancelBuildRequest)(nil), "google.devtools.cloudbuild.v1.CancelBuildRequest") + proto.RegisterType((*BuildTrigger)(nil), "google.devtools.cloudbuild.v1.BuildTrigger") + proto.RegisterType((*CreateBuildTriggerRequest)(nil), "google.devtools.cloudbuild.v1.CreateBuildTriggerRequest") + proto.RegisterType((*GetBuildTriggerRequest)(nil), "google.devtools.cloudbuild.v1.GetBuildTriggerRequest") + proto.RegisterType((*ListBuildTriggersRequest)(nil), "google.devtools.cloudbuild.v1.ListBuildTriggersRequest") + proto.RegisterType((*ListBuildTriggersResponse)(nil), "google.devtools.cloudbuild.v1.ListBuildTriggersResponse") + proto.RegisterType((*DeleteBuildTriggerRequest)(nil), "google.devtools.cloudbuild.v1.DeleteBuildTriggerRequest") + proto.RegisterType((*UpdateBuildTriggerRequest)(nil), "google.devtools.cloudbuild.v1.UpdateBuildTriggerRequest") + proto.RegisterType((*BuildOptions)(nil), "google.devtools.cloudbuild.v1.BuildOptions") + proto.RegisterEnum("google.devtools.cloudbuild.v1.Build_Status", Build_Status_name, Build_Status_value) + proto.RegisterEnum("google.devtools.cloudbuild.v1.Hash_HashType", Hash_HashType_name, Hash_HashType_value) + proto.RegisterEnum("google.devtools.cloudbuild.v1.BuildOptions_VerifyOption", BuildOptions_VerifyOption_name, BuildOptions_VerifyOption_value) + proto.RegisterEnum("google.devtools.cloudbuild.v1.BuildOptions_SubstitutionOption", BuildOptions_SubstitutionOption_name, BuildOptions_SubstitutionOption_value) +} + 
+// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for CloudBuild service + +type CloudBuildClient interface { + // Starts a build with the specified configuration. + // + // The long-running Operation returned by this method will include the ID of + // the build, which can be passed to GetBuild to determine its status (e.g., + // success or failure). + CreateBuild(ctx context.Context, in *CreateBuildRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Returns information about a previously requested build. + // + // The Build that is returned includes its status (e.g., success or failure, + // or in-progress), and timing information. + GetBuild(ctx context.Context, in *GetBuildRequest, opts ...grpc.CallOption) (*Build, error) + // Lists previously requested builds. + // + // Previously requested builds may still be in-progress, or may have finished + // successfully or unsuccessfully. + ListBuilds(ctx context.Context, in *ListBuildsRequest, opts ...grpc.CallOption) (*ListBuildsResponse, error) + // Cancels a requested build in progress. + CancelBuild(ctx context.Context, in *CancelBuildRequest, opts ...grpc.CallOption) (*Build, error) + // Creates a new BuildTrigger. + // + // This API is experimental. + CreateBuildTrigger(ctx context.Context, in *CreateBuildTriggerRequest, opts ...grpc.CallOption) (*BuildTrigger, error) + // Gets information about a BuildTrigger. + // + // This API is experimental. + GetBuildTrigger(ctx context.Context, in *GetBuildTriggerRequest, opts ...grpc.CallOption) (*BuildTrigger, error) + // Lists existing BuildTrigger. + // + // This API is experimental. 
+ ListBuildTriggers(ctx context.Context, in *ListBuildTriggersRequest, opts ...grpc.CallOption) (*ListBuildTriggersResponse, error) + // Deletes an BuildTrigger by its project ID and trigger ID. + // + // This API is experimental. + DeleteBuildTrigger(ctx context.Context, in *DeleteBuildTriggerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Updates an BuildTrigger by its project ID and trigger ID. + // + // This API is experimental. + UpdateBuildTrigger(ctx context.Context, in *UpdateBuildTriggerRequest, opts ...grpc.CallOption) (*BuildTrigger, error) +} + +type cloudBuildClient struct { + cc *grpc.ClientConn +} + +func NewCloudBuildClient(cc *grpc.ClientConn) CloudBuildClient { + return &cloudBuildClient{cc} +} + +func (c *cloudBuildClient) CreateBuild(ctx context.Context, in *CreateBuildRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/CreateBuild", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBuildClient) GetBuild(ctx context.Context, in *GetBuildRequest, opts ...grpc.CallOption) (*Build, error) { + out := new(Build) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/GetBuild", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBuildClient) ListBuilds(ctx context.Context, in *ListBuildsRequest, opts ...grpc.CallOption) (*ListBuildsResponse, error) { + out := new(ListBuildsResponse) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/ListBuilds", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBuildClient) CancelBuild(ctx context.Context, in *CancelBuildRequest, opts ...grpc.CallOption) (*Build, error) { + out := new(Build) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/CancelBuild", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBuildClient) CreateBuildTrigger(ctx context.Context, in *CreateBuildTriggerRequest, opts ...grpc.CallOption) (*BuildTrigger, error) { + out := new(BuildTrigger) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/CreateBuildTrigger", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBuildClient) GetBuildTrigger(ctx context.Context, in *GetBuildTriggerRequest, opts ...grpc.CallOption) (*BuildTrigger, error) { + out := new(BuildTrigger) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/GetBuildTrigger", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBuildClient) ListBuildTriggers(ctx context.Context, in *ListBuildTriggersRequest, opts ...grpc.CallOption) (*ListBuildTriggersResponse, error) { + out := new(ListBuildTriggersResponse) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/ListBuildTriggers", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBuildClient) DeleteBuildTrigger(ctx context.Context, in *DeleteBuildTriggerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/DeleteBuildTrigger", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudBuildClient) UpdateBuildTrigger(ctx context.Context, in *UpdateBuildTriggerRequest, opts ...grpc.CallOption) (*BuildTrigger, error) { + out := new(BuildTrigger) + err := grpc.Invoke(ctx, "/google.devtools.cloudbuild.v1.CloudBuild/UpdateBuildTrigger", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for CloudBuild service + +type CloudBuildServer interface { + // Starts a build with the specified configuration. + // + // The long-running Operation returned by this method will include the ID of + // the build, which can be passed to GetBuild to determine its status (e.g., + // success or failure). + CreateBuild(context.Context, *CreateBuildRequest) (*google_longrunning.Operation, error) + // Returns information about a previously requested build. + // + // The Build that is returned includes its status (e.g., success or failure, + // or in-progress), and timing information. + GetBuild(context.Context, *GetBuildRequest) (*Build, error) + // Lists previously requested builds. + // + // Previously requested builds may still be in-progress, or may have finished + // successfully or unsuccessfully. + ListBuilds(context.Context, *ListBuildsRequest) (*ListBuildsResponse, error) + // Cancels a requested build in progress. + CancelBuild(context.Context, *CancelBuildRequest) (*Build, error) + // Creates a new BuildTrigger. + // + // This API is experimental. + CreateBuildTrigger(context.Context, *CreateBuildTriggerRequest) (*BuildTrigger, error) + // Gets information about a BuildTrigger. + // + // This API is experimental. + GetBuildTrigger(context.Context, *GetBuildTriggerRequest) (*BuildTrigger, error) + // Lists existing BuildTrigger. + // + // This API is experimental. + ListBuildTriggers(context.Context, *ListBuildTriggersRequest) (*ListBuildTriggersResponse, error) + // Deletes an BuildTrigger by its project ID and trigger ID. 
+ // + // This API is experimental. + DeleteBuildTrigger(context.Context, *DeleteBuildTriggerRequest) (*google_protobuf2.Empty, error) + // Updates an BuildTrigger by its project ID and trigger ID. + // + // This API is experimental. + UpdateBuildTrigger(context.Context, *UpdateBuildTriggerRequest) (*BuildTrigger, error) +} + +func RegisterCloudBuildServer(s *grpc.Server, srv CloudBuildServer) { + s.RegisterService(&_CloudBuild_serviceDesc, srv) +} + +func _CloudBuild_CreateBuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBuildRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).CreateBuild(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/CreateBuild", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).CreateBuild(ctx, req.(*CreateBuildRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBuild_GetBuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBuildRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).GetBuild(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/GetBuild", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).GetBuild(ctx, req.(*GetBuildRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBuild_ListBuilds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBuildsRequest) + if err := 
dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).ListBuilds(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/ListBuilds", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).ListBuilds(ctx, req.(*ListBuildsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBuild_CancelBuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelBuildRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).CancelBuild(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/CancelBuild", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).CancelBuild(ctx, req.(*CancelBuildRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBuild_CreateBuildTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBuildTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).CreateBuildTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/CreateBuildTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).CreateBuildTrigger(ctx, req.(*CreateBuildTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBuild_GetBuildTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(GetBuildTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).GetBuildTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/GetBuildTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).GetBuildTrigger(ctx, req.(*GetBuildTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBuild_ListBuildTriggers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBuildTriggersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).ListBuildTriggers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/ListBuildTriggers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).ListBuildTriggers(ctx, req.(*ListBuildTriggersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudBuild_DeleteBuildTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteBuildTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).DeleteBuildTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/DeleteBuildTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).DeleteBuildTrigger(ctx, req.(*DeleteBuildTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_CloudBuild_UpdateBuildTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBuildTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudBuildServer).UpdateBuildTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudbuild.v1.CloudBuild/UpdateBuildTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudBuildServer).UpdateBuildTrigger(ctx, req.(*UpdateBuildTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CloudBuild_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.cloudbuild.v1.CloudBuild", + HandlerType: (*CloudBuildServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateBuild", + Handler: _CloudBuild_CreateBuild_Handler, + }, + { + MethodName: "GetBuild", + Handler: _CloudBuild_GetBuild_Handler, + }, + { + MethodName: "ListBuilds", + Handler: _CloudBuild_ListBuilds_Handler, + }, + { + MethodName: "CancelBuild", + Handler: _CloudBuild_CancelBuild_Handler, + }, + { + MethodName: "CreateBuildTrigger", + Handler: _CloudBuild_CreateBuildTrigger_Handler, + }, + { + MethodName: "GetBuildTrigger", + Handler: _CloudBuild_GetBuildTrigger_Handler, + }, + { + MethodName: "ListBuildTriggers", + Handler: _CloudBuild_ListBuildTriggers_Handler, + }, + { + MethodName: "DeleteBuildTrigger", + Handler: _CloudBuild_DeleteBuildTrigger_Handler, + }, + { + MethodName: "UpdateBuildTrigger", + Handler: _CloudBuild_UpdateBuildTrigger_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/cloudbuild/v1/cloudbuild.proto", +} + +func init() { proto.RegisterFile("google/devtools/cloudbuild/v1/cloudbuild.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0xdb, 0x72, 0x1b, 0x49, + 0x19, 0xf6, 0x48, 0xb2, 0x0e, 0xbf, 0x7c, 0xd0, 0xf6, 0x66, 0x9d, 0xb1, 0xb2, 0xd9, 0x98, 0xc9, + 0xee, 0xe2, 0xcd, 0xee, 0x4a, 0x6b, 0x87, 0x90, 0xac, 0x13, 0x12, 0xdb, 0xb2, 0x62, 0xbb, 0xe2, + 0xc8, 0x61, 0x24, 0x25, 0xc5, 0xa9, 0x86, 0xb1, 0xa6, 0x2d, 0x0f, 0x1e, 0xcd, 0x0c, 0xd3, 0x2d, + 0xb1, 0x4e, 0x2a, 0x05, 0x6c, 0x15, 0xdc, 0x42, 0x15, 0x97, 0x5c, 0x70, 0x78, 0x00, 0x8a, 0xa2, + 0x8a, 0xe2, 0x86, 0x7d, 0x0a, 0x1e, 0x01, 0x2e, 0x79, 0x08, 0xaa, 0x0f, 0x23, 0x8d, 0xa4, 0x84, + 0x19, 0x91, 0xe2, 0x26, 0x99, 0xfe, 0xbb, 0xff, 0x43, 0xff, 0xc7, 0xaf, 0x65, 0xa8, 0x74, 0x3d, + 0xaf, 0xeb, 0xe0, 0xaa, 0x85, 0x07, 0xd4, 0xf3, 0x1c, 0x52, 0xed, 0x38, 0x5e, 0xdf, 0x3a, 0xe9, + 0xdb, 0x8e, 0x55, 0x1d, 0x6c, 0x44, 0x56, 0x15, 0x3f, 0xf0, 0xa8, 0x87, 0xae, 0x8a, 0xf3, 0x95, + 0xf0, 0x7c, 0x25, 0x72, 0x62, 0xb0, 0x51, 0x7e, 0x57, 0x8a, 0x33, 0x7d, 0xbb, 0x6a, 0xba, 0xae, + 0x47, 0x4d, 0x6a, 0x7b, 0x2e, 0x11, 0xcc, 0xe5, 0xeb, 0x72, 0xd7, 0xf1, 0xdc, 0x6e, 0xd0, 0x77, + 0x5d, 0xdb, 0xed, 0x56, 0x3d, 0x1f, 0x07, 0x63, 0x87, 0xde, 0x93, 0x87, 0xf8, 0xea, 0xa4, 0x7f, + 0x5a, 0xb5, 0xfa, 0xe2, 0x80, 0xdc, 0xbf, 0x32, 0xb9, 0x8f, 0x7b, 0x3e, 0xbd, 0x90, 0x9b, 0xd7, + 0x26, 0x37, 0xa9, 0xdd, 0xc3, 0x84, 0x9a, 0x3d, 0x5f, 0x1c, 0xd0, 0x0c, 0x58, 0x6c, 0x52, 0x2f, + 0x30, 0xbb, 0xb8, 0xe9, 0xf5, 0x83, 0x0e, 0x46, 0x2b, 0x90, 0x3d, 0xe9, 0x77, 0xce, 0x31, 0x55, + 0x95, 0x35, 0x65, 0xbd, 0xa0, 0xcb, 0x15, 0xa3, 0x7b, 0x27, 0x3f, 0xc2, 0x1d, 0xaa, 0xa6, 0x04, + 0x5d, 0xac, 0xd0, 0x7b, 0x00, 0x5d, 0xec, 0x4a, 0x9b, 0xd5, 0xf4, 0x9a, 0xb2, 0x9e, 0xd6, 0x23, + 0x14, 0xed, 0xaf, 0x0a, 0x80, 0x8e, 0x7d, 0x4f, 0x8a, 0xbf, 0x0a, 0xe0, 0x07, 0x1e, 0xe3, 0x34, + 0x6c, 0x4b, 0xaa, 0x28, 0x48, 0xca, 0xa1, 0x85, 0xae, 0x40, 0x21, 0xc0, 0xbe, 0x67, 0xb8, 0x66, + 0x0f, 0x4b, 0x45, 0x79, 0x46, 0x68, 0x98, 0x3d, 0x8c, 0xbe, 0x06, 0xc5, 0x93, 0xc0, 0x74, 0x3b, + 0x67, 0x62, 0x9b, 0xe9, 0x2a, 0x1c, 0xcc, 0xe9, 0x20, 0x88, 0xfc, 
0xc8, 0x15, 0xc8, 0x53, 0xb3, + 0x2b, 0xf6, 0x33, 0x72, 0x3f, 0x47, 0xcd, 0x2e, 0xdf, 0xbc, 0x06, 0xd0, 0xf1, 0x7a, 0x3d, 0x9b, + 0x1a, 0xe4, 0xcc, 0x54, 0xe7, 0xe5, 0x76, 0x41, 0xd0, 0x9a, 0x67, 0xe6, 0x2e, 0x40, 0x3e, 0xc0, + 0x03, 0x9b, 0x30, 0xbb, 0xff, 0xa6, 0x40, 0x56, 0xda, 0xdc, 0x86, 0x25, 0x22, 0x7c, 0x64, 0x10, + 0x4e, 0xe1, 0x96, 0x15, 0x37, 0x3f, 0xa9, 0xfc, 0xd7, 0xe0, 0x57, 0xc6, 0x1c, 0x7b, 0x30, 0xa7, + 0x2f, 0x92, 0x31, 0x4f, 0x1f, 0x41, 0x91, 0xdf, 0x55, 0xca, 0x4c, 0x73, 0x99, 0x1f, 0xc5, 0xc8, + 0x1c, 0xb9, 0x92, 0xdd, 0x3c, 0x18, 0xae, 0x76, 0xf3, 0x90, 0x15, 0x82, 0xb4, 0x3b, 0x00, 0xbb, + 0x7d, 0xdb, 0xa1, 0x87, 0x3d, 0xb3, 0x8b, 0x11, 0x82, 0x0c, 0xf7, 0x86, 0x70, 0x35, 0xff, 0x66, + 0xb1, 0xb4, 0xec, 0x2e, 0x26, 0x54, 0xf8, 0x50, 0x97, 0x2b, 0xed, 0xcb, 0x14, 0x14, 0x18, 0xab, + 0xd5, 0xa4, 0xd8, 0x7f, 0x25, 0x67, 0x09, 0xd2, 0xd8, 0x1d, 0xa8, 0xa9, 0xb5, 0xf4, 0x7a, 0x41, + 0x67, 0x9f, 0xec, 0x94, 0x19, 0x74, 0x89, 0x9a, 0xe6, 0x24, 0xfe, 0xcd, 0x4e, 0x59, 0x76, 0x20, + 0x02, 0xa0, 0xb3, 0x4f, 0xb4, 0x04, 0x29, 0xdb, 0x12, 0x2e, 0xd7, 0x53, 0xb6, 0x85, 0x56, 0x21, + 0xff, 0x13, 0xd3, 0xa6, 0xc6, 0xa9, 0x17, 0xa8, 0x59, 0xce, 0x99, 0x63, 0xeb, 0x87, 0x5e, 0xc0, + 0x12, 0x0a, 0xbb, 0x34, 0xb8, 0xf0, 0x3d, 0xdb, 0xa5, 0x6a, 0x8e, 0xb3, 0x44, 0x28, 0x2c, 0x83, + 0x08, 0xee, 0x04, 0x98, 0x1a, 0xcc, 0x92, 0x3c, 0x67, 0x2e, 0x08, 0x4a, 0xdd, 0x1d, 0xa0, 0x07, + 0x90, 0x1b, 0x78, 0x4e, 0xbf, 0x87, 0x89, 0x5a, 0x58, 0x4b, 0xaf, 0x17, 0x37, 0x3f, 0x88, 0xf1, + 0xe8, 0x53, 0x7e, 0x5a, 0x0f, 0xb9, 0xb4, 0xcf, 0x20, 0x2b, 0x48, 0xaf, 0x74, 0x00, 0x82, 0x8c, + 0x6f, 0xd2, 0x33, 0x99, 0x9b, 0xfc, 0x5b, 0xfb, 0x02, 0x72, 0x3a, 0x26, 0x7d, 0x87, 0x12, 0xb4, + 0x03, 0x59, 0x9b, 0xb9, 0x9d, 0x70, 0x17, 0xc5, 0x87, 0x73, 0x14, 0x28, 0x5d, 0x32, 0xa2, 0x1b, + 0xf0, 0x16, 0xdf, 0x36, 0x08, 0xc5, 0xbe, 0x21, 0xa5, 0x09, 0xef, 0x2e, 0x9f, 0x84, 0xc1, 0xe1, + 0x2c, 0x44, 0xfb, 0x67, 0x01, 0xe6, 0x79, 0xc0, 0xa4, 0x83, 0x95, 0xa1, 0x83, 0xc7, 0xeb, 0xac, + 0x34, 
0x59, 0x67, 0x35, 0xc8, 0x12, 0x6a, 0xd2, 0x3e, 0xe1, 0x17, 0x59, 0xda, 0xfc, 0x38, 0x81, + 0x9d, 0x56, 0xa5, 0xc9, 0x59, 0x74, 0xc9, 0x8a, 0xae, 0xc3, 0xa2, 0xf8, 0x32, 0x2c, 0x4c, 0x4d, + 0xdb, 0x51, 0x55, 0xae, 0x66, 0x41, 0x10, 0xf7, 0x38, 0x0d, 0x7d, 0x2b, 0xcc, 0x4b, 0x99, 0xe0, + 0x71, 0xe1, 0x10, 0xe9, 0xac, 0x4b, 0x26, 0x74, 0x1f, 0xe6, 0x99, 0x1f, 0x88, 0x5a, 0xe4, 0xfe, + 0x5c, 0x4f, 0x62, 0x27, 0x73, 0x90, 0x2e, 0xd8, 0xd0, 0x36, 0xe4, 0x02, 0x11, 0x1b, 0x15, 0xb8, + 0xfe, 0x0f, 0x63, 0x0b, 0x8c, 0x9f, 0xd6, 0x43, 0x36, 0x74, 0x17, 0x8a, 0x9d, 0x00, 0x9b, 0x14, + 0x1b, 0xac, 0x77, 0xaa, 0x59, 0x2e, 0xa5, 0x1c, 0x4a, 0x09, 0x1b, 0x6b, 0xa5, 0x15, 0x36, 0x56, + 0x1d, 0xc4, 0x71, 0x46, 0x40, 0x9f, 0x03, 0x10, 0x6a, 0x06, 0x54, 0xf0, 0xe6, 0x62, 0x79, 0x0b, + 0xfc, 0x34, 0x67, 0xbd, 0x0b, 0xc5, 0x53, 0xdb, 0xb5, 0xc9, 0x99, 0xe0, 0xcd, 0xc7, 0xeb, 0x15, + 0xc7, 0x39, 0xf3, 0x4d, 0xc8, 0x31, 0x2e, 0xaf, 0x4f, 0xd5, 0x05, 0xce, 0xb8, 0x3a, 0xc5, 0xb8, + 0x27, 0xc7, 0x88, 0x1e, 0x9e, 0x64, 0x6d, 0x41, 0xa6, 0xdb, 0x22, 0x4f, 0xb7, 0x30, 0x23, 0xaf, + 0x41, 0xd1, 0xf1, 0xba, 0xc4, 0x90, 0x73, 0xe1, 0x6d, 0x51, 0x92, 0x8c, 0xb4, 0x2b, 0x66, 0xc3, + 0xf7, 0xe1, 0x2d, 0x11, 0x2e, 0xc3, 0x0f, 0xbc, 0x01, 0x76, 0x4d, 0xb7, 0x83, 0xd5, 0x77, 0xb8, + 0xde, 0x6a, 0xa2, 0x70, 0x3f, 0x19, 0xb2, 0xe9, 0x25, 0x32, 0x41, 0x41, 0xeb, 0x50, 0x12, 0x05, + 0x41, 0x03, 0xbb, 0xdb, 0xc5, 0x01, 0x4b, 0xe8, 0x15, 0x6e, 0xc3, 0x12, 0xa7, 0xb7, 0x04, 0xf9, + 0xd0, 0x42, 0x75, 0xc8, 0x79, 0x3e, 0x9f, 0x9d, 0xea, 0x65, 0xae, 0x3d, 0x51, 0x5a, 0x1f, 0x0b, + 0x16, 0x3d, 0xe4, 0x45, 0x97, 0x21, 0xe7, 0x78, 0x5d, 0xa3, 0x1f, 0x38, 0xea, 0xaa, 0xe8, 0x8f, + 0x8e, 0xd7, 0x6d, 0x07, 0x0e, 0xfa, 0x01, 0x2c, 0x92, 0xfe, 0x09, 0xa1, 0x36, 0xed, 0x0b, 0x2d, + 0x57, 0x79, 0x52, 0xde, 0x4e, 0x56, 0x3c, 0x51, 0xce, 0x3a, 0xeb, 0x67, 0xfa, 0xb8, 0x34, 0xd6, + 0x5b, 0xa8, 0xd9, 0x25, 0xea, 0x35, 0xd1, 0x4a, 0xd9, 0x37, 0x6b, 0x67, 0xa2, 0xb7, 0x11, 0x75, + 0x2d, 0x51, 0x3b, 0x6b, 0xf2, 0xd3, 0x7a, 
0xc8, 0x55, 0xde, 0x06, 0x34, 0xad, 0x99, 0x75, 0xe8, + 0x73, 0x7c, 0x21, 0xfb, 0x05, 0xfb, 0x44, 0x97, 0x60, 0x7e, 0x60, 0x3a, 0xfd, 0x70, 0xea, 0x8a, + 0xc5, 0x56, 0xea, 0x8e, 0xa2, 0xfd, 0x14, 0xb2, 0xa2, 0xf0, 0x11, 0x82, 0xa5, 0x66, 0x6b, 0xa7, + 0xd5, 0x6e, 0x1a, 0xed, 0xc6, 0xa3, 0xc6, 0xf1, 0xb3, 0x46, 0x69, 0x0e, 0x01, 0x64, 0xbf, 0xdd, + 0xae, 0xb7, 0xeb, 0x7b, 0x25, 0x05, 0x15, 0x21, 0xf7, 0xec, 0x58, 0x7f, 0x74, 0xd8, 0xd8, 0x2f, + 0xa5, 0xd8, 0xa2, 0xd9, 0xae, 0xd5, 0xea, 0xcd, 0x66, 0x29, 0xcd, 0x16, 0x0f, 0x77, 0x0e, 0x8f, + 0xda, 0x7a, 0xbd, 0x94, 0x61, 0x62, 0x0e, 0x1b, 0xad, 0xba, 0xde, 0xd8, 0x39, 0x32, 0xea, 0xba, + 0x7e, 0xac, 0x97, 0xe6, 0xd9, 0x81, 0xd6, 0xe1, 0xe3, 0xfa, 0x71, 0xbb, 0x55, 0xca, 0xa2, 0x45, + 0x28, 0xd4, 0x76, 0x1a, 0xb5, 0xfa, 0xd1, 0x51, 0x7d, 0xaf, 0x94, 0xd3, 0x5a, 0xb0, 0x22, 0x03, + 0x25, 0x41, 0xc5, 0x63, 0x4c, 0x4d, 0xcb, 0xa4, 0x26, 0xda, 0x82, 0x79, 0x7e, 0x71, 0x7e, 0x91, + 0xe2, 0xe6, 0xfb, 0x49, 0x02, 0xa1, 0x0b, 0x16, 0xed, 0x0f, 0x69, 0x28, 0x4d, 0x66, 0x1f, 0xb2, + 0xe0, 0x72, 0x80, 0x89, 0xe7, 0x0c, 0x30, 0xeb, 0xbf, 0x63, 0x33, 0x3f, 0x3d, 0xfb, 0xcc, 0xd7, + 0xdf, 0x09, 0x85, 0x8d, 0x63, 0xac, 0xef, 0xc1, 0xa5, 0xa1, 0x96, 0x28, 0x04, 0xc8, 0xce, 0x08, + 0x01, 0x74, 0x14, 0x8a, 0x89, 0x20, 0xac, 0x1f, 0xb2, 0xbe, 0xe1, 0x60, 0xe3, 0xcc, 0x24, 0x67, + 0x98, 0xa8, 0x19, 0x9e, 0x35, 0x0f, 0x66, 0x2c, 0xc3, 0xca, 0x43, 0xdb, 0xc1, 0x07, 0x5c, 0x82, + 0x48, 0x55, 0x38, 0x1d, 0x12, 0xca, 0x67, 0xb0, 0x3c, 0xb1, 0xfd, 0x8a, 0x7c, 0x7a, 0x10, 0xcd, + 0xa7, 0xf8, 0x4b, 0x8d, 0x04, 0x46, 0x53, 0xaf, 0x01, 0x30, 0xda, 0x40, 0xdb, 0x50, 0x18, 0xde, + 0x4c, 0x55, 0xf8, 0xbd, 0xae, 0xc7, 0x88, 0x65, 0x9c, 0x7a, 0x3e, 0xb4, 0x5d, 0xfb, 0x99, 0x02, + 0x19, 0xf6, 0x81, 0xb6, 0x21, 0x43, 0x2f, 0x7c, 0x31, 0xda, 0x97, 0x62, 0x83, 0xca, 0x58, 0xf8, + 0x3f, 0xad, 0x0b, 0x1f, 0xeb, 0x9c, 0x73, 0xbc, 0x5e, 0x16, 0xa4, 0xd1, 0xda, 0x1a, 0xe4, 0xc3, + 0x73, 0x28, 0x0f, 0x99, 0xc6, 0x71, 0xa3, 0x2e, 0x6a, 0xa4, 0x79, 0xb0, 0xb3, 
0x79, 0xeb, 0x9b, + 0x25, 0x45, 0xfb, 0x8a, 0xe1, 0x4a, 0x5e, 0x9b, 0x68, 0x0d, 0x16, 0xce, 0x7b, 0xc4, 0x38, 0xc7, + 0x17, 0x46, 0x04, 0x67, 0xc0, 0x79, 0x8f, 0x3c, 0xc2, 0x17, 0x1c, 0xb1, 0x36, 0xc7, 0xb0, 0x4e, + 0x9a, 0x5f, 0xf9, 0x1b, 0x89, 0x1a, 0x80, 0xfc, 0xaf, 0xee, 0x0e, 0x44, 0xfc, 0x46, 0x08, 0xa9, + 0x7c, 0x0f, 0x96, 0xc6, 0x37, 0xe3, 0xba, 0xc1, 0x42, 0x34, 0x24, 0x1e, 0xa0, 0x1a, 0x9f, 0x6f, + 0xa2, 0x98, 0xf0, 0x8f, 0xfb, 0x98, 0xd0, 0x38, 0x58, 0x3f, 0xac, 0xd3, 0xd4, 0xec, 0x75, 0xba, + 0x0d, 0xcb, 0xfb, 0x98, 0xce, 0xa2, 0x4d, 0x60, 0xa1, 0x54, 0x88, 0x85, 0xb4, 0x5f, 0x2a, 0xf0, + 0xd6, 0x91, 0x4d, 0x84, 0x0c, 0x92, 0x50, 0xc8, 0x15, 0x28, 0xf8, 0xbc, 0xfa, 0xed, 0xe7, 0xc2, + 0x0b, 0xf3, 0x7a, 0x9e, 0x11, 0x9a, 0xf6, 0x73, 0xf1, 0x8a, 0x61, 0x9b, 0xd4, 0x3b, 0xc7, 0xae, + 0x04, 0xd1, 0xfc, 0x78, 0x8b, 0x11, 0xd8, 0x20, 0x3d, 0xb5, 0x1d, 0x8a, 0x03, 0x3e, 0xb5, 0x0b, + 0xba, 0x5c, 0x69, 0xcf, 0x01, 0x45, 0xed, 0x20, 0xbe, 0xe7, 0x12, 0x8c, 0xee, 0xb1, 0x17, 0x17, + 0xa3, 0xc8, 0x9c, 0x4e, 0xe6, 0x1d, 0xc9, 0x83, 0x3e, 0x84, 0x65, 0x17, 0x7f, 0x41, 0x8d, 0x88, + 0x3d, 0xe2, 0xe6, 0x8b, 0x8c, 0xfc, 0x24, 0xb4, 0x49, 0xab, 0x01, 0xaa, 0xb1, 0xca, 0x76, 0xde, + 0xc4, 0x93, 0xbf, 0xc8, 0xc0, 0xc2, 0x6e, 0x64, 0xe6, 0x4e, 0xc1, 0xce, 0x35, 0x28, 0x5a, 0x98, + 0x74, 0x02, 0x9b, 0x8f, 0x52, 0x0e, 0xb9, 0x0a, 0x7a, 0x94, 0x84, 0x5a, 0x50, 0x0a, 0xe7, 0x38, + 0xc5, 0x3d, 0xdf, 0x31, 0x69, 0x88, 0x8b, 0x66, 0xe8, 0x7b, 0xcb, 0x52, 0x44, 0x4b, 0x4a, 0x40, + 0xf7, 0xc2, 0x04, 0xcb, 0x24, 0x4f, 0xb0, 0x83, 0x39, 0x99, 0x62, 0xe8, 0x5d, 0xe0, 0x2d, 0x82, + 0x17, 0x61, 0x5e, 0x3e, 0x0b, 0x87, 0x94, 0x49, 0x00, 0x38, 0x3f, 0x13, 0x00, 0x2c, 0x43, 0xde, + 0xb2, 0x89, 0x79, 0xe2, 0x60, 0x4b, 0x2d, 0xac, 0x29, 0xeb, 0x79, 0x7d, 0xb8, 0x46, 0xd6, 0x24, + 0x9c, 0x10, 0x18, 0xf7, 0x7e, 0x12, 0xe3, 0x65, 0x00, 0xe2, 0x51, 0xc5, 0x9b, 0x03, 0x80, 0xdd, + 0x12, 0x2c, 0x49, 0x00, 0x26, 0xdd, 0xad, 0xfd, 0x5c, 0x81, 0xd5, 0x48, 0x17, 0x90, 0xc6, 0x24, + 0x4c, 0xaa, 0x3a, 
0xe4, 0x64, 0xf8, 0x64, 0x3b, 0xf8, 0x78, 0x86, 0x0b, 0xeb, 0x21, 0xaf, 0xf6, + 0x14, 0x56, 0xc2, 0xbe, 0x30, 0x9b, 0xfe, 0xab, 0x00, 0x11, 0x24, 0x29, 0x6e, 0x5b, 0xa0, 0x21, + 0x88, 0xd4, 0x3e, 0x07, 0x75, 0x58, 0xa4, 0x52, 0x70, 0xc2, 0x9e, 0xa1, 0x59, 0xb0, 0xfa, 0x0a, + 0x56, 0x59, 0xe6, 0xfb, 0x90, 0x97, 0x4a, 0xc2, 0x42, 0x9f, 0xe9, 0xde, 0x43, 0x66, 0xed, 0x3b, + 0xb0, 0xba, 0x87, 0x1d, 0xfc, 0x3f, 0xf9, 0x3e, 0xe6, 0xee, 0xbf, 0x57, 0x60, 0xb5, 0xed, 0x5b, + 0xe6, 0xff, 0x41, 0x76, 0x34, 0xec, 0xe9, 0x37, 0x08, 0xfb, 0xdf, 0xd3, 0xb2, 0x05, 0x49, 0xd8, + 0x8e, 0x4e, 0x60, 0x65, 0xea, 0xf1, 0x31, 0x82, 0x08, 0xb3, 0x0e, 0xf7, 0x4b, 0x93, 0xcf, 0x0f, + 0x0e, 0x17, 0x7c, 0x06, 0x0b, 0xb9, 0x13, 0xb0, 0x65, 0x0c, 0x70, 0x60, 0x9f, 0x5e, 0x18, 0xe2, + 0xb5, 0x20, 0xdf, 0xcf, 0x77, 0x66, 0x78, 0x68, 0x54, 0x9e, 0x72, 0x01, 0x62, 0xc5, 0x20, 0xa2, + 0x14, 0x1c, 0x25, 0x23, 0x0f, 0xde, 0x8e, 0x96, 0x71, 0xa8, 0x2d, 0xc3, 0xb5, 0xdd, 0x9f, 0x45, + 0x5b, 0xb4, 0xf8, 0xa5, 0x4e, 0x44, 0xa6, 0x68, 0x5a, 0x05, 0x16, 0xc6, 0x0c, 0x28, 0xc1, 0x42, + 0xe3, 0xb8, 0x65, 0x3c, 0xad, 0xeb, 0x87, 0x0f, 0x0f, 0xeb, 0x7b, 0xa5, 0x39, 0xb4, 0x00, 0xf9, + 0xe1, 0x4a, 0xd1, 0x6e, 0x8d, 0xb7, 0x15, 0xc9, 0xb5, 0x04, 0xf0, 0xb8, 0xdd, 0x6c, 0x19, 0x8f, + 0x77, 0x5a, 0xb5, 0x83, 0xd2, 0x1c, 0x5a, 0x86, 0xe2, 0xce, 0xd1, 0xd1, 0xf1, 0x33, 0xe3, 0xe8, + 0xf8, 0xb8, 0x59, 0x2f, 0x29, 0x9b, 0xff, 0x2e, 0x02, 0xd4, 0x98, 0xb1, 0xe2, 0x67, 0x8b, 0x5f, + 0x2b, 0x50, 0x8c, 0x34, 0x12, 0xb4, 0x11, 0x73, 0xb3, 0x69, 0xe8, 0x51, 0xbe, 0x1a, 0xb2, 0x44, + 0x7e, 0x45, 0xad, 0x0c, 0x9f, 0x0a, 0x5a, 0xf5, 0xcb, 0x7f, 0xfc, 0xeb, 0x37, 0xa9, 0x8f, 0xb4, + 0xb5, 0xea, 0x60, 0xa3, 0x2a, 0x93, 0x95, 0x54, 0x5f, 0x8c, 0x12, 0xf9, 0x65, 0x55, 0x4c, 0xd2, + 0x2d, 0x39, 0x0c, 0x7e, 0xa5, 0x40, 0x3e, 0x6c, 0x2c, 0xa8, 0x12, 0x63, 0xcf, 0x04, 0x32, 0x29, + 0x27, 0x1a, 0x3c, 0xda, 0xa7, 0xdc, 0xa6, 0xaf, 0xa3, 0x0f, 0xe2, 0x6c, 0xaa, 0xbe, 0xb0, 0xad, + 0x97, 0xe8, 0xb7, 0x0a, 0xc0, 0x08, 0x37, 0xa0, 0xcf, 
0x62, 0x74, 0x4c, 0x41, 0x9d, 0xf2, 0xc6, + 0x0c, 0x1c, 0xa2, 0x5b, 0x69, 0xeb, 0xdc, 0x44, 0x0d, 0xc5, 0xba, 0x0d, 0xfd, 0x8e, 0x85, 0x70, + 0x84, 0x2c, 0xe2, 0x43, 0x38, 0x85, 0x42, 0x12, 0x7a, 0xed, 0x36, 0x37, 0x69, 0x43, 0xfb, 0x24, + 0x91, 0xd7, 0xb6, 0x3a, 0x5c, 0xcf, 0x96, 0x72, 0x03, 0xfd, 0x59, 0x19, 0xc3, 0xac, 0x21, 0x76, + 0xb9, 0x93, 0x3c, 0xd7, 0xc6, 0x1b, 0x61, 0x79, 0x96, 0xce, 0xa5, 0xdd, 0xe4, 0x66, 0x7f, 0xaa, + 0x69, 0xaf, 0x37, 0x3b, 0x6c, 0xed, 0x5b, 0x61, 0x97, 0x43, 0x7f, 0x52, 0x46, 0xa8, 0x37, 0xb4, + 0xf7, 0x56, 0xc2, 0x5c, 0x7c, 0x13, 0x63, 0xa5, 0x8f, 0x51, 0x35, 0xde, 0xd8, 0xea, 0x8b, 0x51, + 0xb7, 0x7f, 0x89, 0xfe, 0x12, 0xc5, 0xd8, 0xe1, 0xec, 0x43, 0xb7, 0x93, 0x26, 0xde, 0xc4, 0xa0, + 0x2d, 0xdf, 0x99, 0x9d, 0x51, 0x26, 0xee, 0x0d, 0x7e, 0x83, 0xf7, 0x51, 0x02, 0x77, 0xb3, 0xd4, + 0x45, 0xd3, 0xa3, 0x34, 0x36, 0x31, 0x5e, 0x3b, 0x7d, 0xcb, 0x2b, 0x53, 0xa8, 0xb0, 0xde, 0xf3, + 0xe9, 0x45, 0xe8, 0xd6, 0x1b, 0x33, 0xbb, 0xf5, 0x2b, 0x05, 0xd0, 0xf4, 0x40, 0x8e, 0xb5, 0xf0, + 0xb5, 0x33, 0x7c, 0xb6, 0x6c, 0xd8, 0xe6, 0x66, 0x6f, 0x6d, 0xce, 0x6a, 0xf6, 0x30, 0x8f, 0x77, + 0xcf, 0x41, 0xed, 0x78, 0xbd, 0x50, 0xe7, 0x98, 0xaa, 0x27, 0xca, 0x77, 0xf7, 0x25, 0xbd, 0xeb, + 0x39, 0xa6, 0xdb, 0xad, 0x78, 0x41, 0xb7, 0xda, 0xc5, 0x2e, 0xf7, 0x5d, 0x55, 0x6c, 0x99, 0xbe, + 0x4d, 0x5e, 0xf3, 0xb7, 0xb8, 0xbb, 0xa3, 0xd5, 0x1f, 0x53, 0xe9, 0xfd, 0xda, 0xee, 0x49, 0x96, + 0x73, 0xde, 0xfc, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0x36, 0x27, 0x4f, 0xc4, 0x1b, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/controller.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/controller.pb.go new file mode 100644 index 000000000..1e54c283d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/controller.pb.go @@ -0,0 +1,475 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/devtools/clouddebugger/v2/controller.proto + +/* +Package clouddebugger is a generated protocol buffer package. + +It is generated from these files: + google/devtools/clouddebugger/v2/controller.proto + google/devtools/clouddebugger/v2/data.proto + google/devtools/clouddebugger/v2/debugger.proto + +It has these top-level messages: + RegisterDebuggeeRequest + RegisterDebuggeeResponse + ListActiveBreakpointsRequest + ListActiveBreakpointsResponse + UpdateActiveBreakpointRequest + UpdateActiveBreakpointResponse + FormatMessage + StatusMessage + SourceLocation + Variable + StackFrame + Breakpoint + Debuggee + SetBreakpointRequest + SetBreakpointResponse + GetBreakpointRequest + GetBreakpointResponse + DeleteBreakpointRequest + ListBreakpointsRequest + ListBreakpointsResponse + ListDebuggeesRequest + ListDebuggeesResponse +*/ +package clouddebugger + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request to register a debuggee. +type RegisterDebuggeeRequest struct { + // Debuggee information to register. + // The fields `project`, `uniquifier`, `description` and `agent_version` + // of the debuggee must be set. 
+ Debuggee *Debuggee `protobuf:"bytes,1,opt,name=debuggee" json:"debuggee,omitempty"` +} + +func (m *RegisterDebuggeeRequest) Reset() { *m = RegisterDebuggeeRequest{} } +func (m *RegisterDebuggeeRequest) String() string { return proto.CompactTextString(m) } +func (*RegisterDebuggeeRequest) ProtoMessage() {} +func (*RegisterDebuggeeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *RegisterDebuggeeRequest) GetDebuggee() *Debuggee { + if m != nil { + return m.Debuggee + } + return nil +} + +// Response for registering a debuggee. +type RegisterDebuggeeResponse struct { + // Debuggee resource. + // The field `id` is guranteed to be set (in addition to the echoed fields). + // If the field `is_disabled` is set to `true`, the agent should disable + // itself by removing all breakpoints and detaching from the application. + // It should however continue to poll `RegisterDebuggee` until reenabled. + Debuggee *Debuggee `protobuf:"bytes,1,opt,name=debuggee" json:"debuggee,omitempty"` +} + +func (m *RegisterDebuggeeResponse) Reset() { *m = RegisterDebuggeeResponse{} } +func (m *RegisterDebuggeeResponse) String() string { return proto.CompactTextString(m) } +func (*RegisterDebuggeeResponse) ProtoMessage() {} +func (*RegisterDebuggeeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *RegisterDebuggeeResponse) GetDebuggee() *Debuggee { + if m != nil { + return m.Debuggee + } + return nil +} + +// Request to list active breakpoints. +type ListActiveBreakpointsRequest struct { + // Identifies the debuggee. + DebuggeeId string `protobuf:"bytes,1,opt,name=debuggee_id,json=debuggeeId" json:"debuggee_id,omitempty"` + // A token that, if specified, blocks the method call until the list + // of active breakpoints has changed, or a server-selected timeout has + // expired. The value should be set from the `next_wait_token` field in + // the last response. The initial value should be set to `"init"`. 
+ WaitToken string `protobuf:"bytes,2,opt,name=wait_token,json=waitToken" json:"wait_token,omitempty"` + // If set to `true` (recommended), returns `google.rpc.Code.OK` status and + // sets the `wait_expired` response field to `true` when the server-selected + // timeout has expired. + // + // If set to `false` (deprecated), returns `google.rpc.Code.ABORTED` status + // when the server-selected timeout has expired. + SuccessOnTimeout bool `protobuf:"varint,3,opt,name=success_on_timeout,json=successOnTimeout" json:"success_on_timeout,omitempty"` +} + +func (m *ListActiveBreakpointsRequest) Reset() { *m = ListActiveBreakpointsRequest{} } +func (m *ListActiveBreakpointsRequest) String() string { return proto.CompactTextString(m) } +func (*ListActiveBreakpointsRequest) ProtoMessage() {} +func (*ListActiveBreakpointsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListActiveBreakpointsRequest) GetDebuggeeId() string { + if m != nil { + return m.DebuggeeId + } + return "" +} + +func (m *ListActiveBreakpointsRequest) GetWaitToken() string { + if m != nil { + return m.WaitToken + } + return "" +} + +func (m *ListActiveBreakpointsRequest) GetSuccessOnTimeout() bool { + if m != nil { + return m.SuccessOnTimeout + } + return false +} + +// Response for listing active breakpoints. +type ListActiveBreakpointsResponse struct { + // List of all active breakpoints. + // The fields `id` and `location` are guaranteed to be set on each breakpoint. + Breakpoints []*Breakpoint `protobuf:"bytes,1,rep,name=breakpoints" json:"breakpoints,omitempty"` + // A token that can be used in the next method call to block until + // the list of breakpoints changes. + NextWaitToken string `protobuf:"bytes,2,opt,name=next_wait_token,json=nextWaitToken" json:"next_wait_token,omitempty"` + // If set to `true`, indicates that there is no change to the + // list of active breakpoints and the server-selected timeout has expired. 
+ // The `breakpoints` field would be empty and should be ignored. + WaitExpired bool `protobuf:"varint,3,opt,name=wait_expired,json=waitExpired" json:"wait_expired,omitempty"` +} + +func (m *ListActiveBreakpointsResponse) Reset() { *m = ListActiveBreakpointsResponse{} } +func (m *ListActiveBreakpointsResponse) String() string { return proto.CompactTextString(m) } +func (*ListActiveBreakpointsResponse) ProtoMessage() {} +func (*ListActiveBreakpointsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListActiveBreakpointsResponse) GetBreakpoints() []*Breakpoint { + if m != nil { + return m.Breakpoints + } + return nil +} + +func (m *ListActiveBreakpointsResponse) GetNextWaitToken() string { + if m != nil { + return m.NextWaitToken + } + return "" +} + +func (m *ListActiveBreakpointsResponse) GetWaitExpired() bool { + if m != nil { + return m.WaitExpired + } + return false +} + +// Request to update an active breakpoint. +type UpdateActiveBreakpointRequest struct { + // Identifies the debuggee being debugged. + DebuggeeId string `protobuf:"bytes,1,opt,name=debuggee_id,json=debuggeeId" json:"debuggee_id,omitempty"` + // Updated breakpoint information. + // The field `id` must be set. + // The agent must echo all Breakpoint specification fields in the update. 
+ Breakpoint *Breakpoint `protobuf:"bytes,2,opt,name=breakpoint" json:"breakpoint,omitempty"` +} + +func (m *UpdateActiveBreakpointRequest) Reset() { *m = UpdateActiveBreakpointRequest{} } +func (m *UpdateActiveBreakpointRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateActiveBreakpointRequest) ProtoMessage() {} +func (*UpdateActiveBreakpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *UpdateActiveBreakpointRequest) GetDebuggeeId() string { + if m != nil { + return m.DebuggeeId + } + return "" +} + +func (m *UpdateActiveBreakpointRequest) GetBreakpoint() *Breakpoint { + if m != nil { + return m.Breakpoint + } + return nil +} + +// Response for updating an active breakpoint. +// The message is defined to allow future extensions. +type UpdateActiveBreakpointResponse struct { +} + +func (m *UpdateActiveBreakpointResponse) Reset() { *m = UpdateActiveBreakpointResponse{} } +func (m *UpdateActiveBreakpointResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateActiveBreakpointResponse) ProtoMessage() {} +func (*UpdateActiveBreakpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func init() { + proto.RegisterType((*RegisterDebuggeeRequest)(nil), "google.devtools.clouddebugger.v2.RegisterDebuggeeRequest") + proto.RegisterType((*RegisterDebuggeeResponse)(nil), "google.devtools.clouddebugger.v2.RegisterDebuggeeResponse") + proto.RegisterType((*ListActiveBreakpointsRequest)(nil), "google.devtools.clouddebugger.v2.ListActiveBreakpointsRequest") + proto.RegisterType((*ListActiveBreakpointsResponse)(nil), "google.devtools.clouddebugger.v2.ListActiveBreakpointsResponse") + proto.RegisterType((*UpdateActiveBreakpointRequest)(nil), "google.devtools.clouddebugger.v2.UpdateActiveBreakpointRequest") + proto.RegisterType((*UpdateActiveBreakpointResponse)(nil), "google.devtools.clouddebugger.v2.UpdateActiveBreakpointResponse") +} + +// Reference imports to suppress 
errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Controller2 service + +type Controller2Client interface { + // Registers the debuggee with the controller service. + // + // All agents attached to the same application must call this method with + // exactly the same request content to get back the same stable `debuggee_id`. + // Agents should call this method again whenever `google.rpc.Code.NOT_FOUND` + // is returned from any controller method. + // + // This protocol allows the controller service to disable debuggees, recover + // from data loss, or change the `debuggee_id` format. Agents must handle + // `debuggee_id` value changing upon re-registration. + RegisterDebuggee(ctx context.Context, in *RegisterDebuggeeRequest, opts ...grpc.CallOption) (*RegisterDebuggeeResponse, error) + // Returns the list of all active breakpoints for the debuggee. + // + // The breakpoint specification (`location`, `condition`, and `expressions` + // fields) is semantically immutable, although the field values may + // change. For example, an agent may update the location line number + // to reflect the actual line where the breakpoint was set, but this + // doesn't change the breakpoint semantics. + // + // This means that an agent does not need to check if a breakpoint has changed + // when it encounters the same breakpoint on a successive call. + // Moreover, an agent should remember the breakpoints that are completed + // until the controller removes them from the active list to avoid + // setting those breakpoints again. + ListActiveBreakpoints(ctx context.Context, in *ListActiveBreakpointsRequest, opts ...grpc.CallOption) (*ListActiveBreakpointsResponse, error) + // Updates the breakpoint state or mutable fields. 
+ // The entire Breakpoint message must be sent back to the controller service. + // + // Updates to active breakpoint fields are only allowed if the new value + // does not change the breakpoint specification. Updates to the `location`, + // `condition` and `expressions` fields should not alter the breakpoint + // semantics. These may only make changes such as canonicalizing a value + // or snapping the location to the correct line of code. + UpdateActiveBreakpoint(ctx context.Context, in *UpdateActiveBreakpointRequest, opts ...grpc.CallOption) (*UpdateActiveBreakpointResponse, error) +} + +type controller2Client struct { + cc *grpc.ClientConn +} + +func NewController2Client(cc *grpc.ClientConn) Controller2Client { + return &controller2Client{cc} +} + +func (c *controller2Client) RegisterDebuggee(ctx context.Context, in *RegisterDebuggeeRequest, opts ...grpc.CallOption) (*RegisterDebuggeeResponse, error) { + out := new(RegisterDebuggeeResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouddebugger.v2.Controller2/RegisterDebuggee", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controller2Client) ListActiveBreakpoints(ctx context.Context, in *ListActiveBreakpointsRequest, opts ...grpc.CallOption) (*ListActiveBreakpointsResponse, error) { + out := new(ListActiveBreakpointsResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouddebugger.v2.Controller2/ListActiveBreakpoints", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controller2Client) UpdateActiveBreakpoint(ctx context.Context, in *UpdateActiveBreakpointRequest, opts ...grpc.CallOption) (*UpdateActiveBreakpointResponse, error) { + out := new(UpdateActiveBreakpointResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouddebugger.v2.Controller2/UpdateActiveBreakpoint", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Controller2 service + +type Controller2Server interface { + // Registers the debuggee with the controller service. + // + // All agents attached to the same application must call this method with + // exactly the same request content to get back the same stable `debuggee_id`. + // Agents should call this method again whenever `google.rpc.Code.NOT_FOUND` + // is returned from any controller method. + // + // This protocol allows the controller service to disable debuggees, recover + // from data loss, or change the `debuggee_id` format. Agents must handle + // `debuggee_id` value changing upon re-registration. + RegisterDebuggee(context.Context, *RegisterDebuggeeRequest) (*RegisterDebuggeeResponse, error) + // Returns the list of all active breakpoints for the debuggee. + // + // The breakpoint specification (`location`, `condition`, and `expressions` + // fields) is semantically immutable, although the field values may + // change. For example, an agent may update the location line number + // to reflect the actual line where the breakpoint was set, but this + // doesn't change the breakpoint semantics. + // + // This means that an agent does not need to check if a breakpoint has changed + // when it encounters the same breakpoint on a successive call. + // Moreover, an agent should remember the breakpoints that are completed + // until the controller removes them from the active list to avoid + // setting those breakpoints again. + ListActiveBreakpoints(context.Context, *ListActiveBreakpointsRequest) (*ListActiveBreakpointsResponse, error) + // Updates the breakpoint state or mutable fields. + // The entire Breakpoint message must be sent back to the controller service. + // + // Updates to active breakpoint fields are only allowed if the new value + // does not change the breakpoint specification. 
Updates to the `location`, + // `condition` and `expressions` fields should not alter the breakpoint + // semantics. These may only make changes such as canonicalizing a value + // or snapping the location to the correct line of code. + UpdateActiveBreakpoint(context.Context, *UpdateActiveBreakpointRequest) (*UpdateActiveBreakpointResponse, error) +} + +func RegisterController2Server(s *grpc.Server, srv Controller2Server) { + s.RegisterService(&_Controller2_serviceDesc, srv) +} + +func _Controller2_RegisterDebuggee_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegisterDebuggeeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Controller2Server).RegisterDebuggee(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouddebugger.v2.Controller2/RegisterDebuggee", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Controller2Server).RegisterDebuggee(ctx, req.(*RegisterDebuggeeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller2_ListActiveBreakpoints_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListActiveBreakpointsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Controller2Server).ListActiveBreakpoints(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouddebugger.v2.Controller2/ListActiveBreakpoints", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Controller2Server).ListActiveBreakpoints(ctx, req.(*ListActiveBreakpointsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller2_UpdateActiveBreakpoint_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateActiveBreakpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Controller2Server).UpdateActiveBreakpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouddebugger.v2.Controller2/UpdateActiveBreakpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Controller2Server).UpdateActiveBreakpoint(ctx, req.(*UpdateActiveBreakpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.clouddebugger.v2.Controller2", + HandlerType: (*Controller2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RegisterDebuggee", + Handler: _Controller2_RegisterDebuggee_Handler, + }, + { + MethodName: "ListActiveBreakpoints", + Handler: _Controller2_ListActiveBreakpoints_Handler, + }, + { + MethodName: "UpdateActiveBreakpoint", + Handler: _Controller2_UpdateActiveBreakpoint_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/clouddebugger/v2/controller.proto", +} + +func init() { proto.RegisterFile("google/devtools/clouddebugger/v2/controller.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 589 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdd, 0x6a, 0xd4, 0x40, + 0x14, 0x66, 0x5a, 0x94, 0xf6, 0x44, 0x69, 0x19, 0x50, 0x43, 0x6c, 0x75, 0x1b, 0xa4, 0x94, 0x5a, + 0x32, 0x18, 0xbd, 0x71, 0x05, 0x7f, 0xb6, 0x6a, 0x11, 0x5a, 0x2d, 0x4b, 0x55, 0xf0, 0x66, 0xc9, + 0x26, 0xc7, 0x30, 0x34, 0x9d, 0x89, 0x99, 0xc9, 0x5a, 0x29, 0xbd, 0xf1, 0x56, 0xf1, 0xc6, 0x47, + 0xf0, 0xce, 0x17, 0x10, 0x7c, 0x0d, 0x7d, 0x04, 0xaf, 0x7c, 0x0a, 0xc9, 0xdf, 0xee, 0xf6, 0x67, + 0x9b, 0x76, 0xf1, 0x32, 0xdf, 0x9c, 0xef, 0x3b, 
0xdf, 0x37, 0x39, 0x73, 0xe0, 0x56, 0x28, 0x65, + 0x18, 0x21, 0x0b, 0xb0, 0xa7, 0xa5, 0x8c, 0x14, 0xf3, 0x23, 0x99, 0x06, 0x01, 0x76, 0xd3, 0x30, + 0xc4, 0x84, 0xf5, 0x5c, 0xe6, 0x4b, 0xa1, 0x13, 0x19, 0x45, 0x98, 0x38, 0x71, 0x22, 0xb5, 0xa4, + 0x8d, 0x82, 0xe2, 0x54, 0x14, 0xe7, 0x00, 0xc5, 0xe9, 0xb9, 0xd6, 0x5c, 0x29, 0xea, 0xc5, 0x9c, + 0x79, 0x42, 0x48, 0xed, 0x69, 0x2e, 0x85, 0x2a, 0xf8, 0xd6, 0xcd, 0xda, 0x96, 0x81, 0xa7, 0xbd, + 0xb2, 0xf8, 0x6a, 0x59, 0x9c, 0x7f, 0x75, 0xd3, 0xb7, 0x0c, 0x77, 0x62, 0xfd, 0xa1, 0x38, 0xb4, + 0x3d, 0xb8, 0xd2, 0xc6, 0x90, 0x2b, 0x8d, 0xc9, 0xe3, 0x82, 0x8e, 0x6d, 0x7c, 0x97, 0xa2, 0xd2, + 0xf4, 0x29, 0x4c, 0x95, 0x8a, 0x68, 0x92, 0x06, 0x59, 0x32, 0xdc, 0x65, 0xa7, 0xce, 0xb7, 0xd3, + 0x17, 0xe9, 0x73, 0xed, 0x2e, 0x98, 0x47, 0x5b, 0xa8, 0x58, 0x0a, 0x85, 0xff, 0xad, 0xc7, 0x67, + 0x02, 0x73, 0xeb, 0x5c, 0xe9, 0x47, 0xbe, 0xe6, 0x3d, 0x6c, 0x25, 0xe8, 0x6d, 0xc7, 0x92, 0x0b, + 0xad, 0xaa, 0x30, 0xd7, 0xc1, 0xa8, 0x8a, 0x3b, 0x3c, 0xc8, 0x7b, 0x4d, 0xb7, 0xa1, 0x82, 0x9e, + 0x05, 0x74, 0x1e, 0xe0, 0xbd, 0xc7, 0x75, 0x47, 0xcb, 0x6d, 0x14, 0xe6, 0x44, 0x7e, 0x3e, 0x9d, + 0x21, 0x5b, 0x19, 0x40, 0x57, 0x80, 0xaa, 0xd4, 0xf7, 0x51, 0xa9, 0x8e, 0x14, 0x1d, 0xcd, 0x77, + 0x50, 0xa6, 0xda, 0x9c, 0x6c, 0x90, 0xa5, 0xa9, 0xf6, 0x6c, 0x79, 0xf2, 0x42, 0x6c, 0x15, 0xb8, + 0xfd, 0x93, 0xc0, 0xfc, 0x08, 0x3b, 0x65, 0xf0, 0xe7, 0x60, 0x74, 0x07, 0xb0, 0x49, 0x1a, 0x93, + 0x4b, 0x86, 0xbb, 0x52, 0x9f, 0x7d, 0xa0, 0xd5, 0x1e, 0x16, 0xa0, 0x8b, 0x30, 0x23, 0x70, 0x57, + 0x77, 0x8e, 0x64, 0xb8, 0x98, 0xc1, 0xaf, 0xfb, 0x39, 0x16, 0xe0, 0x42, 0x5e, 0x82, 0xbb, 0x31, + 0x4f, 0x30, 0x28, 0x13, 0x18, 0x19, 0xf6, 0xa4, 0x80, 0xec, 0x2f, 0x04, 0xe6, 0x5f, 0xc6, 0x81, + 0xa7, 0xf1, 0xb0, 0xfd, 0x53, 0x5f, 0xe6, 0x3a, 0xc0, 0xc0, 0x5c, 0x6e, 0xe4, 0xac, 0xe1, 0x86, + 0xf8, 0x76, 0x03, 0xae, 0x8d, 0xf2, 0x53, 0xdc, 0xa6, 0xfb, 0xe9, 0x1c, 0x18, 0xab, 0xfd, 0x47, + 0xe6, 0xd2, 0x1f, 0x04, 0x66, 0x0f, 0xcf, 0x1c, 0xbd, 0x5b, 0x6f, 0x60, 0xc4, 0x53, 
0xb0, 0x9a, + 0xe3, 0x50, 0x0b, 0x6f, 0xf6, 0xca, 0xc7, 0x5f, 0x7f, 0xbe, 0x4e, 0x2c, 0xda, 0x0b, 0x07, 0x37, + 0x01, 0xab, 0xae, 0x4b, 0xb1, 0xa4, 0xa4, 0x36, 0xc9, 0x32, 0xfd, 0x4d, 0xe0, 0xd2, 0xb1, 0x93, + 0x43, 0xef, 0xd7, 0x7b, 0x38, 0xe9, 0x05, 0x58, 0x0f, 0xc6, 0xe6, 0x97, 0x41, 0x9a, 0x79, 0x90, + 0x3b, 0xd4, 0x1d, 0x19, 0x64, 0x6f, 0x68, 0x2a, 0xf6, 0xd9, 0xf0, 0x78, 0xfe, 0x25, 0x70, 0xf9, + 0xf8, 0x7f, 0x48, 0x4f, 0xe1, 0xeb, 0xc4, 0x69, 0xb4, 0x1e, 0x8e, 0x2f, 0x50, 0x26, 0xdb, 0xc8, + 0x93, 0xad, 0x59, 0xad, 0xb3, 0x27, 0x63, 0x7b, 0x83, 0x0f, 0x87, 0x07, 0xfb, 0x4d, 0xb2, 0xdc, + 0xfa, 0x46, 0xe0, 0x86, 0x2f, 0x77, 0x6a, 0x6d, 0xb5, 0x66, 0x06, 0x33, 0xbb, 0x99, 0x6d, 0xe3, + 0x4d, 0xf2, 0x66, 0xa3, 0x24, 0x85, 0x32, 0xf2, 0x44, 0xe8, 0xc8, 0x24, 0x64, 0x21, 0x8a, 0x7c, + 0x57, 0xb3, 0xe2, 0xc8, 0x8b, 0xb9, 0x1a, 0xbd, 0xf8, 0xef, 0x1d, 0x00, 0xbe, 0x4f, 0x98, 0x6b, + 0x85, 0xde, 0x6a, 0x06, 0x57, 0x9b, 0x33, 0x71, 0x5e, 0xb9, 0xdd, 0xf3, 0xb9, 0xe8, 0xed, 0x7f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x05, 0xef, 0x37, 0xb4, 0xbf, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/data.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/data.pb.go new file mode 100644 index 000000000..b41e215e8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/data.pb.go @@ -0,0 +1,889 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/devtools/clouddebugger/v2/data.proto + +package clouddebugger + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_devtools_source_v1 "google.golang.org/genproto/googleapis/devtools/source/v1" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf2 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Enumerates references to which the message applies. +type StatusMessage_Reference int32 + +const ( + // Status doesn't refer to any particular input. + StatusMessage_UNSPECIFIED StatusMessage_Reference = 0 + // Status applies to the breakpoint and is related to its location. + StatusMessage_BREAKPOINT_SOURCE_LOCATION StatusMessage_Reference = 3 + // Status applies to the breakpoint and is related to its condition. + StatusMessage_BREAKPOINT_CONDITION StatusMessage_Reference = 4 + // Status applies to the breakpoint and is related to its expressions. + StatusMessage_BREAKPOINT_EXPRESSION StatusMessage_Reference = 7 + // Status applies to the breakpoint and is related to its age. + StatusMessage_BREAKPOINT_AGE StatusMessage_Reference = 8 + // Status applies to the entire variable. + StatusMessage_VARIABLE_NAME StatusMessage_Reference = 5 + // Status applies to variable value (variable name is valid). 
+ StatusMessage_VARIABLE_VALUE StatusMessage_Reference = 6 +) + +var StatusMessage_Reference_name = map[int32]string{ + 0: "UNSPECIFIED", + 3: "BREAKPOINT_SOURCE_LOCATION", + 4: "BREAKPOINT_CONDITION", + 7: "BREAKPOINT_EXPRESSION", + 8: "BREAKPOINT_AGE", + 5: "VARIABLE_NAME", + 6: "VARIABLE_VALUE", +} +var StatusMessage_Reference_value = map[string]int32{ + "UNSPECIFIED": 0, + "BREAKPOINT_SOURCE_LOCATION": 3, + "BREAKPOINT_CONDITION": 4, + "BREAKPOINT_EXPRESSION": 7, + "BREAKPOINT_AGE": 8, + "VARIABLE_NAME": 5, + "VARIABLE_VALUE": 6, +} + +func (x StatusMessage_Reference) String() string { + return proto.EnumName(StatusMessage_Reference_name, int32(x)) +} +func (StatusMessage_Reference) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 0} } + +// Actions that can be taken when a breakpoint hits. +// Agents should reject breakpoints with unsupported or unknown action values. +type Breakpoint_Action int32 + +const ( + // Capture stack frame and variables and update the breakpoint. + // The data is only captured once. After that the breakpoint is set + // in a final state. + Breakpoint_CAPTURE Breakpoint_Action = 0 + // Log each breakpoint hit. The breakpoint remains active until + // deleted or expired. + Breakpoint_LOG Breakpoint_Action = 1 +) + +var Breakpoint_Action_name = map[int32]string{ + 0: "CAPTURE", + 1: "LOG", +} +var Breakpoint_Action_value = map[string]int32{ + "CAPTURE": 0, + "LOG": 1, +} + +func (x Breakpoint_Action) String() string { + return proto.EnumName(Breakpoint_Action_name, int32(x)) +} +func (Breakpoint_Action) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{5, 0} } + +// Log severity levels. +type Breakpoint_LogLevel int32 + +const ( + // Information log message. + Breakpoint_INFO Breakpoint_LogLevel = 0 + // Warning log message. + Breakpoint_WARNING Breakpoint_LogLevel = 1 + // Error log message. 
+ Breakpoint_ERROR Breakpoint_LogLevel = 2 +) + +var Breakpoint_LogLevel_name = map[int32]string{ + 0: "INFO", + 1: "WARNING", + 2: "ERROR", +} +var Breakpoint_LogLevel_value = map[string]int32{ + "INFO": 0, + "WARNING": 1, + "ERROR": 2, +} + +func (x Breakpoint_LogLevel) String() string { + return proto.EnumName(Breakpoint_LogLevel_name, int32(x)) +} +func (Breakpoint_LogLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{5, 1} } + +// Represents a message with parameters. +type FormatMessage struct { + // Format template for the message. The `format` uses placeholders `$0`, + // `$1`, etc. to reference parameters. `$$` can be used to denote the `$` + // character. + // + // Examples: + // + // * `Failed to load '$0' which helps debug $1 the first time it + // is loaded. Again, $0 is very important.` + // * `Please pay $$10 to use $0 instead of $1.` + Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` + // Optional parameters to be embedded into the message. + Parameters []string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty"` +} + +func (m *FormatMessage) Reset() { *m = FormatMessage{} } +func (m *FormatMessage) String() string { return proto.CompactTextString(m) } +func (*FormatMessage) ProtoMessage() {} +func (*FormatMessage) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *FormatMessage) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormatMessage) GetParameters() []string { + if m != nil { + return m.Parameters + } + return nil +} + +// Represents a contextual status message. +// The message can indicate an error or informational status, and refer to +// specific parts of the containing object. +// For example, the `Breakpoint.status` field can indicate an error referring +// to the `BREAKPOINT_SOURCE_LOCATION` with the message `Location not found`. 
+type StatusMessage struct { + // Distinguishes errors from informational messages. + IsError bool `protobuf:"varint,1,opt,name=is_error,json=isError" json:"is_error,omitempty"` + // Reference to which the message applies. + RefersTo StatusMessage_Reference `protobuf:"varint,2,opt,name=refers_to,json=refersTo,enum=google.devtools.clouddebugger.v2.StatusMessage_Reference" json:"refers_to,omitempty"` + // Status message text. + Description *FormatMessage `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` +} + +func (m *StatusMessage) Reset() { *m = StatusMessage{} } +func (m *StatusMessage) String() string { return proto.CompactTextString(m) } +func (*StatusMessage) ProtoMessage() {} +func (*StatusMessage) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *StatusMessage) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +func (m *StatusMessage) GetRefersTo() StatusMessage_Reference { + if m != nil { + return m.RefersTo + } + return StatusMessage_UNSPECIFIED +} + +func (m *StatusMessage) GetDescription() *FormatMessage { + if m != nil { + return m.Description + } + return nil +} + +// Represents a location in the source code. +type SourceLocation struct { + // Path to the source file within the source context of the target binary. + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + // Line inside the file. The first line in the file has the value `1`. 
+ Line int32 `protobuf:"varint,2,opt,name=line" json:"line,omitempty"` +} + +func (m *SourceLocation) Reset() { *m = SourceLocation{} } +func (m *SourceLocation) String() string { return proto.CompactTextString(m) } +func (*SourceLocation) ProtoMessage() {} +func (*SourceLocation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *SourceLocation) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *SourceLocation) GetLine() int32 { + if m != nil { + return m.Line + } + return 0 +} + +// Represents a variable or an argument possibly of a compound object type. +// Note how the following variables are represented: +// +// 1) A simple variable: +// +// int x = 5 +// +// { name: "x", value: "5", type: "int" } // Captured variable +// +// 2) A compound object: +// +// struct T { +// int m1; +// int m2; +// }; +// T x = { 3, 7 }; +// +// { // Captured variable +// name: "x", +// type: "T", +// members { name: "m1", value: "3", type: "int" }, +// members { name: "m2", value: "7", type: "int" } +// } +// +// 3) A pointer where the pointee was captured: +// +// T x = { 3, 7 }; +// T* p = &x; +// +// { // Captured variable +// name: "p", +// type: "T*", +// value: "0x00500500", +// members { name: "m1", value: "3", type: "int" }, +// members { name: "m2", value: "7", type: "int" } +// } +// +// 4) A pointer where the pointee was not captured: +// +// T* p = new T; +// +// { // Captured variable +// name: "p", +// type: "T*", +// value: "0x00400400" +// status { is_error: true, description { format: "unavailable" } } +// } +// +// The status should describe the reason for the missing value, +// such as ``, ``, ``. +// +// Note that a null pointer should not have members. 
+// +// 5) An unnamed value: +// +// int* p = new int(7); +// +// { // Captured variable +// name: "p", +// value: "0x00500500", +// type: "int*", +// members { value: "7", type: "int" } } +// +// 6) An unnamed pointer where the pointee was not captured: +// +// int* p = new int(7); +// int** pp = &p; +// +// { // Captured variable +// name: "pp", +// value: "0x00500500", +// type: "int**", +// members { +// value: "0x00400400", +// type: "int*" +// status { +// is_error: true, +// description: { format: "unavailable" } } +// } +// } +// } +// +// To optimize computation, memory and network traffic, variables that +// repeat in the output multiple times can be stored once in a shared +// variable table and be referenced using the `var_table_index` field. The +// variables stored in the shared table are nameless and are essentially +// a partition of the complete variable. To reconstruct the complete +// variable, merge the referencing variable with the referenced variable. +// +// When using the shared variable table, the following variables: +// +// T x = { 3, 7 }; +// T* p = &x; +// T& r = x; +// +// { name: "x", var_table_index: 3, type: "T" } // Captured variables +// { name: "p", value "0x00500500", type="T*", var_table_index: 3 } +// { name: "r", type="T&", var_table_index: 3 } +// +// { // Shared variable table entry #3: +// members { name: "m1", value: "3", type: "int" }, +// members { name: "m2", value: "7", type: "int" } +// } +// +// Note that the pointer address is stored with the referencing variable +// and not with the referenced variable. This allows the referenced variable +// to be shared between pointers and references. +// +// The type field is optional. The debugger agent may or may not support it. +type Variable struct { + // Name of the variable, if any. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Simple value of the variable. 
+ Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + // Variable type (e.g. `MyClass`). If the variable is split with + // `var_table_index`, `type` goes next to `value`. The interpretation of + // a type is agent specific. It is recommended to include the dynamic type + // rather than a static type of an object. + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + // Members contained or pointed to by the variable. + Members []*Variable `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` + // Reference to a variable in the shared variable table. More than + // one variable can reference the same variable in the table. The + // `var_table_index` field is an index into `variable_table` in Breakpoint. + VarTableIndex *google_protobuf2.Int32Value `protobuf:"bytes,4,opt,name=var_table_index,json=varTableIndex" json:"var_table_index,omitempty"` + // Status associated with the variable. This field will usually stay + // unset. A status of a single variable only applies to that variable or + // expression. The rest of breakpoint data still remains valid. Variables + // might be reported in error state even when breakpoint is not in final + // state. + // + // The message may refer to variable name with `refers_to` set to + // `VARIABLE_NAME`. Alternatively `refers_to` will be set to `VARIABLE_VALUE`. + // In either case variable value and members will be unset. + // + // Example of error message applied to name: `Invalid expression syntax`. + // + // Example of information message applied to value: `Not captured`. 
+ // + // Examples of error message applied to value: + // + // * `Malformed string`, + // * `Field f not found in class C` + // * `Null pointer dereference` + Status *StatusMessage `protobuf:"bytes,5,opt,name=status" json:"status,omitempty"` +} + +func (m *Variable) Reset() { *m = Variable{} } +func (m *Variable) String() string { return proto.CompactTextString(m) } +func (*Variable) ProtoMessage() {} +func (*Variable) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *Variable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Variable) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *Variable) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Variable) GetMembers() []*Variable { + if m != nil { + return m.Members + } + return nil +} + +func (m *Variable) GetVarTableIndex() *google_protobuf2.Int32Value { + if m != nil { + return m.VarTableIndex + } + return nil +} + +func (m *Variable) GetStatus() *StatusMessage { + if m != nil { + return m.Status + } + return nil +} + +// Represents a stack frame context. +type StackFrame struct { + // Demangled function name at the call site. + Function string `protobuf:"bytes,1,opt,name=function" json:"function,omitempty"` + // Source location of the call site. + Location *SourceLocation `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` + // Set of arguments passed to this function. + // Note that this might not be populated for all stack frames. + Arguments []*Variable `protobuf:"bytes,3,rep,name=arguments" json:"arguments,omitempty"` + // Set of local variables at the stack frame location. + // Note that this might not be populated for all stack frames. 
+ Locals []*Variable `protobuf:"bytes,4,rep,name=locals" json:"locals,omitempty"` +} + +func (m *StackFrame) Reset() { *m = StackFrame{} } +func (m *StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackFrame) ProtoMessage() {} +func (*StackFrame) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *StackFrame) GetFunction() string { + if m != nil { + return m.Function + } + return "" +} + +func (m *StackFrame) GetLocation() *SourceLocation { + if m != nil { + return m.Location + } + return nil +} + +func (m *StackFrame) GetArguments() []*Variable { + if m != nil { + return m.Arguments + } + return nil +} + +func (m *StackFrame) GetLocals() []*Variable { + if m != nil { + return m.Locals + } + return nil +} + +// Represents the breakpoint specification, status and results. +type Breakpoint struct { + // Breakpoint identifier, unique in the scope of the debuggee. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // Action that the agent should perform when the code at the + // breakpoint location is hit. + Action Breakpoint_Action `protobuf:"varint,13,opt,name=action,enum=google.devtools.clouddebugger.v2.Breakpoint_Action" json:"action,omitempty"` + // Breakpoint source location. + Location *SourceLocation `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` + // Condition that triggers the breakpoint. + // The condition is a compound boolean expression composed using expressions + // in a programming language at the source location. + Condition string `protobuf:"bytes,3,opt,name=condition" json:"condition,omitempty"` + // List of read-only expressions to evaluate at the breakpoint location. + // The expressions are composed using expressions in the programming language + // at the source location. If the breakpoint action is `LOG`, the evaluated + // expressions are included in log statements. 
+ Expressions []string `protobuf:"bytes,4,rep,name=expressions" json:"expressions,omitempty"` + // Only relevant when action is `LOG`. Defines the message to log when + // the breakpoint hits. The message may include parameter placeholders `$0`, + // `$1`, etc. These placeholders are replaced with the evaluated value + // of the appropriate expression. Expressions not referenced in + // `log_message_format` are not logged. + // + // Example: `Message received, id = $0, count = $1` with + // `expressions` = `[ message.id, message.count ]`. + LogMessageFormat string `protobuf:"bytes,14,opt,name=log_message_format,json=logMessageFormat" json:"log_message_format,omitempty"` + // Indicates the severity of the log. Only relevant when action is `LOG`. + LogLevel Breakpoint_LogLevel `protobuf:"varint,15,opt,name=log_level,json=logLevel,enum=google.devtools.clouddebugger.v2.Breakpoint_LogLevel" json:"log_level,omitempty"` + // When true, indicates that this is a final result and the + // breakpoint state will not change from here on. + IsFinalState bool `protobuf:"varint,5,opt,name=is_final_state,json=isFinalState" json:"is_final_state,omitempty"` + // Time this breakpoint was created by the server in seconds resolution. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Time this breakpoint was finalized as seen by the server in seconds + // resolution. + FinalTime *google_protobuf1.Timestamp `protobuf:"bytes,12,opt,name=final_time,json=finalTime" json:"final_time,omitempty"` + // E-mail address of the user that created this breakpoint + UserEmail string `protobuf:"bytes,16,opt,name=user_email,json=userEmail" json:"user_email,omitempty"` + // Breakpoint status. + // + // The status includes an error flag and a human readable message. + // This field is usually unset. The message can be either + // informational or an error message. 
Regardless, clients should always + // display the text message back to the user. + // + // Error status indicates complete failure of the breakpoint. + // + // Example (non-final state): `Still loading symbols...` + // + // Examples (final state): + // + // * `Invalid line number` referring to location + // * `Field f not found in class C` referring to condition + Status *StatusMessage `protobuf:"bytes,10,opt,name=status" json:"status,omitempty"` + // The stack at breakpoint time. + StackFrames []*StackFrame `protobuf:"bytes,7,rep,name=stack_frames,json=stackFrames" json:"stack_frames,omitempty"` + // Values of evaluated expressions at breakpoint time. + // The evaluated expressions appear in exactly the same order they + // are listed in the `expressions` field. + // The `name` field holds the original expression text, the `value` or + // `members` field holds the result of the evaluated expression. + // If the expression cannot be evaluated, the `status` inside the `Variable` + // will indicate an error and contain the error text. + EvaluatedExpressions []*Variable `protobuf:"bytes,8,rep,name=evaluated_expressions,json=evaluatedExpressions" json:"evaluated_expressions,omitempty"` + // The `variable_table` exists to aid with computation, memory and network + // traffic optimization. It enables storing a variable once and reference + // it from multiple variables, including variables stored in the + // `variable_table` itself. + // For example, the same `this` object, which may appear at many levels of + // the stack, can have all of its data stored once in this table. The + // stack frame variables then would hold only a reference to it. + // + // The variable `var_table_index` field is an index into this repeated field. + // The stored objects are nameless and get their name from the referencing + // variable. The effective variable is a merge of the referencing variable + // and the referenced variable. 
+ VariableTable []*Variable `protobuf:"bytes,9,rep,name=variable_table,json=variableTable" json:"variable_table,omitempty"` + // A set of custom breakpoint properties, populated by the agent, to be + // displayed to the user. + Labels map[string]string `protobuf:"bytes,17,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Breakpoint) Reset() { *m = Breakpoint{} } +func (m *Breakpoint) String() string { return proto.CompactTextString(m) } +func (*Breakpoint) ProtoMessage() {} +func (*Breakpoint) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *Breakpoint) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Breakpoint) GetAction() Breakpoint_Action { + if m != nil { + return m.Action + } + return Breakpoint_CAPTURE +} + +func (m *Breakpoint) GetLocation() *SourceLocation { + if m != nil { + return m.Location + } + return nil +} + +func (m *Breakpoint) GetCondition() string { + if m != nil { + return m.Condition + } + return "" +} + +func (m *Breakpoint) GetExpressions() []string { + if m != nil { + return m.Expressions + } + return nil +} + +func (m *Breakpoint) GetLogMessageFormat() string { + if m != nil { + return m.LogMessageFormat + } + return "" +} + +func (m *Breakpoint) GetLogLevel() Breakpoint_LogLevel { + if m != nil { + return m.LogLevel + } + return Breakpoint_INFO +} + +func (m *Breakpoint) GetIsFinalState() bool { + if m != nil { + return m.IsFinalState + } + return false +} + +func (m *Breakpoint) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Breakpoint) GetFinalTime() *google_protobuf1.Timestamp { + if m != nil { + return m.FinalTime + } + return nil +} + +func (m *Breakpoint) GetUserEmail() string { + if m != nil { + return m.UserEmail + } + return "" +} + +func (m *Breakpoint) GetStatus() *StatusMessage { + if m != nil { + return m.Status + } + 
return nil +} + +func (m *Breakpoint) GetStackFrames() []*StackFrame { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *Breakpoint) GetEvaluatedExpressions() []*Variable { + if m != nil { + return m.EvaluatedExpressions + } + return nil +} + +func (m *Breakpoint) GetVariableTable() []*Variable { + if m != nil { + return m.VariableTable + } + return nil +} + +func (m *Breakpoint) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// Represents the debugged application. The application may include one or more +// replicated processes executing the same code. Each of these processes is +// attached with a debugger agent, carrying out the debugging commands. +// Agents attached to the same debuggee identify themselves as such by using +// exactly the same Debuggee message value when registering. +type Debuggee struct { + // Unique identifier for the debuggee generated by the controller service. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // Project the debuggee is associated with. + // Use project number or id when registering a Google Cloud Platform project. + Project string `protobuf:"bytes,2,opt,name=project" json:"project,omitempty"` + // Uniquifier to further distiguish the application. + // It is possible that different applications might have identical values in + // the debuggee message, thus, incorrectly identified as a single application + // by the Controller service. This field adds salt to further distiguish the + // application. Agents should consider seeding this field with value that + // identifies the code, binary, configuration and environment. + Uniquifier string `protobuf:"bytes,3,opt,name=uniquifier" json:"uniquifier,omitempty"` + // Human readable description of the debuggee. + // Including a human-readable project name, environment name and version + // information is recommended. 
+ Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + // If set to `true`, indicates that Controller service does not detect any + // activity from the debuggee agents and the application is possibly stopped. + IsInactive bool `protobuf:"varint,5,opt,name=is_inactive,json=isInactive" json:"is_inactive,omitempty"` + // Version ID of the agent. + // Schema: `domain/language-platform/vmajor.minor` (for example + // `google.com/java-gcp/v1.1`). + AgentVersion string `protobuf:"bytes,6,opt,name=agent_version,json=agentVersion" json:"agent_version,omitempty"` + // If set to `true`, indicates that the agent should disable itself and + // detach from the debuggee. + IsDisabled bool `protobuf:"varint,7,opt,name=is_disabled,json=isDisabled" json:"is_disabled,omitempty"` + // Human readable message to be displayed to the user about this debuggee. + // Absence of this field indicates no status. The message can be either + // informational or an error status. + Status *StatusMessage `protobuf:"bytes,8,opt,name=status" json:"status,omitempty"` + // References to the locations and revisions of the source code used in the + // deployed application. + SourceContexts []*google_devtools_source_v1.SourceContext `protobuf:"bytes,9,rep,name=source_contexts,json=sourceContexts" json:"source_contexts,omitempty"` + // References to the locations and revisions of the source code used in the + // deployed application. + // + // NOTE: this field is experimental and can be ignored. + ExtSourceContexts []*google_devtools_source_v1.ExtendedSourceContext `protobuf:"bytes,13,rep,name=ext_source_contexts,json=extSourceContexts" json:"ext_source_contexts,omitempty"` + // A set of custom debuggee properties, populated by the agent, to be + // displayed to the user. 
+ Labels map[string]string `protobuf:"bytes,11,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Debuggee) Reset() { *m = Debuggee{} } +func (m *Debuggee) String() string { return proto.CompactTextString(m) } +func (*Debuggee) ProtoMessage() {} +func (*Debuggee) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *Debuggee) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Debuggee) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *Debuggee) GetUniquifier() string { + if m != nil { + return m.Uniquifier + } + return "" +} + +func (m *Debuggee) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Debuggee) GetIsInactive() bool { + if m != nil { + return m.IsInactive + } + return false +} + +func (m *Debuggee) GetAgentVersion() string { + if m != nil { + return m.AgentVersion + } + return "" +} + +func (m *Debuggee) GetIsDisabled() bool { + if m != nil { + return m.IsDisabled + } + return false +} + +func (m *Debuggee) GetStatus() *StatusMessage { + if m != nil { + return m.Status + } + return nil +} + +func (m *Debuggee) GetSourceContexts() []*google_devtools_source_v1.SourceContext { + if m != nil { + return m.SourceContexts + } + return nil +} + +func (m *Debuggee) GetExtSourceContexts() []*google_devtools_source_v1.ExtendedSourceContext { + if m != nil { + return m.ExtSourceContexts + } + return nil +} + +func (m *Debuggee) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*FormatMessage)(nil), "google.devtools.clouddebugger.v2.FormatMessage") + proto.RegisterType((*StatusMessage)(nil), "google.devtools.clouddebugger.v2.StatusMessage") + proto.RegisterType((*SourceLocation)(nil), "google.devtools.clouddebugger.v2.SourceLocation") + proto.RegisterType((*Variable)(nil), 
"google.devtools.clouddebugger.v2.Variable") + proto.RegisterType((*StackFrame)(nil), "google.devtools.clouddebugger.v2.StackFrame") + proto.RegisterType((*Breakpoint)(nil), "google.devtools.clouddebugger.v2.Breakpoint") + proto.RegisterType((*Debuggee)(nil), "google.devtools.clouddebugger.v2.Debuggee") + proto.RegisterEnum("google.devtools.clouddebugger.v2.StatusMessage_Reference", StatusMessage_Reference_name, StatusMessage_Reference_value) + proto.RegisterEnum("google.devtools.clouddebugger.v2.Breakpoint_Action", Breakpoint_Action_name, Breakpoint_Action_value) + proto.RegisterEnum("google.devtools.clouddebugger.v2.Breakpoint_LogLevel", Breakpoint_LogLevel_name, Breakpoint_LogLevel_value) +} + +func init() { proto.RegisterFile("google/devtools/clouddebugger/v2/data.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x72, 0xda, 0x46, + 0x14, 0x0e, 0x3f, 0x06, 0xe9, 0x60, 0x30, 0xd9, 0x26, 0x1d, 0xc5, 0x4d, 0x1d, 0x86, 0xe6, 0xc2, + 0xd3, 0x66, 0x20, 0x21, 0xd3, 0x4e, 0xd2, 0x5c, 0x61, 0x2c, 0xbb, 0x4c, 0x08, 0x90, 0xc5, 0xa6, + 0x9d, 0xde, 0xa8, 0x6b, 0xb4, 0xa8, 0x6a, 0x84, 0xa4, 0xee, 0x2e, 0xd4, 0xb9, 0xcf, 0x63, 0xb4, + 0x2f, 0xd0, 0xe9, 0x3b, 0xf5, 0xae, 0xcf, 0xd1, 0xd9, 0xd5, 0x8a, 0x88, 0xa4, 0x2d, 0x71, 0x93, + 0xbb, 0xb3, 0xdf, 0x7e, 0xe7, 0x5b, 0x71, 0xf6, 0x3b, 0x47, 0x02, 0xbe, 0xf0, 0xa2, 0xc8, 0x0b, + 0x68, 0xdb, 0xa5, 0x2b, 0x11, 0x45, 0x01, 0x6f, 0xcf, 0x82, 0x68, 0xe9, 0xba, 0xf4, 0x62, 0xe9, + 0x79, 0x94, 0xb5, 0x57, 0x9d, 0xb6, 0x4b, 0x04, 0x69, 0xc5, 0x2c, 0x12, 0x11, 0x6a, 0x24, 0xe4, + 0x56, 0x4a, 0x6e, 0x6d, 0x90, 0x5b, 0xab, 0xce, 0xfe, 0x6d, 0x2d, 0x47, 0x62, 0xbf, 0x4d, 0xc2, + 0x30, 0x12, 0x44, 0xf8, 0x51, 0xc8, 0x93, 0xfc, 0xfd, 0xd6, 0x9b, 0x87, 0xf1, 0x68, 0xc9, 0x66, + 0xb4, 0xbd, 0x7a, 0xa0, 0x23, 0x67, 0x16, 0x85, 0x82, 0x5e, 0x0a, 0xcd, 0xbf, 0xa3, 0xf9, 0x6a, + 0x75, 0xb1, 0x9c, 0xb7, 0x85, 
0xbf, 0xa0, 0x5c, 0x90, 0x45, 0xac, 0x09, 0x07, 0x6f, 0x12, 0x7e, + 0x61, 0x24, 0x8e, 0x29, 0xd3, 0x07, 0x36, 0x4f, 0xa1, 0x7a, 0x12, 0xb1, 0x05, 0x11, 0xcf, 0x28, + 0xe7, 0xc4, 0xa3, 0xe8, 0x63, 0x28, 0xcd, 0x15, 0x60, 0xe5, 0x1a, 0xb9, 0x43, 0x13, 0xeb, 0x15, + 0x3a, 0x00, 0x88, 0x09, 0x23, 0x0b, 0x2a, 0x28, 0xe3, 0x56, 0xbe, 0x51, 0x38, 0x34, 0x71, 0x06, + 0x69, 0xbe, 0x2a, 0x40, 0x75, 0x22, 0x88, 0x58, 0xf2, 0x54, 0xe9, 0x16, 0x18, 0x3e, 0x77, 0x28, + 0x63, 0x11, 0x53, 0x5a, 0x06, 0x2e, 0xfb, 0xdc, 0x96, 0x4b, 0x34, 0x05, 0x93, 0xd1, 0x39, 0x65, + 0xdc, 0x11, 0x91, 0x95, 0x6f, 0xe4, 0x0e, 0x6b, 0x9d, 0xc7, 0xad, 0x6d, 0xa5, 0x6b, 0x6d, 0xc8, + 0xb7, 0xb0, 0x14, 0xa0, 0xe1, 0x8c, 0x62, 0x23, 0xd1, 0x3a, 0x8b, 0xd0, 0x73, 0xa8, 0xb8, 0x94, + 0xcf, 0x98, 0x1f, 0xcb, 0xa2, 0x5a, 0x85, 0x46, 0xee, 0xb0, 0xd2, 0x69, 0x6f, 0x57, 0xde, 0x28, + 0x01, 0xce, 0x6a, 0x34, 0xff, 0xc8, 0x81, 0xb9, 0x3e, 0x0a, 0xed, 0x41, 0xe5, 0x7c, 0x38, 0x19, + 0xdb, 0xbd, 0xfe, 0x49, 0xdf, 0x3e, 0xae, 0x5f, 0x43, 0x07, 0xb0, 0x7f, 0x84, 0xed, 0xee, 0xd3, + 0xf1, 0xa8, 0x3f, 0x3c, 0x73, 0x26, 0xa3, 0x73, 0xdc, 0xb3, 0x9d, 0xc1, 0xa8, 0xd7, 0x3d, 0xeb, + 0x8f, 0x86, 0xf5, 0x02, 0xb2, 0xe0, 0x46, 0x66, 0xbf, 0x37, 0x1a, 0x1e, 0xf7, 0xd5, 0x4e, 0x11, + 0xdd, 0x82, 0x9b, 0x99, 0x1d, 0xfb, 0xbb, 0x31, 0xb6, 0x27, 0x13, 0xb9, 0x55, 0x46, 0x08, 0x6a, + 0x99, 0xad, 0xee, 0xa9, 0x5d, 0x37, 0xd0, 0x75, 0xa8, 0x4e, 0xbb, 0xb8, 0xdf, 0x3d, 0x1a, 0xd8, + 0xce, 0xb0, 0xfb, 0xcc, 0xae, 0xef, 0x48, 0xda, 0x1a, 0x9a, 0x76, 0x07, 0xe7, 0x76, 0xbd, 0xd4, + 0x7c, 0x04, 0xb5, 0x89, 0x32, 0xca, 0x20, 0x9a, 0x29, 0x67, 0x21, 0x04, 0xc5, 0x98, 0x88, 0x1f, + 0xf5, 0x75, 0xaa, 0x58, 0x62, 0x81, 0x1f, 0x52, 0x55, 0xfa, 0x1d, 0xac, 0xe2, 0xe6, 0xaf, 0x79, + 0x30, 0xa6, 0x84, 0xf9, 0xe4, 0x22, 0xa0, 0x92, 0x10, 0x92, 0x05, 0x4d, 0x93, 0x64, 0x8c, 0x6e, + 0xc0, 0xce, 0x8a, 0x04, 0xcb, 0x24, 0xcb, 0xc4, 0xc9, 0x42, 0x32, 0xc5, 0xcb, 0x98, 0x5a, 0xa5, + 0x84, 0x29, 0x63, 0x74, 0x0c, 0xe5, 0x05, 0x5d, 0x5c, 0x48, 0xa3, 
0x14, 0x1a, 0x85, 0xc3, 0x4a, + 0xe7, 0xf3, 0xed, 0x57, 0x90, 0x1e, 0x8d, 0xd3, 0x54, 0xd4, 0x83, 0xbd, 0x15, 0x61, 0x8e, 0x90, + 0xa8, 0xe3, 0x87, 0x2e, 0xbd, 0xb4, 0x8a, 0xea, 0x42, 0x3f, 0x49, 0xd5, 0x52, 0x53, 0xb7, 0xfa, + 0xa1, 0x78, 0xd8, 0x99, 0xca, 0xe7, 0xc1, 0xd5, 0x15, 0x61, 0x67, 0x32, 0xa5, 0x2f, 0x33, 0xd0, + 0x29, 0x94, 0xb8, 0xb2, 0x8d, 0xb5, 0xf3, 0xae, 0x66, 0xd8, 0xb0, 0x19, 0xd6, 0xe9, 0xcd, 0x57, + 0x79, 0x80, 0x89, 0x20, 0xb3, 0x17, 0x27, 0xd2, 0xf2, 0x68, 0x1f, 0x8c, 0xf9, 0x32, 0x9c, 0x29, + 0x9b, 0x25, 0x45, 0x5a, 0xaf, 0xd1, 0x00, 0x8c, 0x40, 0x57, 0x5f, 0xd5, 0xaa, 0xd2, 0xb9, 0xff, + 0x0e, 0xa7, 0x6e, 0xdc, 0x1a, 0x5e, 0x2b, 0xa0, 0x6f, 0xc0, 0x24, 0xcc, 0x5b, 0x2e, 0x68, 0x28, + 0xfe, 0x4f, 0x39, 0x5f, 0x27, 0xa3, 0x23, 0x28, 0x49, 0xd5, 0x80, 0x5b, 0xc5, 0x2b, 0xcb, 0xe8, + 0xcc, 0xe6, 0x9f, 0x06, 0xc0, 0x11, 0xa3, 0xe4, 0x45, 0x1c, 0xf9, 0xa1, 0x40, 0x35, 0xc8, 0xfb, + 0xae, 0x2e, 0x40, 0xde, 0x77, 0xd1, 0x53, 0x28, 0x91, 0xa4, 0x28, 0x55, 0xd5, 0xd5, 0x0f, 0xb7, + 0x1f, 0xf1, 0x5a, 0xad, 0xd5, 0x55, 0xa9, 0x58, 0x4b, 0x7c, 0xe0, 0x3a, 0xde, 0x06, 0x73, 0x16, + 0x85, 0xae, 0xbf, 0x9e, 0x0c, 0x26, 0x7e, 0x0d, 0xa0, 0x06, 0x54, 0xe8, 0x65, 0xcc, 0x28, 0xe7, + 0x72, 0x1a, 0xab, 0x02, 0x99, 0x38, 0x0b, 0xa1, 0x7b, 0x80, 0x82, 0xc8, 0x73, 0x16, 0x89, 0x2f, + 0x1c, 0x3d, 0x24, 0x6b, 0x4a, 0xa8, 0x1e, 0x44, 0x9e, 0x36, 0x4c, 0x32, 0x4a, 0x10, 0x06, 0x53, + 0xb2, 0x03, 0xba, 0xa2, 0x81, 0xb5, 0xa7, 0x6a, 0xf1, 0xe5, 0x95, 0x6a, 0x31, 0x88, 0xbc, 0x81, + 0x4c, 0x96, 0xbf, 0x20, 0x89, 0xd0, 0x5d, 0xa8, 0xf9, 0xdc, 0x99, 0xfb, 0x21, 0x09, 0x1c, 0xe9, + 0x4a, 0xaa, 0x3c, 0x6d, 0xe0, 0x5d, 0x9f, 0x9f, 0x48, 0x50, 0x1a, 0x97, 0xa2, 0x27, 0x50, 0x99, + 0x31, 0x4a, 0x04, 0x75, 0xe4, 0xbb, 0xc0, 0xaa, 0xa8, 0xc2, 0xed, 0xbf, 0xd5, 0x32, 0x67, 0xe9, + 0x8b, 0x02, 0x43, 0x42, 0x97, 0x00, 0x7a, 0x0c, 0x90, 0xe8, 0xab, 0xdc, 0xdd, 0xad, 0xb9, 0xa6, + 0x62, 0xab, 0xd4, 0x4f, 0x01, 0x96, 0x9c, 0x32, 0x87, 0x2e, 0x88, 0x1f, 0x58, 0xf5, 0xa4, 0xc0, + 0x12, 
0xb1, 0x25, 0x90, 0x69, 0x44, 0x78, 0xaf, 0x46, 0x44, 0x23, 0xd8, 0xe5, 0xb2, 0x0f, 0x9d, + 0xb9, 0x6c, 0x44, 0x6e, 0x95, 0x95, 0x97, 0xef, 0xbd, 0x93, 0x9c, 0xee, 0x5e, 0x5c, 0xe1, 0xeb, + 0x98, 0x23, 0x07, 0x6e, 0x52, 0x39, 0xcb, 0x88, 0xa0, 0xae, 0x93, 0x35, 0x81, 0x71, 0xe5, 0x2e, + 0xb9, 0xb1, 0x16, 0xb2, 0x33, 0xce, 0x79, 0x0e, 0xb5, 0x95, 0x66, 0x24, 0xd3, 0xcc, 0x32, 0xaf, + 0xac, 0x5c, 0x4d, 0x15, 0xd4, 0x6c, 0x43, 0x63, 0x28, 0x05, 0xe4, 0x82, 0x06, 0xdc, 0xba, 0xae, + 0xa4, 0x1e, 0x5d, 0xcd, 0x5b, 0x2a, 0xd5, 0x0e, 0x05, 0x7b, 0x89, 0xb5, 0xce, 0xfe, 0x63, 0xa8, + 0x64, 0x60, 0x54, 0x87, 0xc2, 0x0b, 0xfa, 0x52, 0x77, 0xb6, 0x0c, 0xff, 0x79, 0xfc, 0x7f, 0x9d, + 0x7f, 0x94, 0x6b, 0x1e, 0x40, 0x29, 0xe9, 0x5c, 0x54, 0x81, 0x72, 0xaf, 0x3b, 0x3e, 0x3b, 0xc7, + 0x76, 0xfd, 0x1a, 0x2a, 0x43, 0x61, 0x30, 0x3a, 0xad, 0xe7, 0x9a, 0xf7, 0xc0, 0x48, 0xdd, 0x8c, + 0x0c, 0x28, 0xf6, 0x87, 0x27, 0xa3, 0xfa, 0x35, 0xc9, 0xfd, 0xb6, 0x8b, 0x87, 0xfd, 0xe1, 0x69, + 0x3d, 0x87, 0x4c, 0xd8, 0xb1, 0x31, 0x1e, 0xe1, 0x7a, 0xbe, 0xf9, 0x57, 0x11, 0x8c, 0xe3, 0xe4, + 0xb9, 0xe9, 0x5b, 0xf3, 0xc5, 0x82, 0x72, 0xcc, 0xa2, 0x9f, 0xe8, 0x4c, 0xe8, 0xc7, 0x48, 0x97, + 0xf2, 0xfb, 0x64, 0x19, 0xfa, 0x3f, 0x2f, 0xfd, 0xb9, 0x4f, 0x99, 0xee, 0xef, 0x0c, 0x22, 0x1b, + 0x3c, 0xfb, 0x69, 0x50, 0x54, 0x84, 0x2c, 0x84, 0xee, 0x40, 0xc5, 0xe7, 0x8e, 0x1f, 0xca, 0xe9, + 0xb3, 0x4a, 0x7b, 0x0b, 0x7c, 0xde, 0xd7, 0x08, 0xfa, 0x0c, 0xaa, 0xc4, 0xa3, 0xa1, 0x70, 0x56, + 0x94, 0xc9, 0x9b, 0xd5, 0xef, 0xbc, 0x5d, 0x05, 0x4e, 0x13, 0x4c, 0xab, 0xb8, 0x3e, 0x97, 0xf7, + 0xe4, 0x5a, 0xe5, 0x54, 0xe5, 0x58, 0x23, 0x99, 0x46, 0x30, 0xde, 0xaf, 0x11, 0x9e, 0xc3, 0xde, + 0xe6, 0x37, 0x21, 0xd7, 0xbe, 0x3a, 0x7c, 0x4b, 0x31, 0xe1, 0xb5, 0x56, 0x0f, 0xf4, 0x78, 0xec, + 0x25, 0x09, 0xb8, 0xc6, 0xb3, 0x4b, 0x8e, 0x7e, 0x80, 0x8f, 0xe8, 0xa5, 0x70, 0xde, 0x94, 0xad, + 0x2a, 0xd9, 0xfb, 0xff, 0x21, 0x6b, 0x5f, 0x0a, 0x1a, 0xba, 0xd4, 0xdd, 0x94, 0xbf, 0x4e, 0x2f, + 0xc5, 0x64, 0xf3, 0x84, 0xe1, 0xda, 0xb8, 
0x15, 0x25, 0xfa, 0xd5, 0xf6, 0x5f, 0x9f, 0x9a, 0xe1, + 0x03, 0xdb, 0xf6, 0xe8, 0xb7, 0x1c, 0xdc, 0x9d, 0x45, 0x8b, 0xad, 0x0f, 0x70, 0x64, 0x1e, 0x13, + 0x41, 0xc6, 0x72, 0xf8, 0x8d, 0x73, 0xdf, 0x3f, 0xd3, 0x74, 0x2f, 0x0a, 0x48, 0xe8, 0xb5, 0x22, + 0xe6, 0xb5, 0x3d, 0x1a, 0xaa, 0xd1, 0xd8, 0x4e, 0xb6, 0x48, 0xec, 0xf3, 0x7f, 0xff, 0xb7, 0xf0, + 0x64, 0x03, 0xf8, 0x3d, 0x6f, 0x9d, 0x26, 0x7a, 0x3d, 0x09, 0xa7, 0xbf, 0x95, 0xb5, 0xa6, 0x9d, + 0x8b, 0x92, 0x12, 0x7d, 0xf8, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x06, 0xfd, 0x4c, 0xc8, 0x81, + 0x0c, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/debugger.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/debugger.pb.go new file mode 100644 index 000000000..7454ee2e0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/clouddebugger/v2/debugger.pb.go @@ -0,0 +1,647 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/clouddebugger/v2/debugger.proto + +package clouddebugger + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Request to set a breakpoint +type SetBreakpointRequest struct { + // ID of the debuggee where the breakpoint is to be set. + DebuggeeId string `protobuf:"bytes,1,opt,name=debuggee_id,json=debuggeeId" json:"debuggee_id,omitempty"` + // Breakpoint specification to set. + // The field `location` of the breakpoint must be set. + Breakpoint *Breakpoint `protobuf:"bytes,2,opt,name=breakpoint" json:"breakpoint,omitempty"` + // The client version making the call. 
+ // Schema: `domain/type/version` (e.g., `google.com/intellij/v1`). + ClientVersion string `protobuf:"bytes,4,opt,name=client_version,json=clientVersion" json:"client_version,omitempty"` +} + +func (m *SetBreakpointRequest) Reset() { *m = SetBreakpointRequest{} } +func (m *SetBreakpointRequest) String() string { return proto.CompactTextString(m) } +func (*SetBreakpointRequest) ProtoMessage() {} +func (*SetBreakpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *SetBreakpointRequest) GetDebuggeeId() string { + if m != nil { + return m.DebuggeeId + } + return "" +} + +func (m *SetBreakpointRequest) GetBreakpoint() *Breakpoint { + if m != nil { + return m.Breakpoint + } + return nil +} + +func (m *SetBreakpointRequest) GetClientVersion() string { + if m != nil { + return m.ClientVersion + } + return "" +} + +// Response for setting a breakpoint. +type SetBreakpointResponse struct { + // Breakpoint resource. + // The field `id` is guaranteed to be set (in addition to the echoed fileds). + Breakpoint *Breakpoint `protobuf:"bytes,1,opt,name=breakpoint" json:"breakpoint,omitempty"` +} + +func (m *SetBreakpointResponse) Reset() { *m = SetBreakpointResponse{} } +func (m *SetBreakpointResponse) String() string { return proto.CompactTextString(m) } +func (*SetBreakpointResponse) ProtoMessage() {} +func (*SetBreakpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *SetBreakpointResponse) GetBreakpoint() *Breakpoint { + if m != nil { + return m.Breakpoint + } + return nil +} + +// Request to get breakpoint information. +type GetBreakpointRequest struct { + // ID of the debuggee whose breakpoint to get. + DebuggeeId string `protobuf:"bytes,1,opt,name=debuggee_id,json=debuggeeId" json:"debuggee_id,omitempty"` + // ID of the breakpoint to get. + BreakpointId string `protobuf:"bytes,2,opt,name=breakpoint_id,json=breakpointId" json:"breakpoint_id,omitempty"` + // The client version making the call. 
+ // Schema: `domain/type/version` (e.g., `google.com/intellij/v1`). + ClientVersion string `protobuf:"bytes,4,opt,name=client_version,json=clientVersion" json:"client_version,omitempty"` +} + +func (m *GetBreakpointRequest) Reset() { *m = GetBreakpointRequest{} } +func (m *GetBreakpointRequest) String() string { return proto.CompactTextString(m) } +func (*GetBreakpointRequest) ProtoMessage() {} +func (*GetBreakpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *GetBreakpointRequest) GetDebuggeeId() string { + if m != nil { + return m.DebuggeeId + } + return "" +} + +func (m *GetBreakpointRequest) GetBreakpointId() string { + if m != nil { + return m.BreakpointId + } + return "" +} + +func (m *GetBreakpointRequest) GetClientVersion() string { + if m != nil { + return m.ClientVersion + } + return "" +} + +// Response for getting breakpoint information. +type GetBreakpointResponse struct { + // Complete breakpoint state. + // The fields `id` and `location` are guaranteed to be set. + Breakpoint *Breakpoint `protobuf:"bytes,1,opt,name=breakpoint" json:"breakpoint,omitempty"` +} + +func (m *GetBreakpointResponse) Reset() { *m = GetBreakpointResponse{} } +func (m *GetBreakpointResponse) String() string { return proto.CompactTextString(m) } +func (*GetBreakpointResponse) ProtoMessage() {} +func (*GetBreakpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *GetBreakpointResponse) GetBreakpoint() *Breakpoint { + if m != nil { + return m.Breakpoint + } + return nil +} + +// Request to delete a breakpoint. +type DeleteBreakpointRequest struct { + // ID of the debuggee whose breakpoint to delete. + DebuggeeId string `protobuf:"bytes,1,opt,name=debuggee_id,json=debuggeeId" json:"debuggee_id,omitempty"` + // ID of the breakpoint to delete. + BreakpointId string `protobuf:"bytes,2,opt,name=breakpoint_id,json=breakpointId" json:"breakpoint_id,omitempty"` + // The client version making the call. 
+ // Schema: `domain/type/version` (e.g., `google.com/intellij/v1`). + ClientVersion string `protobuf:"bytes,3,opt,name=client_version,json=clientVersion" json:"client_version,omitempty"` +} + +func (m *DeleteBreakpointRequest) Reset() { *m = DeleteBreakpointRequest{} } +func (m *DeleteBreakpointRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteBreakpointRequest) ProtoMessage() {} +func (*DeleteBreakpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *DeleteBreakpointRequest) GetDebuggeeId() string { + if m != nil { + return m.DebuggeeId + } + return "" +} + +func (m *DeleteBreakpointRequest) GetBreakpointId() string { + if m != nil { + return m.BreakpointId + } + return "" +} + +func (m *DeleteBreakpointRequest) GetClientVersion() string { + if m != nil { + return m.ClientVersion + } + return "" +} + +// Request to list breakpoints. +type ListBreakpointsRequest struct { + // ID of the debuggee whose breakpoints to list. + DebuggeeId string `protobuf:"bytes,1,opt,name=debuggee_id,json=debuggeeId" json:"debuggee_id,omitempty"` + // When set to `true`, the response includes the list of breakpoints set by + // any user. Otherwise, it includes only breakpoints set by the caller. + IncludeAllUsers bool `protobuf:"varint,2,opt,name=include_all_users,json=includeAllUsers" json:"include_all_users,omitempty"` + // When set to `true`, the response includes active and inactive + // breakpoints. Otherwise, it includes only active breakpoints. + IncludeInactive bool `protobuf:"varint,3,opt,name=include_inactive,json=includeInactive" json:"include_inactive,omitempty"` + // When set, the response includes only breakpoints with the specified action. + Action *ListBreakpointsRequest_BreakpointActionValue `protobuf:"bytes,4,opt,name=action" json:"action,omitempty"` + // This field is deprecated. 
The following fields are always stripped out of + // the result: `stack_frames`, `evaluated_expressions` and `variable_table`. + StripResults bool `protobuf:"varint,5,opt,name=strip_results,json=stripResults" json:"strip_results,omitempty"` + // A wait token that, if specified, blocks the call until the breakpoints + // list has changed, or a server selected timeout has expired. The value + // should be set from the last response. The error code + // `google.rpc.Code.ABORTED` (RPC) is returned on wait timeout, which + // should be called again with the same `wait_token`. + WaitToken string `protobuf:"bytes,6,opt,name=wait_token,json=waitToken" json:"wait_token,omitempty"` + // The client version making the call. + // Schema: `domain/type/version` (e.g., `google.com/intellij/v1`). + ClientVersion string `protobuf:"bytes,8,opt,name=client_version,json=clientVersion" json:"client_version,omitempty"` +} + +func (m *ListBreakpointsRequest) Reset() { *m = ListBreakpointsRequest{} } +func (m *ListBreakpointsRequest) String() string { return proto.CompactTextString(m) } +func (*ListBreakpointsRequest) ProtoMessage() {} +func (*ListBreakpointsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *ListBreakpointsRequest) GetDebuggeeId() string { + if m != nil { + return m.DebuggeeId + } + return "" +} + +func (m *ListBreakpointsRequest) GetIncludeAllUsers() bool { + if m != nil { + return m.IncludeAllUsers + } + return false +} + +func (m *ListBreakpointsRequest) GetIncludeInactive() bool { + if m != nil { + return m.IncludeInactive + } + return false +} + +func (m *ListBreakpointsRequest) GetAction() *ListBreakpointsRequest_BreakpointActionValue { + if m != nil { + return m.Action + } + return nil +} + +func (m *ListBreakpointsRequest) GetStripResults() bool { + if m != nil { + return m.StripResults + } + return false +} + +func (m *ListBreakpointsRequest) GetWaitToken() string { + if m != nil { + return m.WaitToken + } + return "" +} + +func 
(m *ListBreakpointsRequest) GetClientVersion() string { + if m != nil { + return m.ClientVersion + } + return "" +} + +// Wrapper message for `Breakpoint.Action`. Defines a filter on the action +// field of breakpoints. +type ListBreakpointsRequest_BreakpointActionValue struct { + // Only breakpoints with the specified action will pass the filter. + Value Breakpoint_Action `protobuf:"varint,1,opt,name=value,enum=google.devtools.clouddebugger.v2.Breakpoint_Action" json:"value,omitempty"` +} + +func (m *ListBreakpointsRequest_BreakpointActionValue) Reset() { + *m = ListBreakpointsRequest_BreakpointActionValue{} +} +func (m *ListBreakpointsRequest_BreakpointActionValue) String() string { + return proto.CompactTextString(m) +} +func (*ListBreakpointsRequest_BreakpointActionValue) ProtoMessage() {} +func (*ListBreakpointsRequest_BreakpointActionValue) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{5, 0} +} + +func (m *ListBreakpointsRequest_BreakpointActionValue) GetValue() Breakpoint_Action { + if m != nil { + return m.Value + } + return Breakpoint_CAPTURE +} + +// Response for listing breakpoints. +type ListBreakpointsResponse struct { + // List of breakpoints matching the request. + // The fields `id` and `location` are guaranteed to be set on each breakpoint. + // The fields: `stack_frames`, `evaluated_expressions` and `variable_table` + // are cleared on each breakpoint regardless of its status. + Breakpoints []*Breakpoint `protobuf:"bytes,1,rep,name=breakpoints" json:"breakpoints,omitempty"` + // A wait token that can be used in the next call to `list` (REST) or + // `ListBreakpoints` (RPC) to block until the list of breakpoints has changes. 
+ NextWaitToken string `protobuf:"bytes,2,opt,name=next_wait_token,json=nextWaitToken" json:"next_wait_token,omitempty"` +} + +func (m *ListBreakpointsResponse) Reset() { *m = ListBreakpointsResponse{} } +func (m *ListBreakpointsResponse) String() string { return proto.CompactTextString(m) } +func (*ListBreakpointsResponse) ProtoMessage() {} +func (*ListBreakpointsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *ListBreakpointsResponse) GetBreakpoints() []*Breakpoint { + if m != nil { + return m.Breakpoints + } + return nil +} + +func (m *ListBreakpointsResponse) GetNextWaitToken() string { + if m != nil { + return m.NextWaitToken + } + return "" +} + +// Request to list debuggees. +type ListDebuggeesRequest struct { + // Project number of a Google Cloud project whose debuggees to list. + Project string `protobuf:"bytes,2,opt,name=project" json:"project,omitempty"` + // When set to `true`, the result includes all debuggees. Otherwise, the + // result includes only debuggees that are active. + IncludeInactive bool `protobuf:"varint,3,opt,name=include_inactive,json=includeInactive" json:"include_inactive,omitempty"` + // The client version making the call. + // Schema: `domain/type/version` (e.g., `google.com/intellij/v1`). 
+ ClientVersion string `protobuf:"bytes,4,opt,name=client_version,json=clientVersion" json:"client_version,omitempty"` +} + +func (m *ListDebuggeesRequest) Reset() { *m = ListDebuggeesRequest{} } +func (m *ListDebuggeesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDebuggeesRequest) ProtoMessage() {} +func (*ListDebuggeesRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *ListDebuggeesRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *ListDebuggeesRequest) GetIncludeInactive() bool { + if m != nil { + return m.IncludeInactive + } + return false +} + +func (m *ListDebuggeesRequest) GetClientVersion() string { + if m != nil { + return m.ClientVersion + } + return "" +} + +// Response for listing debuggees. +type ListDebuggeesResponse struct { + // List of debuggees accessible to the calling user. + // The fields `debuggee.id` and `description` are guaranteed to be set. + // The `description` field is a human readable field provided by agents and + // can be displayed to users. 
+ Debuggees []*Debuggee `protobuf:"bytes,1,rep,name=debuggees" json:"debuggees,omitempty"` +} + +func (m *ListDebuggeesResponse) Reset() { *m = ListDebuggeesResponse{} } +func (m *ListDebuggeesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDebuggeesResponse) ProtoMessage() {} +func (*ListDebuggeesResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +func (m *ListDebuggeesResponse) GetDebuggees() []*Debuggee { + if m != nil { + return m.Debuggees + } + return nil +} + +func init() { + proto.RegisterType((*SetBreakpointRequest)(nil), "google.devtools.clouddebugger.v2.SetBreakpointRequest") + proto.RegisterType((*SetBreakpointResponse)(nil), "google.devtools.clouddebugger.v2.SetBreakpointResponse") + proto.RegisterType((*GetBreakpointRequest)(nil), "google.devtools.clouddebugger.v2.GetBreakpointRequest") + proto.RegisterType((*GetBreakpointResponse)(nil), "google.devtools.clouddebugger.v2.GetBreakpointResponse") + proto.RegisterType((*DeleteBreakpointRequest)(nil), "google.devtools.clouddebugger.v2.DeleteBreakpointRequest") + proto.RegisterType((*ListBreakpointsRequest)(nil), "google.devtools.clouddebugger.v2.ListBreakpointsRequest") + proto.RegisterType((*ListBreakpointsRequest_BreakpointActionValue)(nil), "google.devtools.clouddebugger.v2.ListBreakpointsRequest.BreakpointActionValue") + proto.RegisterType((*ListBreakpointsResponse)(nil), "google.devtools.clouddebugger.v2.ListBreakpointsResponse") + proto.RegisterType((*ListDebuggeesRequest)(nil), "google.devtools.clouddebugger.v2.ListDebuggeesRequest") + proto.RegisterType((*ListDebuggeesResponse)(nil), "google.devtools.clouddebugger.v2.ListDebuggeesResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Debugger2 service + +type Debugger2Client interface { + // Sets the breakpoint to the debuggee. + SetBreakpoint(ctx context.Context, in *SetBreakpointRequest, opts ...grpc.CallOption) (*SetBreakpointResponse, error) + // Gets breakpoint information. + GetBreakpoint(ctx context.Context, in *GetBreakpointRequest, opts ...grpc.CallOption) (*GetBreakpointResponse, error) + // Deletes the breakpoint from the debuggee. + DeleteBreakpoint(ctx context.Context, in *DeleteBreakpointRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Lists all breakpoints for the debuggee. + ListBreakpoints(ctx context.Context, in *ListBreakpointsRequest, opts ...grpc.CallOption) (*ListBreakpointsResponse, error) + // Lists all the debuggees that the user has access to. + ListDebuggees(ctx context.Context, in *ListDebuggeesRequest, opts ...grpc.CallOption) (*ListDebuggeesResponse, error) +} + +type debugger2Client struct { + cc *grpc.ClientConn +} + +func NewDebugger2Client(cc *grpc.ClientConn) Debugger2Client { + return &debugger2Client{cc} +} + +func (c *debugger2Client) SetBreakpoint(ctx context.Context, in *SetBreakpointRequest, opts ...grpc.CallOption) (*SetBreakpointResponse, error) { + out := new(SetBreakpointResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouddebugger.v2.Debugger2/SetBreakpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *debugger2Client) GetBreakpoint(ctx context.Context, in *GetBreakpointRequest, opts ...grpc.CallOption) (*GetBreakpointResponse, error) { + out := new(GetBreakpointResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouddebugger.v2.Debugger2/GetBreakpoint", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *debugger2Client) DeleteBreakpoint(ctx context.Context, in *DeleteBreakpointRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.devtools.clouddebugger.v2.Debugger2/DeleteBreakpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *debugger2Client) ListBreakpoints(ctx context.Context, in *ListBreakpointsRequest, opts ...grpc.CallOption) (*ListBreakpointsResponse, error) { + out := new(ListBreakpointsResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouddebugger.v2.Debugger2/ListBreakpoints", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *debugger2Client) ListDebuggees(ctx context.Context, in *ListDebuggeesRequest, opts ...grpc.CallOption) (*ListDebuggeesResponse, error) { + out := new(ListDebuggeesResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouddebugger.v2.Debugger2/ListDebuggees", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Debugger2 service + +type Debugger2Server interface { + // Sets the breakpoint to the debuggee. + SetBreakpoint(context.Context, *SetBreakpointRequest) (*SetBreakpointResponse, error) + // Gets breakpoint information. + GetBreakpoint(context.Context, *GetBreakpointRequest) (*GetBreakpointResponse, error) + // Deletes the breakpoint from the debuggee. + DeleteBreakpoint(context.Context, *DeleteBreakpointRequest) (*google_protobuf3.Empty, error) + // Lists all breakpoints for the debuggee. + ListBreakpoints(context.Context, *ListBreakpointsRequest) (*ListBreakpointsResponse, error) + // Lists all the debuggees that the user has access to. 
+ ListDebuggees(context.Context, *ListDebuggeesRequest) (*ListDebuggeesResponse, error) +} + +func RegisterDebugger2Server(s *grpc.Server, srv Debugger2Server) { + s.RegisterService(&_Debugger2_serviceDesc, srv) +} + +func _Debugger2_SetBreakpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetBreakpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Debugger2Server).SetBreakpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouddebugger.v2.Debugger2/SetBreakpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Debugger2Server).SetBreakpoint(ctx, req.(*SetBreakpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Debugger2_GetBreakpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBreakpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Debugger2Server).GetBreakpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouddebugger.v2.Debugger2/GetBreakpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Debugger2Server).GetBreakpoint(ctx, req.(*GetBreakpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Debugger2_DeleteBreakpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteBreakpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Debugger2Server).DeleteBreakpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.devtools.clouddebugger.v2.Debugger2/DeleteBreakpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Debugger2Server).DeleteBreakpoint(ctx, req.(*DeleteBreakpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Debugger2_ListBreakpoints_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBreakpointsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Debugger2Server).ListBreakpoints(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouddebugger.v2.Debugger2/ListBreakpoints", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Debugger2Server).ListBreakpoints(ctx, req.(*ListBreakpointsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Debugger2_ListDebuggees_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDebuggeesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Debugger2Server).ListDebuggees(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouddebugger.v2.Debugger2/ListDebuggees", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Debugger2Server).ListDebuggees(ctx, req.(*ListDebuggeesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Debugger2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.clouddebugger.v2.Debugger2", + HandlerType: (*Debugger2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetBreakpoint", + Handler: _Debugger2_SetBreakpoint_Handler, + }, + { + MethodName: "GetBreakpoint", + Handler: 
_Debugger2_GetBreakpoint_Handler, + }, + { + MethodName: "DeleteBreakpoint", + Handler: _Debugger2_DeleteBreakpoint_Handler, + }, + { + MethodName: "ListBreakpoints", + Handler: _Debugger2_ListBreakpoints_Handler, + }, + { + MethodName: "ListDebuggees", + Handler: _Debugger2_ListDebuggees_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/clouddebugger/v2/debugger.proto", +} + +func init() { proto.RegisterFile("google/devtools/clouddebugger/v2/debugger.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 781 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcb, 0x6e, 0xd3, 0x4c, + 0x18, 0xd5, 0xa4, 0x7f, 0x2f, 0xf9, 0xd2, 0xb4, 0xfd, 0x47, 0xbd, 0x58, 0xe1, 0x16, 0x99, 0x8b, + 0x4a, 0x41, 0x36, 0x72, 0x11, 0xb4, 0xb0, 0xa1, 0xa1, 0x28, 0x8d, 0x54, 0xaa, 0x2a, 0x40, 0x91, + 0xd8, 0x44, 0x4e, 0x3c, 0xb5, 0x4c, 0x5d, 0x8f, 0xf1, 0x8c, 0x03, 0xa8, 0xea, 0xa6, 0x48, 0xec, + 0x11, 0x2f, 0x00, 0x5b, 0x84, 0xc4, 0x13, 0x20, 0xb1, 0x43, 0x62, 0xcb, 0x2b, 0xf0, 0x20, 0xc8, + 0xf6, 0xb8, 0x71, 0x82, 0x21, 0x71, 0x2a, 0x75, 0xe7, 0x9c, 0xcc, 0x77, 0x7c, 0xce, 0x99, 0x6f, + 0xbe, 0x31, 0xa8, 0x26, 0xa5, 0xa6, 0x4d, 0x54, 0x83, 0xb4, 0x39, 0xa5, 0x36, 0x53, 0x5b, 0x36, + 0xf5, 0x0d, 0x83, 0x34, 0x7d, 0xd3, 0x24, 0x9e, 0xda, 0xd6, 0xd4, 0xf8, 0x59, 0x71, 0x3d, 0xca, + 0x29, 0x2e, 0x47, 0x05, 0x4a, 0x5c, 0xa0, 0x74, 0x15, 0x28, 0x6d, 0xad, 0x74, 0x56, 0x50, 0xea, + 0xae, 0xa5, 0xea, 0x8e, 0x43, 0xb9, 0xce, 0x2d, 0xea, 0xb0, 0xa8, 0xbe, 0x74, 0xad, 0xff, 0x0b, + 0x75, 0xae, 0x8b, 0xc5, 0x67, 0xc4, 0xe2, 0xf0, 0x57, 0xd3, 0xdf, 0x55, 0xc9, 0xbe, 0xcb, 0x5f, + 0x47, 0x7f, 0xca, 0x9f, 0x11, 0xcc, 0x3e, 0x22, 0xbc, 0xe2, 0x11, 0x7d, 0xcf, 0xa5, 0x96, 0xc3, + 0xeb, 0xe4, 0x85, 0x4f, 0x18, 0xc7, 0x17, 0xa0, 0x20, 0xf8, 0x48, 0xc3, 0x32, 0x24, 0x54, 0x46, + 0x8b, 0xf9, 0x3a, 0xc4, 0x50, 0xcd, 0xc0, 0x9b, 0x00, 0xcd, 0xe3, 0x2a, 0x29, 0x57, 0x46, 0x8b, + 0x05, 0xed, 0xba, 0xd2, 0xcf, 
0x98, 0x92, 0x78, 0x53, 0xa2, 0x1e, 0x5f, 0x86, 0xa9, 0x96, 0x6d, + 0x11, 0x87, 0x37, 0xda, 0xc4, 0x63, 0x16, 0x75, 0xa4, 0xff, 0xc2, 0x37, 0x16, 0x23, 0x74, 0x27, + 0x02, 0x65, 0x02, 0x73, 0x3d, 0x6a, 0x99, 0x4b, 0x1d, 0x46, 0x7a, 0xd4, 0xa0, 0x93, 0xa9, 0x91, + 0xdf, 0x20, 0x98, 0xad, 0x0e, 0x95, 0xca, 0x45, 0x28, 0x76, 0x78, 0x82, 0x25, 0xb9, 0x70, 0xc9, + 0x64, 0x07, 0xac, 0x19, 0x19, 0xcc, 0x56, 0x4f, 0xc1, 0xec, 0x5b, 0x04, 0x0b, 0xeb, 0xc4, 0x26, + 0x9c, 0x9c, 0x9e, 0xdf, 0x91, 0x34, 0xbf, 0xdf, 0x47, 0x60, 0x7e, 0xd3, 0x62, 0x09, 0xc7, 0x6c, + 0x60, 0x1d, 0x4b, 0xf0, 0xbf, 0xe5, 0xb4, 0x6c, 0xdf, 0x20, 0x0d, 0xdd, 0xb6, 0x1b, 0x3e, 0x23, + 0x1e, 0x0b, 0xb5, 0x4c, 0xd4, 0xa7, 0xc5, 0x1f, 0x6b, 0xb6, 0xfd, 0x24, 0x80, 0xf1, 0x55, 0x98, + 0x89, 0xd7, 0x5a, 0x8e, 0xde, 0xe2, 0x56, 0x9b, 0x84, 0x82, 0x3a, 0x4b, 0x6b, 0x02, 0xc6, 0xbb, + 0x30, 0x16, 0x3c, 0x89, 0x1d, 0x2a, 0x68, 0x5b, 0xfd, 0x53, 0x4e, 0x77, 0x90, 0x08, 0x7f, 0x2d, + 0x24, 0xdc, 0xd1, 0x6d, 0x9f, 0xd4, 0x05, 0x7b, 0x10, 0x23, 0xe3, 0x9e, 0xe5, 0x36, 0x3c, 0xc2, + 0x7c, 0x9b, 0x33, 0x69, 0x34, 0xd4, 0x33, 0x19, 0x82, 0xf5, 0x08, 0xc3, 0xe7, 0x00, 0x5e, 0xea, + 0x16, 0x6f, 0x70, 0xba, 0x47, 0x1c, 0x69, 0x2c, 0xcc, 0x20, 0x1f, 0x20, 0x8f, 0x03, 0x20, 0x25, + 0xe5, 0x89, 0x94, 0x94, 0x4b, 0x4d, 0x98, 0x4b, 0xd5, 0x82, 0x6b, 0x30, 0xda, 0x0e, 0x1e, 0xc2, + 0x74, 0xa7, 0xb4, 0xe5, 0x2c, 0x0d, 0xa5, 0x44, 0x44, 0xf5, 0x88, 0x41, 0x7e, 0x87, 0x60, 0xe1, + 0x8f, 0x1c, 0x44, 0xf3, 0x6e, 0x41, 0xa1, 0xd3, 0x1c, 0x4c, 0x42, 0xe5, 0x91, 0xcc, 0xdd, 0x9b, + 0x24, 0xc0, 0x57, 0x60, 0xda, 0x21, 0xaf, 0x78, 0x23, 0x11, 0x4d, 0xd4, 0x83, 0xc5, 0x00, 0x7e, + 0x1a, 0xc7, 0x23, 0x1f, 0x21, 0x98, 0x0d, 0x34, 0xad, 0x8b, 0xa6, 0x39, 0xee, 0x2d, 0x09, 0xc6, + 0x5d, 0x8f, 0x3e, 0x27, 0x2d, 0x2e, 0x0a, 0xe3, 0x9f, 0x59, 0x1a, 0x65, 0xc0, 0x23, 0xad, 0xc3, + 0x5c, 0x8f, 0x06, 0x91, 0xca, 0x06, 0xe4, 0xe3, 0x6e, 0x8e, 0x33, 0x59, 0xea, 0x9f, 0x49, 0xcc, + 0x53, 0xef, 0x14, 0x6b, 0x5f, 0xc7, 0x21, 0x2f, 0x70, 0x4f, 0xc3, 
0x3f, 0x10, 0x14, 0xbb, 0x26, + 0x26, 0xbe, 0xd5, 0x9f, 0x36, 0xed, 0x42, 0x28, 0xdd, 0xce, 0x5c, 0x17, 0x59, 0x93, 0x37, 0x8e, + 0x7e, 0xfe, 0x7a, 0x9f, 0xab, 0xc8, 0x37, 0x93, 0x17, 0xa1, 0x7a, 0x2c, 0x58, 0x3d, 0x48, 0x9c, + 0xec, 0x43, 0x35, 0xb1, 0xb5, 0x2a, 0x23, 0xfc, 0x4e, 0xf2, 0x92, 0x08, 0xcc, 0x54, 0xb3, 0x9a, + 0xa9, 0x0e, 0x69, 0xa6, 0xfa, 0x2f, 0x33, 0xf8, 0x5e, 0x66, 0x33, 0x07, 0x5d, 0x73, 0xf2, 0x10, + 0x7f, 0x41, 0x30, 0xd3, 0x3b, 0x76, 0xf1, 0xea, 0x20, 0x7b, 0x9e, 0x3a, 0xaa, 0x4b, 0xf3, 0x71, + 0x69, 0x7c, 0xcf, 0x2b, 0x0f, 0x82, 0x7b, 0x3e, 0x56, 0xbc, 0x74, 0x72, 0xc5, 0xdf, 0x10, 0x4c, + 0xf7, 0x9c, 0x6a, 0xbc, 0x32, 0xec, 0x40, 0x2c, 0xad, 0x0e, 0x51, 0x29, 0x36, 0x61, 0x25, 0xb4, + 0xa4, 0xe1, 0x1b, 0x59, 0x2d, 0xe1, 0x0f, 0x08, 0x8a, 0x5d, 0x07, 0x70, 0x90, 0x0e, 0x4a, 0x9b, + 0x1a, 0x83, 0x74, 0x50, 0xea, 0x49, 0x97, 0xcf, 0x87, 0xe2, 0x25, 0x3c, 0x9f, 0x2e, 0xbe, 0xf2, + 0x11, 0xc1, 0xa5, 0x16, 0xdd, 0xef, 0x4b, 0x5f, 0x29, 0xc6, 0xa7, 0x7c, 0x3b, 0xd8, 0xf0, 0x6d, + 0xf4, 0xec, 0xa1, 0x28, 0x31, 0xa9, 0xad, 0x3b, 0xa6, 0x42, 0x3d, 0x53, 0x35, 0x89, 0x13, 0xb6, + 0x83, 0xf8, 0x42, 0xd5, 0x5d, 0x8b, 0xfd, 0xfd, 0xa3, 0xf1, 0x6e, 0x17, 0xf0, 0x29, 0x27, 0x55, + 0x23, 0xbe, 0xfb, 0x01, 0x1c, 0xcf, 0x1a, 0x4f, 0xd9, 0xd1, 0x9a, 0x63, 0x21, 0xe9, 0xf2, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xf8, 0x5d, 0x68, 0xf9, 0x0a, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/common.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/common.pb.go new file mode 100644 index 000000000..97bd317fc --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/common.pb.go @@ -0,0 +1,433 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/clouderrorreporting/v1beta1/common.proto + +/* +Package clouderrorreporting is a generated protocol buffer package. 
+ +It is generated from these files: + google/devtools/clouderrorreporting/v1beta1/common.proto + google/devtools/clouderrorreporting/v1beta1/error_group_service.proto + google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto + google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto + +It has these top-level messages: + ErrorGroup + TrackingIssue + ErrorEvent + ServiceContext + ErrorContext + HttpRequestContext + SourceLocation + GetGroupRequest + UpdateGroupRequest + ListGroupStatsRequest + ListGroupStatsResponse + ErrorGroupStats + TimedCount + ListEventsRequest + ListEventsResponse + QueryTimeRange + ServiceContextFilter + DeleteEventsRequest + DeleteEventsResponse + ReportErrorEventRequest + ReportErrorEventResponse + ReportedErrorEvent +*/ +package clouderrorreporting + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/monitoredres" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Description of a group of similar error events. +type ErrorGroup struct { + // The group resource name. + // Example: projects/my-project-123/groups/my-groupid + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Group IDs are unique for a given project. If the same kind of error + // occurs in different service contexts, it will receive the same group ID. 
+ GroupId string `protobuf:"bytes,2,opt,name=group_id,json=groupId" json:"group_id,omitempty"` + // Associated tracking issues. + TrackingIssues []*TrackingIssue `protobuf:"bytes,3,rep,name=tracking_issues,json=trackingIssues" json:"tracking_issues,omitempty"` +} + +func (m *ErrorGroup) Reset() { *m = ErrorGroup{} } +func (m *ErrorGroup) String() string { return proto.CompactTextString(m) } +func (*ErrorGroup) ProtoMessage() {} +func (*ErrorGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ErrorGroup) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ErrorGroup) GetGroupId() string { + if m != nil { + return m.GroupId + } + return "" +} + +func (m *ErrorGroup) GetTrackingIssues() []*TrackingIssue { + if m != nil { + return m.TrackingIssues + } + return nil +} + +// Information related to tracking the progress on resolving the error. +type TrackingIssue struct { + // A URL pointing to a related entry in an issue tracking system. + // Example: https://github.com/user/project/issues/4 + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` +} + +func (m *TrackingIssue) Reset() { *m = TrackingIssue{} } +func (m *TrackingIssue) String() string { return proto.CompactTextString(m) } +func (*TrackingIssue) ProtoMessage() {} +func (*TrackingIssue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *TrackingIssue) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +// An error event which is returned by the Error Reporting system. +type ErrorEvent struct { + // Time when the event occurred as provided in the error report. + // If the report did not contain a timestamp, the time the error was received + // by the Error Reporting system is used. + EventTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"` + // The `ServiceContext` for which this error was reported. 
+ ServiceContext *ServiceContext `protobuf:"bytes,2,opt,name=service_context,json=serviceContext" json:"service_context,omitempty"` + // The stack trace that was reported or logged by the service. + Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` + // Data about the context in which the error occurred. + Context *ErrorContext `protobuf:"bytes,5,opt,name=context" json:"context,omitempty"` +} + +func (m *ErrorEvent) Reset() { *m = ErrorEvent{} } +func (m *ErrorEvent) String() string { return proto.CompactTextString(m) } +func (*ErrorEvent) ProtoMessage() {} +func (*ErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ErrorEvent) GetEventTime() *google_protobuf1.Timestamp { + if m != nil { + return m.EventTime + } + return nil +} + +func (m *ErrorEvent) GetServiceContext() *ServiceContext { + if m != nil { + return m.ServiceContext + } + return nil +} + +func (m *ErrorEvent) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *ErrorEvent) GetContext() *ErrorContext { + if m != nil { + return m.Context + } + return nil +} + +// Describes a running service that sends errors. +// Its version changes over time and multiple versions can run in parallel. +type ServiceContext struct { + // An identifier of the service, such as the name of the + // executable, job, or Google App Engine service name. This field is expected + // to have a low number of values that are relatively stable over time, as + // opposed to `version`, which can be changed whenever new code is deployed. + // + // Contains the service name for error reports extracted from Google + // App Engine logs or `default` if the App Engine default service is used. + Service string `protobuf:"bytes,2,opt,name=service" json:"service,omitempty"` + // Represents the source code version that the developer provided, + // which could represent a version label or a Git SHA-1 hash, for example. 
+ Version string `protobuf:"bytes,3,opt,name=version" json:"version,omitempty"` + // Type of the MonitoredResource. List of possible values: + // https://cloud.google.com/monitoring/api/resources + // + // Value is set automatically for incoming errors and must not be set when + // reporting errors. + ResourceType string `protobuf:"bytes,4,opt,name=resource_type,json=resourceType" json:"resource_type,omitempty"` +} + +func (m *ServiceContext) Reset() { *m = ServiceContext{} } +func (m *ServiceContext) String() string { return proto.CompactTextString(m) } +func (*ServiceContext) ProtoMessage() {} +func (*ServiceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ServiceContext) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +func (m *ServiceContext) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *ServiceContext) GetResourceType() string { + if m != nil { + return m.ResourceType + } + return "" +} + +// A description of the context in which an error occurred. +// This data should be provided by the application when reporting an error, +// unless the +// error report has been generated automatically from Google App Engine logs. +type ErrorContext struct { + // The HTTP request which was processed when the error was + // triggered. + HttpRequest *HttpRequestContext `protobuf:"bytes,1,opt,name=http_request,json=httpRequest" json:"http_request,omitempty"` + // The user who caused or was affected by the crash. + // This can be a user ID, an email address, or an arbitrary token that + // uniquely identifies the user. + // When sending an error report, leave this field empty if the user was not + // logged in. In this case the + // Error Reporting system will use other data, such as remote IP address, to + // distinguish affected users. See `affected_users_count` in + // `ErrorGroupStats`. 
+ User string `protobuf:"bytes,2,opt,name=user" json:"user,omitempty"` + // The location in the source code where the decision was made to + // report the error, usually the place where it was logged. + // For a logged exception this would be the source line where the + // exception is logged, usually close to the place where it was + // caught. This value is in contrast to `Exception.cause_location`, + // which describes the source line where the exception was thrown. + ReportLocation *SourceLocation `protobuf:"bytes,3,opt,name=report_location,json=reportLocation" json:"report_location,omitempty"` +} + +func (m *ErrorContext) Reset() { *m = ErrorContext{} } +func (m *ErrorContext) String() string { return proto.CompactTextString(m) } +func (*ErrorContext) ProtoMessage() {} +func (*ErrorContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ErrorContext) GetHttpRequest() *HttpRequestContext { + if m != nil { + return m.HttpRequest + } + return nil +} + +func (m *ErrorContext) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *ErrorContext) GetReportLocation() *SourceLocation { + if m != nil { + return m.ReportLocation + } + return nil +} + +// HTTP request data that is related to a reported error. +// This data should be provided by the application when reporting an error, +// unless the +// error report has been generated automatically from Google App Engine logs. +type HttpRequestContext struct { + // The type of HTTP request, such as `GET`, `POST`, etc. + Method string `protobuf:"bytes,1,opt,name=method" json:"method,omitempty"` + // The URL of the request. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The user agent information that is provided with the request. + UserAgent string `protobuf:"bytes,3,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` + // The referrer information that is provided with the request. 
+ Referrer string `protobuf:"bytes,4,opt,name=referrer" json:"referrer,omitempty"` + // The HTTP response status code for the request. + ResponseStatusCode int32 `protobuf:"varint,5,opt,name=response_status_code,json=responseStatusCode" json:"response_status_code,omitempty"` + // The IP address from which the request originated. + // This can be IPv4, IPv6, or a token which is derived from the + // IP address, depending on the data that has been provided + // in the error report. + RemoteIp string `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` +} + +func (m *HttpRequestContext) Reset() { *m = HttpRequestContext{} } +func (m *HttpRequestContext) String() string { return proto.CompactTextString(m) } +func (*HttpRequestContext) ProtoMessage() {} +func (*HttpRequestContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *HttpRequestContext) GetMethod() string { + if m != nil { + return m.Method + } + return "" +} + +func (m *HttpRequestContext) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *HttpRequestContext) GetUserAgent() string { + if m != nil { + return m.UserAgent + } + return "" +} + +func (m *HttpRequestContext) GetReferrer() string { + if m != nil { + return m.Referrer + } + return "" +} + +func (m *HttpRequestContext) GetResponseStatusCode() int32 { + if m != nil { + return m.ResponseStatusCode + } + return 0 +} + +func (m *HttpRequestContext) GetRemoteIp() string { + if m != nil { + return m.RemoteIp + } + return "" +} + +// Indicates a location in the source code of the service for which +// errors are reported. +// This data should be provided by the application when reporting an error, +// unless the error report has been generated automatically from Google App +// Engine logs. All fields are optional. +type SourceLocation struct { + // The source code filename, which can include a truncated relative + // path, or a full path from a production machine. 
+ FilePath string `protobuf:"bytes,1,opt,name=file_path,json=filePath" json:"file_path,omitempty"` + // 1-based. 0 indicates that the line number is unknown. + LineNumber int32 `protobuf:"varint,2,opt,name=line_number,json=lineNumber" json:"line_number,omitempty"` + // Human-readable name of a function or method. + // The value can include optional context like the class or package name. + // For example, `my.package.MyClass.method` in case of Java. + FunctionName string `protobuf:"bytes,4,opt,name=function_name,json=functionName" json:"function_name,omitempty"` +} + +func (m *SourceLocation) Reset() { *m = SourceLocation{} } +func (m *SourceLocation) String() string { return proto.CompactTextString(m) } +func (*SourceLocation) ProtoMessage() {} +func (*SourceLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *SourceLocation) GetFilePath() string { + if m != nil { + return m.FilePath + } + return "" +} + +func (m *SourceLocation) GetLineNumber() int32 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *SourceLocation) GetFunctionName() string { + if m != nil { + return m.FunctionName + } + return "" +} + +func init() { + proto.RegisterType((*ErrorGroup)(nil), "google.devtools.clouderrorreporting.v1beta1.ErrorGroup") + proto.RegisterType((*TrackingIssue)(nil), "google.devtools.clouderrorreporting.v1beta1.TrackingIssue") + proto.RegisterType((*ErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ErrorEvent") + proto.RegisterType((*ServiceContext)(nil), "google.devtools.clouderrorreporting.v1beta1.ServiceContext") + proto.RegisterType((*ErrorContext)(nil), "google.devtools.clouderrorreporting.v1beta1.ErrorContext") + proto.RegisterType((*HttpRequestContext)(nil), "google.devtools.clouderrorreporting.v1beta1.HttpRequestContext") + proto.RegisterType((*SourceLocation)(nil), "google.devtools.clouderrorreporting.v1beta1.SourceLocation") +} + +func init() { + 
proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/common.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 693 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0x13, 0x3b, + 0x14, 0x56, 0x92, 0xfe, 0xe5, 0xa4, 0x4d, 0xaf, 0xac, 0xab, 0xab, 0xdc, 0x00, 0x6a, 0x49, 0x37, + 0x95, 0x90, 0x66, 0x68, 0xd9, 0x50, 0xba, 0x40, 0x34, 0xaa, 0x4a, 0x25, 0x54, 0x55, 0x93, 0x8a, + 0x05, 0x0b, 0x2c, 0x67, 0xe6, 0x64, 0x62, 0x31, 0x63, 0x0f, 0xb6, 0x27, 0xa2, 0xef, 0xc2, 0x82, + 0x35, 0x0f, 0x82, 0x78, 0x18, 0x1e, 0x02, 0xd9, 0x63, 0x97, 0x56, 0xed, 0x82, 0xec, 0xfc, 0x9d, + 0x9f, 0xef, 0x9c, 0xef, 0xf8, 0xd8, 0xf0, 0x32, 0x97, 0x32, 0x2f, 0x30, 0xce, 0x70, 0x61, 0xa4, + 0x2c, 0x74, 0x9c, 0x16, 0xb2, 0xce, 0x50, 0x29, 0xa9, 0x14, 0x56, 0x52, 0x19, 0x2e, 0xf2, 0x78, + 0x71, 0x30, 0x45, 0xc3, 0x0e, 0xe2, 0x54, 0x96, 0xa5, 0x14, 0x51, 0xa5, 0xa4, 0x91, 0xe4, 0x59, + 0x93, 0x19, 0x85, 0xcc, 0xe8, 0x81, 0xcc, 0xc8, 0x67, 0x0e, 0x1f, 0xfb, 0x32, 0xac, 0xe2, 0x31, + 0x13, 0x42, 0x1a, 0x66, 0xb8, 0x14, 0xba, 0xa1, 0x1a, 0xee, 0xdd, 0xf2, 0x96, 0x52, 0x70, 0x23, + 0x15, 0x66, 0x54, 0xa1, 0x96, 0xb5, 0x4a, 0xd1, 0x07, 0xed, 0xf8, 0x20, 0x87, 0xa6, 0xf5, 0x2c, + 0x36, 0xbc, 0x44, 0x6d, 0x58, 0x59, 0x35, 0x01, 0xa3, 0x6f, 0x2d, 0x80, 0x53, 0x5b, 0xfe, 0x4c, + 0xc9, 0xba, 0x22, 0x04, 0x56, 0x04, 0x2b, 0x71, 0xd0, 0xda, 0x6d, 0xed, 0x77, 0x13, 0x77, 0x26, + 0xff, 0xc3, 0x46, 0x6e, 0x9d, 0x94, 0x67, 0x83, 0xb6, 0xb3, 0xaf, 0x3b, 0x7c, 0x9e, 0x91, 0x14, + 0xb6, 0x8d, 0x62, 0xe9, 0x27, 0x2e, 0x72, 0xca, 0xb5, 0xae, 0x51, 0x0f, 0x3a, 0xbb, 0x9d, 0xfd, + 0xde, 0xe1, 0xab, 0x68, 0x09, 0xa1, 0xd1, 0x95, 0xe7, 0x38, 0xb7, 0x14, 0x49, 0xdf, 0xdc, 0x86, + 0x7a, 0xf4, 0x14, 0xb6, 0xee, 0x04, 0x90, 0x7f, 0xa0, 0x53, 0xab, 0xc2, 0xf7, 0x68, 0x8f, 0xa3, + 0xaf, 0x6d, 0xaf, 0xe2, 0x74, 0x81, 0xc2, 0x90, 0x23, 0x00, 0xb4, 0x07, 0x6a, 0xd5, 0xba, 0xb8, + 0xde, 0xe1, 0x30, 0x74, 0x14, 0x46, 0x11, 0x5d, 0x85, 
0x51, 0x24, 0x5d, 0x17, 0x6d, 0x31, 0xc9, + 0x60, 0x5b, 0xa3, 0x5a, 0xf0, 0x14, 0x69, 0x2a, 0x85, 0xc1, 0x2f, 0xc6, 0x69, 0xee, 0x1d, 0x1e, + 0x2f, 0xa5, 0x68, 0xd2, 0x70, 0x8c, 0x1b, 0x8a, 0xa4, 0xaf, 0xef, 0x60, 0x32, 0x80, 0xf5, 0x12, + 0xb5, 0x66, 0x39, 0x0e, 0x3a, 0xcd, 0x44, 0x3d, 0x24, 0x13, 0x58, 0x0f, 0x75, 0x57, 0x5d, 0xdd, + 0xa3, 0xa5, 0xea, 0xba, 0x21, 0x84, 0xaa, 0x81, 0x69, 0xc4, 0xa1, 0x3f, 0xb9, 0xd7, 0x80, 0x6f, + 0x29, 0x5c, 0xa9, 0x87, 0xd6, 0xb3, 0x40, 0xa5, 0xb9, 0x14, 0xa1, 0x35, 0x0f, 0xc9, 0x1e, 0x6c, + 0x85, 0xed, 0xa2, 0xe6, 0xba, 0xc2, 0xc1, 0x8a, 0xf3, 0x6f, 0x06, 0xe3, 0xd5, 0x75, 0x85, 0xa3, + 0x5f, 0x2d, 0xd8, 0xbc, 0xdd, 0x04, 0x99, 0xc2, 0xe6, 0xdc, 0x98, 0x8a, 0x2a, 0xfc, 0x5c, 0xa3, + 0x36, 0xfe, 0x36, 0x5e, 0x2f, 0xa5, 0xea, 0xad, 0x31, 0x55, 0xd2, 0xe4, 0x07, 0x6d, 0xbd, 0xf9, + 0x1f, 0x9b, 0xdd, 0xda, 0x5a, 0xa3, 0xf2, 0x52, 0xdc, 0xd9, 0x5e, 0x64, 0x43, 0x44, 0x0b, 0x99, + 0xba, 0x87, 0xe3, 0xf4, 0x2c, 0x7d, 0x91, 0x4e, 0xda, 0x3b, 0x4f, 0x91, 0xf4, 0x9b, 0x88, 0x80, + 0x47, 0x3f, 0x5b, 0x40, 0xee, 0x77, 0x47, 0xfe, 0x83, 0xb5, 0x12, 0xcd, 0x5c, 0x66, 0x7e, 0x49, + 0x3d, 0x0a, 0x9b, 0xdb, 0xbe, 0xd9, 0x5c, 0xf2, 0x04, 0xc0, 0xb6, 0x4b, 0x59, 0x8e, 0xc2, 0xf8, + 0x89, 0x77, 0xad, 0xe5, 0x8d, 0x35, 0x90, 0x21, 0x6c, 0x28, 0x9c, 0xa1, 0x52, 0xa8, 0xfc, 0xb8, + 0x6f, 0x30, 0x79, 0x0e, 0xff, 0x2a, 0xd4, 0x95, 0x14, 0x1a, 0xa9, 0x36, 0xcc, 0xd4, 0x9a, 0xa6, + 0x32, 0x43, 0xb7, 0x37, 0xab, 0x09, 0x09, 0xbe, 0x89, 0x73, 0x8d, 0x65, 0x86, 0xe4, 0x11, 0x74, + 0x15, 0x96, 0xd2, 0x20, 0xe5, 0xd5, 0x60, 0x2d, 0xd0, 0x59, 0xc3, 0x79, 0x35, 0xd2, 0xd0, 0xbf, + 0x2b, 0xd6, 0x86, 0xcf, 0x78, 0x81, 0xb4, 0x62, 0x66, 0xee, 0x85, 0x6c, 0x58, 0xc3, 0x25, 0x33, + 0x73, 0xb2, 0x03, 0xbd, 0x82, 0x0b, 0xa4, 0xa2, 0x2e, 0xa7, 0x7e, 0xf4, 0xab, 0x09, 0x58, 0xd3, + 0x85, 0xb3, 0xd8, 0x75, 0x99, 0xd5, 0x22, 0xb5, 0x4c, 0xd4, 0xfd, 0x29, 0x7e, 0x5d, 0x82, 0xf1, + 0x82, 0x95, 0x78, 0xf2, 0xa3, 0x05, 0xf6, 0x83, 0x5c, 0xe6, 0x4a, 0x4e, 0x7a, 0x63, 0xf7, 
0xa3, + 0x5e, 0xda, 0x77, 0x7c, 0xd9, 0xfa, 0xf0, 0xd1, 0xe7, 0xe6, 0xb2, 0x60, 0x22, 0x8f, 0xa4, 0xca, + 0xe3, 0x1c, 0x85, 0x7b, 0xe5, 0x71, 0xe3, 0x62, 0x15, 0xd7, 0x7f, 0xf5, 0x57, 0x1f, 0x3f, 0xe0, + 0xfb, 0xde, 0xde, 0x3b, 0x6b, 0x0a, 0x8c, 0xad, 0xb3, 0x79, 0x61, 0xc9, 0x4d, 0x53, 0xef, 0x0f, + 0x4e, 0x6c, 0xe6, 0x74, 0xcd, 0x15, 0x7c, 0xf1, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xf9, 0x6b, 0x08, + 0x8d, 0x1b, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/error_group_service.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/error_group_service.pb.go new file mode 100644 index 000000000..0125276fb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/error_group_service.pb.go @@ -0,0 +1,210 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/clouderrorreporting/v1beta1/error_group_service.proto + +package clouderrorreporting + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A request to return an individual group. +type GetGroupRequest struct { + // [Required] The group resource name. Written as + // projects/projectID/groups/group_name. + // Call + // + // groupStats.list to return a list of groups belonging to + // this project. 
+ // + // Example: projects/my-project-123/groups/my-group + GroupName string `protobuf:"bytes,1,opt,name=group_name,json=groupName" json:"group_name,omitempty"` +} + +func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} } +func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) } +func (*GetGroupRequest) ProtoMessage() {} +func (*GetGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *GetGroupRequest) GetGroupName() string { + if m != nil { + return m.GroupName + } + return "" +} + +// A request to replace the existing data for the given group. +type UpdateGroupRequest struct { + // [Required] The group which replaces the resource on the server. + Group *ErrorGroup `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` +} + +func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} } +func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateGroupRequest) ProtoMessage() {} +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *UpdateGroupRequest) GetGroup() *ErrorGroup { + if m != nil { + return m.Group + } + return nil +} + +func init() { + proto.RegisterType((*GetGroupRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.GetGroupRequest") + proto.RegisterType((*UpdateGroupRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.UpdateGroupRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ErrorGroupService service + +type ErrorGroupServiceClient interface { + // Get the specified group. 
+ GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*ErrorGroup, error) + // Replace the data for the specified group. + // Fails if the group does not exist. + UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*ErrorGroup, error) +} + +type errorGroupServiceClient struct { + cc *grpc.ClientConn +} + +func NewErrorGroupServiceClient(cc *grpc.ClientConn) ErrorGroupServiceClient { + return &errorGroupServiceClient{cc} +} + +func (c *errorGroupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*ErrorGroup, error) { + out := new(ErrorGroup) + err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/GetGroup", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *errorGroupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*ErrorGroup, error) { + out := new(ErrorGroup) + err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/UpdateGroup", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ErrorGroupService service + +type ErrorGroupServiceServer interface { + // Get the specified group. + GetGroup(context.Context, *GetGroupRequest) (*ErrorGroup, error) + // Replace the data for the specified group. + // Fails if the group does not exist. 
+ UpdateGroup(context.Context, *UpdateGroupRequest) (*ErrorGroup, error) +} + +func RegisterErrorGroupServiceServer(s *grpc.Server, srv ErrorGroupServiceServer) { + s.RegisterService(&_ErrorGroupService_serviceDesc, srv) +} + +func _ErrorGroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ErrorGroupServiceServer).GetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/GetGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ErrorGroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ErrorGroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ErrorGroupServiceServer).UpdateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ErrorGroupService/UpdateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ErrorGroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ErrorGroupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.clouderrorreporting.v1beta1.ErrorGroupService", + HandlerType: (*ErrorGroupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetGroup", + Handler: _ErrorGroupService_GetGroup_Handler, + }, + { + MethodName: "UpdateGroup", + Handler: 
_ErrorGroupService_UpdateGroup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/clouderrorreporting/v1beta1/error_group_service.proto", +} + +func init() { + proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/error_group_service.proto", fileDescriptor1) +} + +var fileDescriptor1 = []byte{ + // 381 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x4f, 0x4a, 0x03, 0x31, + 0x18, 0xc5, 0x99, 0x8a, 0x62, 0xd3, 0x85, 0x98, 0x85, 0xc8, 0xa0, 0x20, 0x75, 0xa3, 0x2d, 0x24, + 0x4e, 0x5d, 0x58, 0xfc, 0x83, 0x50, 0x29, 0x5d, 0x29, 0xa5, 0xa2, 0x0b, 0x17, 0x96, 0x74, 0x1a, + 0xc2, 0xc8, 0x4c, 0xbe, 0x31, 0x93, 0x76, 0x23, 0x6e, 0x3c, 0x80, 0x1b, 0x6f, 0xe1, 0xda, 0x0b, + 0x78, 0x04, 0xf1, 0x0a, 0x1e, 0x44, 0x26, 0xe9, 0x1f, 0x6d, 0x2b, 0x38, 0xdd, 0xbe, 0xe4, 0x7b, + 0xef, 0x97, 0x97, 0x0f, 0xd5, 0x05, 0x80, 0x08, 0x39, 0xed, 0xf2, 0xbe, 0x06, 0x08, 0x13, 0xea, + 0x87, 0xd0, 0xeb, 0x72, 0xa5, 0x40, 0x29, 0x1e, 0x83, 0xd2, 0x81, 0x14, 0xb4, 0xef, 0x75, 0xb8, + 0x66, 0x1e, 0x35, 0x72, 0x5b, 0x28, 0xe8, 0xc5, 0xed, 0x84, 0xab, 0x7e, 0xe0, 0x73, 0x12, 0x2b, + 0xd0, 0x80, 0xcb, 0xd6, 0x86, 0x0c, 0x6d, 0xc8, 0x0c, 0x1b, 0x32, 0xb0, 0x71, 0x37, 0x06, 0x99, + 0x2c, 0x0e, 0x28, 0x93, 0x12, 0x34, 0xd3, 0x01, 0xc8, 0xc4, 0x5a, 0xb9, 0xd5, 0x2c, 0x44, 0x3e, + 0x44, 0x11, 0x48, 0x3b, 0x59, 0xdc, 0x43, 0x2b, 0x0d, 0xae, 0x1b, 0x29, 0x5e, 0x8b, 0xdf, 0xf7, + 0x78, 0xa2, 0xf1, 0x26, 0x42, 0x16, 0x57, 0xb2, 0x88, 0xaf, 0x3b, 0x5b, 0xce, 0x4e, 0xbe, 0x95, + 0x37, 0xca, 0x05, 0x8b, 0x78, 0xd1, 0x47, 0xf8, 0x2a, 0xee, 0x32, 0xcd, 0x7f, 0x0d, 0x9d, 0xa3, + 0x45, 0x73, 0xc5, 0xdc, 0x2f, 0x54, 0x0e, 0x48, 0x86, 0xc7, 0x91, 0x7a, 0x2a, 0x5b, 0x3b, 0xeb, + 0x52, 0x79, 0x5e, 0x40, 0xab, 0x63, 0xf5, 0xd2, 0xf6, 0x86, 0xdf, 0x1c, 0xb4, 0x3c, 0xa4, 0xc5, + 0xc7, 0x99, 0x22, 0x26, 0x1e, 0xe9, 0xce, 0x0b, 0x58, 0xf4, 0x9e, 0x3e, 0xbf, 0x5e, 0x72, 0x65, + 0xbc, 0x3b, 0xea, 0xf3, 0x61, 0xdc, 0xd6, 0x49, 
0xac, 0xe0, 0x8e, 0xfb, 0x3a, 0xa1, 0x25, 0x6a, + 0xd4, 0x84, 0x96, 0x1e, 0xf1, 0xbb, 0x83, 0x0a, 0x3f, 0x2a, 0xc3, 0xa7, 0x99, 0xb2, 0xa7, 0xcb, + 0x9e, 0x1f, 0xbe, 0x6a, 0xe0, 0x2b, 0xee, 0x24, 0x3c, 0xf9, 0x13, 0xfe, 0xd0, 0x7e, 0x48, 0xed, + 0xc3, 0x41, 0xe9, 0xe2, 0x64, 0x09, 0xae, 0xad, 0x4d, 0xfd, 0x60, 0x33, 0xdd, 0xb9, 0xa6, 0x73, + 0x73, 0x3b, 0xb0, 0x11, 0x10, 0x32, 0x29, 0x08, 0x28, 0x41, 0x05, 0x97, 0x66, 0x23, 0xa9, 0x3d, + 0x62, 0x71, 0x90, 0xfc, 0x6b, 0x9d, 0x8f, 0x66, 0x9c, 0xbd, 0xe6, 0xb6, 0x1b, 0x36, 0xe0, 0x2c, + 0x3d, 0xb4, 0x0d, 0xb4, 0x46, 0x7c, 0xd7, 0x5e, 0x2d, 0x9d, 0xec, 0x2c, 0x99, 0xc0, 0xfd, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xb7, 0x37, 0x79, 0xd0, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/error_stats_service.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/error_stats_service.pb.go new file mode 100644 index 000000000..78f90ffed --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/error_stats_service.pb.go @@ -0,0 +1,909 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto + +package clouderrorreporting + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Specifies how the time periods of error group counts are aligned. +type TimedCountAlignment int32 + +const ( + // No alignment specified. 
+ TimedCountAlignment_ERROR_COUNT_ALIGNMENT_UNSPECIFIED TimedCountAlignment = 0 + // The time periods shall be consecutive, have width equal to the + // requested duration, and be aligned at the `alignment_time` provided in + // the request. + // The `alignment_time` does not have to be inside the query period but + // even if it is outside, only time periods are returned which overlap + // with the query period. + // A rounded alignment will typically result in a + // different size of the first or the last time period. + TimedCountAlignment_ALIGNMENT_EQUAL_ROUNDED TimedCountAlignment = 1 + // The time periods shall be consecutive, have width equal to the + // requested duration, and be aligned at the end of the requested time + // period. This can result in a different size of the + // first time period. + TimedCountAlignment_ALIGNMENT_EQUAL_AT_END TimedCountAlignment = 2 +) + +var TimedCountAlignment_name = map[int32]string{ + 0: "ERROR_COUNT_ALIGNMENT_UNSPECIFIED", + 1: "ALIGNMENT_EQUAL_ROUNDED", + 2: "ALIGNMENT_EQUAL_AT_END", +} +var TimedCountAlignment_value = map[string]int32{ + "ERROR_COUNT_ALIGNMENT_UNSPECIFIED": 0, + "ALIGNMENT_EQUAL_ROUNDED": 1, + "ALIGNMENT_EQUAL_AT_END": 2, +} + +func (x TimedCountAlignment) String() string { + return proto.EnumName(TimedCountAlignment_name, int32(x)) +} +func (TimedCountAlignment) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +// A sorting order of error groups. +type ErrorGroupOrder int32 + +const ( + // No group order specified. + ErrorGroupOrder_GROUP_ORDER_UNSPECIFIED ErrorGroupOrder = 0 + // Total count of errors in the given time window in descending order. + ErrorGroupOrder_COUNT_DESC ErrorGroupOrder = 1 + // Timestamp when the group was last seen in the given time window + // in descending order. + ErrorGroupOrder_LAST_SEEN_DESC ErrorGroupOrder = 2 + // Timestamp when the group was created in descending order. 
+ ErrorGroupOrder_CREATED_DESC ErrorGroupOrder = 3 + // Number of affected users in the given time window in descending order. + ErrorGroupOrder_AFFECTED_USERS_DESC ErrorGroupOrder = 4 +) + +var ErrorGroupOrder_name = map[int32]string{ + 0: "GROUP_ORDER_UNSPECIFIED", + 1: "COUNT_DESC", + 2: "LAST_SEEN_DESC", + 3: "CREATED_DESC", + 4: "AFFECTED_USERS_DESC", +} +var ErrorGroupOrder_value = map[string]int32{ + "GROUP_ORDER_UNSPECIFIED": 0, + "COUNT_DESC": 1, + "LAST_SEEN_DESC": 2, + "CREATED_DESC": 3, + "AFFECTED_USERS_DESC": 4, +} + +func (x ErrorGroupOrder) String() string { + return proto.EnumName(ErrorGroupOrder_name, int32(x)) +} +func (ErrorGroupOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +// The supported time ranges. +type QueryTimeRange_Period int32 + +const ( + // Do not use. + QueryTimeRange_PERIOD_UNSPECIFIED QueryTimeRange_Period = 0 + // Retrieve data for the last hour. + // Recommended minimum timed count duration: 1 min. + QueryTimeRange_PERIOD_1_HOUR QueryTimeRange_Period = 1 + // Retrieve data for the last 6 hours. + // Recommended minimum timed count duration: 10 min. + QueryTimeRange_PERIOD_6_HOURS QueryTimeRange_Period = 2 + // Retrieve data for the last day. + // Recommended minimum timed count duration: 1 hour. + QueryTimeRange_PERIOD_1_DAY QueryTimeRange_Period = 3 + // Retrieve data for the last week. + // Recommended minimum timed count duration: 6 hours. + QueryTimeRange_PERIOD_1_WEEK QueryTimeRange_Period = 4 + // Retrieve data for the last 30 days. + // Recommended minimum timed count duration: 1 day. 
+ QueryTimeRange_PERIOD_30_DAYS QueryTimeRange_Period = 5 +) + +var QueryTimeRange_Period_name = map[int32]string{ + 0: "PERIOD_UNSPECIFIED", + 1: "PERIOD_1_HOUR", + 2: "PERIOD_6_HOURS", + 3: "PERIOD_1_DAY", + 4: "PERIOD_1_WEEK", + 5: "PERIOD_30_DAYS", +} +var QueryTimeRange_Period_value = map[string]int32{ + "PERIOD_UNSPECIFIED": 0, + "PERIOD_1_HOUR": 1, + "PERIOD_6_HOURS": 2, + "PERIOD_1_DAY": 3, + "PERIOD_1_WEEK": 4, + "PERIOD_30_DAYS": 5, +} + +func (x QueryTimeRange_Period) String() string { + return proto.EnumName(QueryTimeRange_Period_name, int32(x)) +} +func (QueryTimeRange_Period) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{6, 0} } + +// Specifies a set of `ErrorGroupStats` to return. +type ListGroupStatsRequest struct { + // [Required] The resource name of the Google Cloud Platform project. Written + // as projects/ plus the + // Google Cloud + // Platform project ID. + // + // Example: projects/my-project-123. + ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName" json:"project_name,omitempty"` + // [Optional] List all ErrorGroupStats with these IDs. + GroupId []string `protobuf:"bytes,2,rep,name=group_id,json=groupId" json:"group_id,omitempty"` + // [Optional] List only ErrorGroupStats which belong to a service + // context that matches the filter. + // Data for all service contexts is returned if this field is not specified. + ServiceFilter *ServiceContextFilter `protobuf:"bytes,3,opt,name=service_filter,json=serviceFilter" json:"service_filter,omitempty"` + // [Optional] List data for the given time range. + // If not set a default time range is used. The field time_range_begin + // in the response will specify the beginning of this time range. + // Only ErrorGroupStats with a non-zero count in the given time + // range are returned, unless the request contains an explicit group_id list. + // If a group_id list is given, also ErrorGroupStats with zero + // occurrences are returned. 
+ TimeRange *QueryTimeRange `protobuf:"bytes,5,opt,name=time_range,json=timeRange" json:"time_range,omitempty"` + // [Optional] The preferred duration for a single returned `TimedCount`. + // If not set, no timed counts are returned. + TimedCountDuration *google_protobuf2.Duration `protobuf:"bytes,6,opt,name=timed_count_duration,json=timedCountDuration" json:"timed_count_duration,omitempty"` + // [Optional] The alignment of the timed counts to be returned. + // Default is `ALIGNMENT_EQUAL_AT_END`. + Alignment TimedCountAlignment `protobuf:"varint,7,opt,name=alignment,enum=google.devtools.clouderrorreporting.v1beta1.TimedCountAlignment" json:"alignment,omitempty"` + // [Optional] Time where the timed counts shall be aligned if rounded + // alignment is chosen. Default is 00:00 UTC. + AlignmentTime *google_protobuf1.Timestamp `protobuf:"bytes,8,opt,name=alignment_time,json=alignmentTime" json:"alignment_time,omitempty"` + // [Optional] The sort order in which the results are returned. + // Default is `COUNT_DESC`. + Order ErrorGroupOrder `protobuf:"varint,9,opt,name=order,enum=google.devtools.clouderrorreporting.v1beta1.ErrorGroupOrder" json:"order,omitempty"` + // [Optional] The maximum number of results to return per response. + // Default is 20. + PageSize int32 `protobuf:"varint,11,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // [Optional] A `next_page_token` provided by a previous response. To view + // additional results, pass this token along with the identical query + // parameters as the first request. 
+ PageToken string `protobuf:"bytes,12,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListGroupStatsRequest) Reset() { *m = ListGroupStatsRequest{} } +func (m *ListGroupStatsRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupStatsRequest) ProtoMessage() {} +func (*ListGroupStatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *ListGroupStatsRequest) GetProjectName() string { + if m != nil { + return m.ProjectName + } + return "" +} + +func (m *ListGroupStatsRequest) GetGroupId() []string { + if m != nil { + return m.GroupId + } + return nil +} + +func (m *ListGroupStatsRequest) GetServiceFilter() *ServiceContextFilter { + if m != nil { + return m.ServiceFilter + } + return nil +} + +func (m *ListGroupStatsRequest) GetTimeRange() *QueryTimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *ListGroupStatsRequest) GetTimedCountDuration() *google_protobuf2.Duration { + if m != nil { + return m.TimedCountDuration + } + return nil +} + +func (m *ListGroupStatsRequest) GetAlignment() TimedCountAlignment { + if m != nil { + return m.Alignment + } + return TimedCountAlignment_ERROR_COUNT_ALIGNMENT_UNSPECIFIED +} + +func (m *ListGroupStatsRequest) GetAlignmentTime() *google_protobuf1.Timestamp { + if m != nil { + return m.AlignmentTime + } + return nil +} + +func (m *ListGroupStatsRequest) GetOrder() ErrorGroupOrder { + if m != nil { + return m.Order + } + return ErrorGroupOrder_GROUP_ORDER_UNSPECIFIED +} + +func (m *ListGroupStatsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupStatsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Contains a set of requested error group stats. +type ListGroupStatsResponse struct { + // The error group stats which match the given request. 
+ ErrorGroupStats []*ErrorGroupStats `protobuf:"bytes,1,rep,name=error_group_stats,json=errorGroupStats" json:"error_group_stats,omitempty"` + // If non-empty, more results are available. + // Pass this token, along with the same query parameters as the first + // request, to view the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` + // The timestamp specifies the start time to which the request was restricted. + // The start time is set based on the requested time range. It may be adjusted + // to a later time if a project has exceeded the storage quota and older data + // has been deleted. + TimeRangeBegin *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=time_range_begin,json=timeRangeBegin" json:"time_range_begin,omitempty"` +} + +func (m *ListGroupStatsResponse) Reset() { *m = ListGroupStatsResponse{} } +func (m *ListGroupStatsResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupStatsResponse) ProtoMessage() {} +func (*ListGroupStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ListGroupStatsResponse) GetErrorGroupStats() []*ErrorGroupStats { + if m != nil { + return m.ErrorGroupStats + } + return nil +} + +func (m *ListGroupStatsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListGroupStatsResponse) GetTimeRangeBegin() *google_protobuf1.Timestamp { + if m != nil { + return m.TimeRangeBegin + } + return nil +} + +// Data extracted for a specific group based on certain filter criteria, +// such as a given time period and/or service filter. +type ErrorGroupStats struct { + // Group data that is independent of the filter criteria. + Group *ErrorGroup `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` + // Approximate total number of events in the given group that match + // the filter criteria. 
+ Count int64 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` + // Approximate number of affected users in the given group that + // match the filter criteria. + // Users are distinguished by data in the `ErrorContext` of the + // individual error events, such as their login name or their remote + // IP address in case of HTTP requests. + // The number of affected users can be zero even if the number of + // errors is non-zero if no data was provided from which the + // affected user could be deduced. + // Users are counted based on data in the request + // context that was provided in the error report. If more users are + // implicitly affected, such as due to a crash of the whole service, + // this is not reflected here. + AffectedUsersCount int64 `protobuf:"varint,3,opt,name=affected_users_count,json=affectedUsersCount" json:"affected_users_count,omitempty"` + // Approximate number of occurrences over time. + // Timed counts returned by ListGroups are guaranteed to be: + // + // - Inside the requested time interval + // - Non-overlapping, and + // - Ordered by ascending time. + TimedCounts []*TimedCount `protobuf:"bytes,4,rep,name=timed_counts,json=timedCounts" json:"timed_counts,omitempty"` + // Approximate first occurrence that was ever seen for this group + // and which matches the given filter criteria, ignoring the + // time_range that was specified in the request. + FirstSeenTime *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=first_seen_time,json=firstSeenTime" json:"first_seen_time,omitempty"` + // Approximate last occurrence that was ever seen for this group and + // which matches the given filter criteria, ignoring the time_range + // that was specified in the request. + LastSeenTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=last_seen_time,json=lastSeenTime" json:"last_seen_time,omitempty"` + // Service contexts with a non-zero error count for the given filter + // criteria. 
This list can be truncated if multiple services are affected. + // Refer to `num_affected_services` for the total count. + AffectedServices []*ServiceContext `protobuf:"bytes,7,rep,name=affected_services,json=affectedServices" json:"affected_services,omitempty"` + // The total number of services with a non-zero error count for the given + // filter criteria. + NumAffectedServices int32 `protobuf:"varint,8,opt,name=num_affected_services,json=numAffectedServices" json:"num_affected_services,omitempty"` + // An arbitrary event that is chosen as representative for the whole group. + // The representative event is intended to be used as a quick preview for + // the whole group. Events in the group are usually sufficiently similar + // to each other such that showing an arbitrary representative provides + // insight into the characteristics of the group as a whole. + Representative *ErrorEvent `protobuf:"bytes,9,opt,name=representative" json:"representative,omitempty"` +} + +func (m *ErrorGroupStats) Reset() { *m = ErrorGroupStats{} } +func (m *ErrorGroupStats) String() string { return proto.CompactTextString(m) } +func (*ErrorGroupStats) ProtoMessage() {} +func (*ErrorGroupStats) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *ErrorGroupStats) GetGroup() *ErrorGroup { + if m != nil { + return m.Group + } + return nil +} + +func (m *ErrorGroupStats) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *ErrorGroupStats) GetAffectedUsersCount() int64 { + if m != nil { + return m.AffectedUsersCount + } + return 0 +} + +func (m *ErrorGroupStats) GetTimedCounts() []*TimedCount { + if m != nil { + return m.TimedCounts + } + return nil +} + +func (m *ErrorGroupStats) GetFirstSeenTime() *google_protobuf1.Timestamp { + if m != nil { + return m.FirstSeenTime + } + return nil +} + +func (m *ErrorGroupStats) GetLastSeenTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastSeenTime + } + return nil +} + +func (m 
*ErrorGroupStats) GetAffectedServices() []*ServiceContext { + if m != nil { + return m.AffectedServices + } + return nil +} + +func (m *ErrorGroupStats) GetNumAffectedServices() int32 { + if m != nil { + return m.NumAffectedServices + } + return 0 +} + +func (m *ErrorGroupStats) GetRepresentative() *ErrorEvent { + if m != nil { + return m.Representative + } + return nil +} + +// The number of errors in a given time period. +// All numbers are approximate since the error events are sampled +// before counting them. +type TimedCount struct { + // Approximate number of occurrences in the given time period. + Count int64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` + // Start of the time period to which `count` refers (included). + StartTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // End of the time period to which `count` refers (excluded). + EndTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` +} + +func (m *TimedCount) Reset() { *m = TimedCount{} } +func (m *TimedCount) String() string { return proto.CompactTextString(m) } +func (*TimedCount) ProtoMessage() {} +func (*TimedCount) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *TimedCount) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *TimedCount) GetStartTime() *google_protobuf1.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *TimedCount) GetEndTime() *google_protobuf1.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +// Specifies a set of error events to return. +type ListEventsRequest struct { + // [Required] The resource name of the Google Cloud Platform project. Written + // as `projects/` plus the + // [Google Cloud Platform project + // ID](https://support.google.com/cloud/answer/6158840). + // Example: `projects/my-project-123`. 
+ ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName" json:"project_name,omitempty"` + // [Required] The group for which events shall be returned. + GroupId string `protobuf:"bytes,2,opt,name=group_id,json=groupId" json:"group_id,omitempty"` + // [Optional] List only ErrorGroups which belong to a service context that + // matches the filter. + // Data for all service contexts is returned if this field is not specified. + ServiceFilter *ServiceContextFilter `protobuf:"bytes,3,opt,name=service_filter,json=serviceFilter" json:"service_filter,omitempty"` + // [Optional] List only data for the given time range. + // If not set a default time range is used. The field time_range_begin + // in the response will specify the beginning of this time range. + TimeRange *QueryTimeRange `protobuf:"bytes,4,opt,name=time_range,json=timeRange" json:"time_range,omitempty"` + // [Optional] The maximum number of results to return per response. + PageSize int32 `protobuf:"varint,6,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // [Optional] A `next_page_token` provided by a previous response. 
+ PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListEventsRequest) Reset() { *m = ListEventsRequest{} } +func (m *ListEventsRequest) String() string { return proto.CompactTextString(m) } +func (*ListEventsRequest) ProtoMessage() {} +func (*ListEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *ListEventsRequest) GetProjectName() string { + if m != nil { + return m.ProjectName + } + return "" +} + +func (m *ListEventsRequest) GetGroupId() string { + if m != nil { + return m.GroupId + } + return "" +} + +func (m *ListEventsRequest) GetServiceFilter() *ServiceContextFilter { + if m != nil { + return m.ServiceFilter + } + return nil +} + +func (m *ListEventsRequest) GetTimeRange() *QueryTimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *ListEventsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListEventsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Contains a set of requested error events. +type ListEventsResponse struct { + // The error events which match the given request. + ErrorEvents []*ErrorEvent `protobuf:"bytes,1,rep,name=error_events,json=errorEvents" json:"error_events,omitempty"` + // If non-empty, more results are available. + // Pass this token, along with the same query parameters as the first + // request, to view the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` + // The timestamp specifies the start time to which the request was restricted. 
+ TimeRangeBegin *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=time_range_begin,json=timeRangeBegin" json:"time_range_begin,omitempty"` +} + +func (m *ListEventsResponse) Reset() { *m = ListEventsResponse{} } +func (m *ListEventsResponse) String() string { return proto.CompactTextString(m) } +func (*ListEventsResponse) ProtoMessage() {} +func (*ListEventsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *ListEventsResponse) GetErrorEvents() []*ErrorEvent { + if m != nil { + return m.ErrorEvents + } + return nil +} + +func (m *ListEventsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListEventsResponse) GetTimeRangeBegin() *google_protobuf1.Timestamp { + if m != nil { + return m.TimeRangeBegin + } + return nil +} + +// Requests might be rejected or the resulting timed count durations might be +// adjusted for lower durations. +type QueryTimeRange struct { + // Restricts the query to the specified time range. + Period QueryTimeRange_Period `protobuf:"varint,1,opt,name=period,enum=google.devtools.clouderrorreporting.v1beta1.QueryTimeRange_Period" json:"period,omitempty"` +} + +func (m *QueryTimeRange) Reset() { *m = QueryTimeRange{} } +func (m *QueryTimeRange) String() string { return proto.CompactTextString(m) } +func (*QueryTimeRange) ProtoMessage() {} +func (*QueryTimeRange) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *QueryTimeRange) GetPeriod() QueryTimeRange_Period { + if m != nil { + return m.Period + } + return QueryTimeRange_PERIOD_UNSPECIFIED +} + +// Specifies criteria for filtering a subset of service contexts. +// The fields in the filter correspond to the fields in `ServiceContext`. +// Only exact, case-sensitive matches are supported. +// If a field is unset or empty, it matches arbitrary values. 
+type ServiceContextFilter struct { + // [Optional] The exact value to match against + // [`ServiceContext.service`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.service). + Service string `protobuf:"bytes,2,opt,name=service" json:"service,omitempty"` + // [Optional] The exact value to match against + // [`ServiceContext.version`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.version). + Version string `protobuf:"bytes,3,opt,name=version" json:"version,omitempty"` + // [Optional] The exact value to match against + // [`ServiceContext.resource_type`](/error-reporting/reference/rest/v1beta1/ServiceContext#FIELDS.resource_type). + ResourceType string `protobuf:"bytes,4,opt,name=resource_type,json=resourceType" json:"resource_type,omitempty"` +} + +func (m *ServiceContextFilter) Reset() { *m = ServiceContextFilter{} } +func (m *ServiceContextFilter) String() string { return proto.CompactTextString(m) } +func (*ServiceContextFilter) ProtoMessage() {} +func (*ServiceContextFilter) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *ServiceContextFilter) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +func (m *ServiceContextFilter) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *ServiceContextFilter) GetResourceType() string { + if m != nil { + return m.ResourceType + } + return "" +} + +// Deletes all events in the project. +type DeleteEventsRequest struct { + // [Required] The resource name of the Google Cloud Platform project. Written + // as `projects/` plus the + // [Google Cloud Platform project + // ID](https://support.google.com/cloud/answer/6158840). + // Example: `projects/my-project-123`. 
+ ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName" json:"project_name,omitempty"` +} + +func (m *DeleteEventsRequest) Reset() { *m = DeleteEventsRequest{} } +func (m *DeleteEventsRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteEventsRequest) ProtoMessage() {} +func (*DeleteEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +func (m *DeleteEventsRequest) GetProjectName() string { + if m != nil { + return m.ProjectName + } + return "" +} + +// Response message for deleting error events. +type DeleteEventsResponse struct { +} + +func (m *DeleteEventsResponse) Reset() { *m = DeleteEventsResponse{} } +func (m *DeleteEventsResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteEventsResponse) ProtoMessage() {} +func (*DeleteEventsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +func init() { + proto.RegisterType((*ListGroupStatsRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ListGroupStatsRequest") + proto.RegisterType((*ListGroupStatsResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ListGroupStatsResponse") + proto.RegisterType((*ErrorGroupStats)(nil), "google.devtools.clouderrorreporting.v1beta1.ErrorGroupStats") + proto.RegisterType((*TimedCount)(nil), "google.devtools.clouderrorreporting.v1beta1.TimedCount") + proto.RegisterType((*ListEventsRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ListEventsRequest") + proto.RegisterType((*ListEventsResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ListEventsResponse") + proto.RegisterType((*QueryTimeRange)(nil), "google.devtools.clouderrorreporting.v1beta1.QueryTimeRange") + proto.RegisterType((*ServiceContextFilter)(nil), "google.devtools.clouderrorreporting.v1beta1.ServiceContextFilter") + proto.RegisterType((*DeleteEventsRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.DeleteEventsRequest") + 
proto.RegisterType((*DeleteEventsResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.DeleteEventsResponse") + proto.RegisterEnum("google.devtools.clouderrorreporting.v1beta1.TimedCountAlignment", TimedCountAlignment_name, TimedCountAlignment_value) + proto.RegisterEnum("google.devtools.clouderrorreporting.v1beta1.ErrorGroupOrder", ErrorGroupOrder_name, ErrorGroupOrder_value) + proto.RegisterEnum("google.devtools.clouderrorreporting.v1beta1.QueryTimeRange_Period", QueryTimeRange_Period_name, QueryTimeRange_Period_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ErrorStatsService service + +type ErrorStatsServiceClient interface { + // Lists the specified groups. + ListGroupStats(ctx context.Context, in *ListGroupStatsRequest, opts ...grpc.CallOption) (*ListGroupStatsResponse, error) + // Lists the specified events. + ListEvents(ctx context.Context, in *ListEventsRequest, opts ...grpc.CallOption) (*ListEventsResponse, error) + // Deletes all error events of a given project. + DeleteEvents(ctx context.Context, in *DeleteEventsRequest, opts ...grpc.CallOption) (*DeleteEventsResponse, error) +} + +type errorStatsServiceClient struct { + cc *grpc.ClientConn +} + +func NewErrorStatsServiceClient(cc *grpc.ClientConn) ErrorStatsServiceClient { + return &errorStatsServiceClient{cc} +} + +func (c *errorStatsServiceClient) ListGroupStats(ctx context.Context, in *ListGroupStatsRequest, opts ...grpc.CallOption) (*ListGroupStatsResponse, error) { + out := new(ListGroupStatsResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListGroupStats", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *errorStatsServiceClient) ListEvents(ctx context.Context, in *ListEventsRequest, opts ...grpc.CallOption) (*ListEventsResponse, error) { + out := new(ListEventsResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListEvents", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *errorStatsServiceClient) DeleteEvents(ctx context.Context, in *DeleteEventsRequest, opts ...grpc.CallOption) (*DeleteEventsResponse, error) { + out := new(DeleteEventsResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/DeleteEvents", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ErrorStatsService service + +type ErrorStatsServiceServer interface { + // Lists the specified groups. + ListGroupStats(context.Context, *ListGroupStatsRequest) (*ListGroupStatsResponse, error) + // Lists the specified events. + ListEvents(context.Context, *ListEventsRequest) (*ListEventsResponse, error) + // Deletes all error events of a given project. 
+ DeleteEvents(context.Context, *DeleteEventsRequest) (*DeleteEventsResponse, error) +} + +func RegisterErrorStatsServiceServer(s *grpc.Server, srv ErrorStatsServiceServer) { + s.RegisterService(&_ErrorStatsService_serviceDesc, srv) +} + +func _ErrorStatsService_ListGroupStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ErrorStatsServiceServer).ListGroupStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListGroupStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ErrorStatsServiceServer).ListGroupStats(ctx, req.(*ListGroupStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ErrorStatsService_ListEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListEventsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ErrorStatsServiceServer).ListEvents(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListEvents", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ErrorStatsServiceServer).ListEvents(ctx, req.(*ListEventsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ErrorStatsService_DeleteEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteEventsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ErrorStatsServiceServer).DeleteEvents(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/DeleteEvents", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ErrorStatsServiceServer).DeleteEvents(ctx, req.(*DeleteEventsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ErrorStatsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.clouderrorreporting.v1beta1.ErrorStatsService", + HandlerType: (*ErrorStatsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListGroupStats", + Handler: _ErrorStatsService_ListGroupStats_Handler, + }, + { + MethodName: "ListEvents", + Handler: _ErrorStatsService_ListEvents_Handler, + }, + { + MethodName: "DeleteEvents", + Handler: _ErrorStatsService_DeleteEvents_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto", +} + +func init() { + proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/error_stats_service.proto", fileDescriptor2) +} + +var fileDescriptor2 = []byte{ + // 1312 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4b, 0x6f, 0x1b, 0x55, + 0x14, 0x66, 0xec, 0x38, 0x89, 0x4f, 0x1c, 0xc7, 0xb9, 0x49, 0xd3, 0xa9, 0xcb, 0x23, 0x75, 0x05, + 0x0a, 0xa9, 0xb0, 0x9b, 0x54, 0xa5, 0x45, 0xe5, 0x51, 0xc7, 0x9e, 0x84, 0xa8, 0xa9, 0xed, 0x5e, + 0xdb, 0x20, 0xb2, 0xe8, 0x68, 0x62, 0x9f, 0x4c, 0x07, 0xec, 0x99, 0x61, 0xe6, 0x3a, 0x6a, 0x8b, + 0x2a, 0x21, 0x76, 0xac, 0x61, 0xc7, 0x3f, 0xe0, 0x57, 0xb0, 0x62, 0x0d, 0xea, 0x9e, 0x15, 0x7b, + 0x10, 0xbf, 0x00, 0xdd, 0xc7, 0xf8, 0xd5, 0x88, 0xd4, 0x0e, 0x42, 0xec, 0xe6, 0x9e, 0x73, 0xcf, + 0x77, 0x1e, 0xf7, 0x3b, 0xe7, 0xde, 0x01, 0xc3, 0xf6, 0x3c, 0xbb, 0x83, 0x85, 0x36, 0x9e, 0x30, + 0xcf, 0xeb, 0x84, 0x85, 0x56, 0xc7, 0xeb, 0xb5, 0x31, 0x08, 0xbc, 
0x20, 0x40, 0xdf, 0x0b, 0x98, + 0xe3, 0xda, 0x85, 0x93, 0xad, 0x23, 0x64, 0xd6, 0x56, 0x41, 0x88, 0xcd, 0x90, 0x59, 0x2c, 0x34, + 0x43, 0x0c, 0x4e, 0x9c, 0x16, 0xe6, 0xfd, 0xc0, 0x63, 0x1e, 0xb9, 0x26, 0x61, 0xf2, 0x11, 0x4c, + 0xfe, 0x14, 0x98, 0xbc, 0x82, 0xc9, 0xbe, 0xaa, 0x7c, 0x5a, 0xbe, 0x53, 0xb0, 0x5c, 0xd7, 0x63, + 0x16, 0x73, 0x3c, 0x37, 0x94, 0x50, 0xd9, 0xdb, 0x93, 0x44, 0xd4, 0xf2, 0xba, 0x5d, 0xcf, 0x55, + 0x96, 0xaf, 0x2b, 0x4b, 0xb1, 0x3a, 0xea, 0x1d, 0x17, 0xda, 0xbd, 0x40, 0x40, 0x2b, 0xfd, 0x1b, + 0xe3, 0x7a, 0xe6, 0x74, 0x31, 0x64, 0x56, 0xd7, 0x97, 0x1b, 0x72, 0x3f, 0x24, 0xe0, 0xc2, 0x81, + 0x13, 0xb2, 0xbd, 0xc0, 0xeb, 0xf9, 0x75, 0x9e, 0x26, 0xc5, 0x2f, 0x7b, 0x18, 0x32, 0x72, 0x05, + 0x52, 0x7e, 0xe0, 0x7d, 0x8e, 0x2d, 0x66, 0xba, 0x56, 0x17, 0x75, 0x6d, 0x5d, 0xdb, 0x48, 0xd2, + 0x05, 0x25, 0xab, 0x58, 0x5d, 0x24, 0x97, 0x60, 0xde, 0xe6, 0x76, 0xa6, 0xd3, 0xd6, 0x63, 0xeb, + 0xf1, 0x8d, 0x24, 0x9d, 0x13, 0xeb, 0xfd, 0x36, 0x79, 0x04, 0x69, 0x55, 0x2e, 0xf3, 0xd8, 0xe9, + 0x30, 0x0c, 0xf4, 0xf8, 0xba, 0xb6, 0xb1, 0xb0, 0x5d, 0xcc, 0x4f, 0x50, 0xb6, 0x7c, 0x5d, 0x42, + 0x94, 0x3c, 0x97, 0xe1, 0x63, 0xb6, 0x2b, 0x80, 0xe8, 0xa2, 0x02, 0x96, 0x4b, 0x72, 0x08, 0xc0, + 0x93, 0x32, 0x03, 0xcb, 0xb5, 0x51, 0x4f, 0x08, 0x2f, 0x77, 0x26, 0xf2, 0xf2, 0xa0, 0x87, 0xc1, + 0x93, 0x86, 0xd3, 0x45, 0xca, 0x21, 0x68, 0x92, 0x45, 0x9f, 0xe4, 0x1e, 0xac, 0xf2, 0x45, 0xdb, + 0x6c, 0x79, 0x3d, 0x97, 0x99, 0x51, 0x71, 0xf5, 0x59, 0xe1, 0xe5, 0x52, 0xe4, 0x25, 0xaa, 0x6e, + 0xbe, 0xac, 0x36, 0x50, 0x22, 0xcc, 0x4a, 0xdc, 0x2a, 0x92, 0x91, 0x87, 0x90, 0xb4, 0x3a, 0x8e, + 0xed, 0x76, 0xd1, 0x65, 0xfa, 0xdc, 0xba, 0xb6, 0x91, 0xde, 0xbe, 0x3b, 0x51, 0x9c, 0x8d, 0x3e, + 0x66, 0x31, 0xc2, 0xa1, 0x03, 0x48, 0x52, 0x84, 0x74, 0x7f, 0x61, 0x72, 0xff, 0xfa, 0xbc, 0x08, + 0x33, 0xfb, 0x42, 0x98, 0x8d, 0x88, 0x04, 0x74, 0xb1, 0x6f, 0xc1, 0x65, 0x84, 0x42, 0xc2, 0x0b, + 0xda, 0x18, 0xe8, 0x49, 0x11, 0xde, 0xfb, 0x13, 0x85, 0x67, 0x70, 0xb1, 0xe0, 0x51, 0x95, 0x63, + 0x50, 
0x09, 0x45, 0x2e, 0x43, 0xd2, 0xb7, 0x6c, 0x34, 0x43, 0xe7, 0x29, 0xea, 0x0b, 0xeb, 0xda, + 0x46, 0x82, 0xce, 0x73, 0x41, 0xdd, 0x79, 0x8a, 0xe4, 0x35, 0x00, 0xa1, 0x64, 0xde, 0x17, 0xe8, + 0xea, 0x29, 0x41, 0x31, 0xb1, 0xbd, 0xc1, 0x05, 0xb9, 0x3f, 0x35, 0x58, 0x1b, 0x67, 0x67, 0xe8, + 0x7b, 0x6e, 0x88, 0xe4, 0x11, 0x2c, 0xcb, 0xde, 0x94, 0x0c, 0x14, 0x1d, 0xaa, 0x6b, 0xeb, 0xf1, + 0x8d, 0x85, 0xa9, 0xc3, 0x96, 0x0e, 0x96, 0x70, 0x54, 0x40, 0xde, 0x82, 0x25, 0x17, 0x1f, 0x33, + 0x73, 0x28, 0xd0, 0x98, 0x08, 0x74, 0x91, 0x8b, 0x6b, 0x51, 0xb0, 0xa4, 0x0c, 0x99, 0x01, 0x11, + 0xcd, 0x23, 0xb4, 0x1d, 0x57, 0x9f, 0x39, 0xf3, 0x04, 0xd2, 0x7d, 0xb6, 0xed, 0x70, 0x8b, 0xdc, + 0xb7, 0x09, 0x58, 0x1a, 0x0b, 0x89, 0xdc, 0x87, 0x84, 0xc8, 0x52, 0xf4, 0xe0, 0xc2, 0xf6, 0xad, + 0x29, 0xf3, 0xa3, 0x12, 0x85, 0xac, 0x42, 0x42, 0xf0, 0x59, 0xa4, 0x11, 0xa7, 0x72, 0x41, 0xae, + 0xc3, 0xaa, 0x75, 0x7c, 0x8c, 0x2d, 0x86, 0x6d, 0xb3, 0x17, 0x62, 0x10, 0x4a, 0xd2, 0x8b, 0xbe, + 0x8d, 0x53, 0x12, 0xe9, 0x9a, 0x5c, 0x25, 0x48, 0x48, 0x0e, 0x21, 0x35, 0xd4, 0x1d, 0xa1, 0x3e, + 0x23, 0xaa, 0x7f, 0x6b, 0x4a, 0x4e, 0xd3, 0x85, 0x41, 0xcf, 0x84, 0x64, 0x07, 0x96, 0x8e, 0x9d, + 0x20, 0x64, 0x66, 0x88, 0xe8, 0x4a, 0x36, 0x27, 0xce, 0x66, 0xb3, 0x30, 0xa9, 0x23, 0xba, 0x82, + 0xcd, 0x77, 0x21, 0xdd, 0xb1, 0x46, 0x20, 0x66, 0xcf, 0x84, 0x48, 0x71, 0x8b, 0x3e, 0xc2, 0x23, + 0x58, 0xee, 0xd7, 0x44, 0x4d, 0x9d, 0x50, 0x9f, 0x13, 0x69, 0xde, 0x39, 0xc7, 0x20, 0xa3, 0x99, + 0x08, 0x55, 0xc9, 0x43, 0xb2, 0x0d, 0x17, 0xdc, 0x5e, 0xd7, 0x7c, 0xd1, 0xdb, 0xbc, 0xe8, 0x98, + 0x15, 0xb7, 0xd7, 0x2d, 0x8e, 0xdb, 0x98, 0x90, 0x0e, 0xd0, 0x0f, 0x30, 0x44, 0x97, 0xdf, 0x27, + 0x27, 0x28, 0xda, 0x76, 0x2a, 0x7e, 0x18, 0x27, 0x7c, 0x98, 0x8c, 0xc1, 0xe5, 0xbe, 0xd7, 0x00, + 0x06, 0x07, 0x34, 0xe0, 0x8d, 0x36, 0xcc, 0x9b, 0xf7, 0x00, 0x42, 0x66, 0x05, 0x6a, 0xe4, 0xc4, + 0xce, 0xac, 0x70, 0x52, 0xec, 0x16, 0xe5, 0xbd, 0x09, 0xf3, 0xe8, 0xb6, 0xa5, 0x61, 0xfc, 0x4c, + 0xc3, 0x39, 0x74, 0xdb, 0x7c, 0x95, 0x7b, 
0x1e, 0x83, 0x65, 0x3e, 0x15, 0x44, 0xd0, 0xd3, 0xdf, + 0x57, 0xda, 0xff, 0xe1, 0xbe, 0x9a, 0xf9, 0x57, 0xef, 0xab, 0x91, 0x59, 0x3b, 0xfb, 0x8f, 0xb3, + 0x76, 0x6e, 0x7c, 0xd6, 0xfe, 0xa6, 0x01, 0x19, 0xae, 0xaa, 0x9a, 0xb3, 0x87, 0x90, 0x92, 0x73, + 0x16, 0x85, 0x5c, 0x8d, 0xd8, 0xa9, 0x29, 0xb6, 0x80, 0xfd, 0xef, 0xff, 0x7a, 0xb2, 0xfe, 0xa1, + 0x41, 0x7a, 0xb4, 0x74, 0xe4, 0x10, 0x66, 0x7d, 0x0c, 0x1c, 0xaf, 0x2d, 0xd8, 0x92, 0xde, 0xde, + 0x39, 0xc7, 0x39, 0xe4, 0x6b, 0x02, 0x89, 0x2a, 0xc4, 0xdc, 0xd7, 0x1a, 0xcc, 0x4a, 0x11, 0x59, + 0x03, 0x52, 0x33, 0xe8, 0x7e, 0xb5, 0x6c, 0x36, 0x2b, 0xf5, 0x9a, 0x51, 0xda, 0xdf, 0xdd, 0x37, + 0xca, 0x99, 0x57, 0xc8, 0x32, 0x2c, 0x2a, 0xf9, 0x96, 0xf9, 0x71, 0xb5, 0x49, 0x33, 0x1a, 0x21, + 0x90, 0x56, 0xa2, 0x77, 0x85, 0xa8, 0x9e, 0x89, 0x91, 0x0c, 0xa4, 0xfa, 0xdb, 0xca, 0xc5, 0xcf, + 0x32, 0xf1, 0x11, 0xc3, 0x4f, 0x0d, 0xe3, 0x5e, 0x66, 0x66, 0xc8, 0xf0, 0xc6, 0x75, 0xbe, 0xab, + 0x9e, 0x49, 0xe4, 0x3c, 0x58, 0x3d, 0x8d, 0x91, 0x44, 0x87, 0x39, 0xc5, 0xc9, 0xa8, 0x0d, 0xd4, + 0x92, 0x6b, 0x4e, 0x30, 0x08, 0xf9, 0x1b, 0x27, 0x2e, 0x35, 0x6a, 0x49, 0xae, 0xc2, 0x62, 0x80, + 0xa1, 0xd7, 0x0b, 0x5a, 0x68, 0xb2, 0x27, 0xbe, 0x64, 0x6e, 0x92, 0xa6, 0x22, 0x61, 0xe3, 0x89, + 0x8f, 0xb9, 0xdb, 0xb0, 0x52, 0xc6, 0x0e, 0x32, 0x9c, 0xb4, 0x35, 0x73, 0x6b, 0xb0, 0x3a, 0x6a, + 0x29, 0xe9, 0xb7, 0xd9, 0x83, 0x95, 0x53, 0x9e, 0x3d, 0xe4, 0x4d, 0xb8, 0x62, 0x50, 0x5a, 0xa5, + 0x66, 0xa9, 0xda, 0xac, 0x34, 0xcc, 0xe2, 0xc1, 0xfe, 0x5e, 0xe5, 0xbe, 0x51, 0x69, 0x8c, 0x15, + 0xf8, 0x32, 0x5c, 0x1c, 0xa8, 0x8c, 0x07, 0xcd, 0xe2, 0x81, 0x49, 0xab, 0xcd, 0x4a, 0xd9, 0x28, + 0x67, 0x34, 0x92, 0x85, 0xb5, 0x71, 0x65, 0xb1, 0x61, 0x1a, 0x95, 0x72, 0x26, 0xb6, 0xf9, 0x6c, + 0xf8, 0x12, 0xae, 0xaa, 0x77, 0xcc, 0xc5, 0x3d, 0x5a, 0x6d, 0xd6, 0xcc, 0x2a, 0x2d, 0x1b, 0x74, + 0xcc, 0x51, 0x1a, 0x40, 0x46, 0x52, 0x36, 0xea, 0x25, 0x79, 0x8c, 0x07, 0xc5, 0x7a, 0xc3, 0xac, + 0x1b, 0x46, 0x45, 0xca, 0xc4, 0x31, 0x96, 0xa8, 0x51, 0x6c, 0x18, 0x65, 0x29, 
0x89, 0x93, 0x8b, + 0xb0, 0x52, 0xdc, 0xdd, 0x35, 0x4a, 0x5c, 0xd4, 0xac, 0x1b, 0xb4, 0x2e, 0x15, 0x33, 0xdb, 0x7f, + 0xcd, 0xc0, 0xb2, 0xf0, 0x2f, 0xee, 0x7f, 0x75, 0x86, 0xe4, 0x17, 0x0d, 0xd2, 0xa3, 0xaf, 0x21, + 0x32, 0x19, 0x61, 0x4f, 0x7d, 0xe8, 0x67, 0x4b, 0xe7, 0xc2, 0x90, 0xe7, 0x94, 0xbb, 0xf9, 0xcd, + 0xf3, 0xdf, 0xbf, 0x8b, 0x15, 0xc8, 0x3b, 0xfd, 0xff, 0x94, 0xaf, 0x86, 0x8f, 0xfc, 0x03, 0xb5, + 0x08, 0x0b, 0x9b, 0xcf, 0x0a, 0xf6, 0x20, 0xfe, 0x9f, 0x34, 0x80, 0xc1, 0xd0, 0x21, 0x1f, 0x4e, + 0x1c, 0xca, 0x08, 0xd1, 0xb2, 0x1f, 0x4d, 0x6d, 0xaf, 0xd2, 0xd8, 0x12, 0x69, 0x5c, 0x23, 0x6f, + 0xbf, 0x44, 0x1a, 0x72, 0x20, 0x92, 0x9f, 0x35, 0x48, 0x0d, 0x53, 0x97, 0x4c, 0xf6, 0xa8, 0x3f, + 0xa5, 0x5f, 0xb2, 0xc5, 0x73, 0x20, 0x8c, 0x26, 0xb2, 0xf9, 0xf2, 0x89, 0xec, 0xfc, 0xaa, 0x01, + 0xff, 0xb9, 0x9c, 0xc4, 0xf7, 0xce, 0xda, 0x0b, 0x2c, 0xad, 0xf1, 0x41, 0x5c, 0xd3, 0x0e, 0x1f, + 0x2a, 0x18, 0xdb, 0xeb, 0x58, 0xae, 0x9d, 0xf7, 0x02, 0xbb, 0x60, 0xa3, 0x2b, 0xc6, 0x74, 0x41, + 0xaa, 0x2c, 0xdf, 0x09, 0x5f, 0xea, 0x97, 0xf7, 0xce, 0x29, 0xba, 0x1f, 0x63, 0x57, 0xf7, 0xa4, + 0x83, 0x12, 0x57, 0xca, 0x2b, 0x86, 0xf6, 0xe3, 0xfb, 0x64, 0x6b, 0x87, 0x5b, 0x1e, 0xcd, 0x0a, + 0x87, 0x37, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x2a, 0x59, 0xfa, 0xf4, 0x0f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/report_errors_service.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/report_errors_service.pb.go new file mode 100644 index 000000000..cd709fd11 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1/report_errors_service.pb.go @@ -0,0 +1,242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto + +package clouderrorreporting + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A request for reporting an individual error event. +type ReportErrorEventRequest struct { + // [Required] The resource name of the Google Cloud Platform project. Written + // as `projects/` plus the + // [Google Cloud Platform project ID](https://support.google.com/cloud/answer/6158840). + // Example: `projects/my-project-123`. + ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName" json:"project_name,omitempty"` + // [Required] The error event to be reported. + Event *ReportedErrorEvent `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"` +} + +func (m *ReportErrorEventRequest) Reset() { *m = ReportErrorEventRequest{} } +func (m *ReportErrorEventRequest) String() string { return proto.CompactTextString(m) } +func (*ReportErrorEventRequest) ProtoMessage() {} +func (*ReportErrorEventRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *ReportErrorEventRequest) GetProjectName() string { + if m != nil { + return m.ProjectName + } + return "" +} + +func (m *ReportErrorEventRequest) GetEvent() *ReportedErrorEvent { + if m != nil { + return m.Event + } + return nil +} + +// Response for reporting an individual error event. +// Data may be added to this message in the future. 
+type ReportErrorEventResponse struct { +} + +func (m *ReportErrorEventResponse) Reset() { *m = ReportErrorEventResponse{} } +func (m *ReportErrorEventResponse) String() string { return proto.CompactTextString(m) } +func (*ReportErrorEventResponse) ProtoMessage() {} +func (*ReportErrorEventResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +// An error event which is reported to the Error Reporting system. +type ReportedErrorEvent struct { + // [Optional] Time when the event occurred. + // If not provided, the time when the event was received by the + // Error Reporting system will be used. + EventTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=event_time,json=eventTime" json:"event_time,omitempty"` + // [Required] The service context in which this error has occurred. + ServiceContext *ServiceContext `protobuf:"bytes,2,opt,name=service_context,json=serviceContext" json:"service_context,omitempty"` + // [Required] A message describing the error. The message can contain an + // exception stack in one of the supported programming languages and formats. + // In that case, the message is parsed and detailed exception information + // is returned when retrieving the error event again. + Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` + // [Optional] A description of the context in which the error occurred. 
+ Context *ErrorContext `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"` +} + +func (m *ReportedErrorEvent) Reset() { *m = ReportedErrorEvent{} } +func (m *ReportedErrorEvent) String() string { return proto.CompactTextString(m) } +func (*ReportedErrorEvent) ProtoMessage() {} +func (*ReportedErrorEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } + +func (m *ReportedErrorEvent) GetEventTime() *google_protobuf1.Timestamp { + if m != nil { + return m.EventTime + } + return nil +} + +func (m *ReportedErrorEvent) GetServiceContext() *ServiceContext { + if m != nil { + return m.ServiceContext + } + return nil +} + +func (m *ReportedErrorEvent) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *ReportedErrorEvent) GetContext() *ErrorContext { + if m != nil { + return m.Context + } + return nil +} + +func init() { + proto.RegisterType((*ReportErrorEventRequest)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventRequest") + proto.RegisterType((*ReportErrorEventResponse)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportErrorEventResponse") + proto.RegisterType((*ReportedErrorEvent)(nil), "google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ReportErrorsService service + +type ReportErrorsServiceClient interface { + // Report an individual error event. + // + // This endpoint accepts either an OAuth token, + // or an + // API key + // for authentication. To use an API key, append it to the URL as the value of + // a `key` parameter. For example: + //
POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456
+ ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) +} + +type reportErrorsServiceClient struct { + cc *grpc.ClientConn +} + +func NewReportErrorsServiceClient(cc *grpc.ClientConn) ReportErrorsServiceClient { + return &reportErrorsServiceClient{cc} +} + +func (c *reportErrorsServiceClient) ReportErrorEvent(ctx context.Context, in *ReportErrorEventRequest, opts ...grpc.CallOption) (*ReportErrorEventResponse, error) { + out := new(ReportErrorEventResponse) + err := grpc.Invoke(ctx, "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ReportErrorsService service + +type ReportErrorsServiceServer interface { + // Report an individual error event. + // + // This endpoint accepts either an OAuth token, + // or an + // API key + // for authentication. To use an API key, append it to the URL as the value of + // a `key` parameter. For example: + //
POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456
+ ReportErrorEvent(context.Context, *ReportErrorEventRequest) (*ReportErrorEventResponse, error) +} + +func RegisterReportErrorsServiceServer(s *grpc.Server, srv ReportErrorsServiceServer) { + s.RegisterService(&_ReportErrorsService_serviceDesc, srv) +} + +func _ReportErrorsService_ReportErrorEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReportErrorEventRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.clouderrorreporting.v1beta1.ReportErrorsService/ReportErrorEvent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReportErrorsServiceServer).ReportErrorEvent(ctx, req.(*ReportErrorEventRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ReportErrorsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.clouderrorreporting.v1beta1.ReportErrorsService", + HandlerType: (*ReportErrorsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ReportErrorEvent", + Handler: _ReportErrorsService_ReportErrorEvent_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", +} + +func init() { + proto.RegisterFile("google/devtools/clouderrorreporting/v1beta1/report_errors_service.proto", fileDescriptor3) +} + +var fileDescriptor3 = []byte{ + // 475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xe5, 0xf0, 0x51, 0x75, 0x83, 0x00, 0x2d, 0x07, 0x2c, 0x0b, 0x89, 0x12, 0x2e, 0x15, + 0x48, 0x5e, 0x12, 0x2e, 0xa4, 0x15, 0xaa, 0x94, 0x12, 0xf5, 0x86, 0x2a, 0x17, 0x38, 0x70, 0xc0, + 0xda, 0x38, 0x83, 0x65, 0x14, 
0xef, 0x98, 0xdd, 0x8d, 0x85, 0x84, 0xb8, 0xf0, 0x0a, 0x7d, 0x05, + 0x4e, 0xbc, 0x4e, 0x5f, 0x80, 0x03, 0x0f, 0xc1, 0x11, 0xed, 0x57, 0x14, 0x48, 0x0e, 0x98, 0x1e, + 0xc7, 0xe3, 0xf9, 0xfd, 0xff, 0xf3, 0xb1, 0xe4, 0xa4, 0x44, 0x2c, 0x17, 0xc0, 0xe6, 0xd0, 0x6a, + 0xc4, 0x85, 0x62, 0xc5, 0x02, 0x97, 0x73, 0x90, 0x12, 0xa5, 0x84, 0x06, 0xa5, 0xae, 0x44, 0xc9, + 0xda, 0xe1, 0x0c, 0x34, 0x1f, 0x32, 0xf7, 0x25, 0xb7, 0x59, 0x95, 0x2b, 0x90, 0x6d, 0x55, 0x40, + 0xda, 0x48, 0xd4, 0x48, 0x1f, 0x3b, 0x50, 0x1a, 0x40, 0xe9, 0x16, 0x50, 0xea, 0x41, 0xc9, 0x3d, + 0xaf, 0xca, 0x9b, 0x8a, 0x71, 0x21, 0x50, 0x73, 0x5d, 0xa1, 0x50, 0x0e, 0x95, 0x3c, 0xeb, 0xe2, + 0xa9, 0xc0, 0xba, 0x46, 0xe1, 0x2b, 0xef, 0xfb, 0x4a, 0x1b, 0xcd, 0x96, 0xef, 0x99, 0xae, 0x6a, + 0x50, 0x9a, 0xd7, 0x8d, 0xfb, 0x61, 0x70, 0x1e, 0x91, 0xbb, 0x99, 0x65, 0x4c, 0x0d, 0x6e, 0xda, + 0x82, 0xd0, 0x19, 0x7c, 0x5c, 0x82, 0xd2, 0xf4, 0x01, 0xb9, 0xd1, 0x48, 0xfc, 0x00, 0x85, 0xce, + 0x05, 0xaf, 0x21, 0x8e, 0xf6, 0xa2, 0xfd, 0xdd, 0xac, 0xef, 0xbf, 0xbd, 0xe4, 0x35, 0xd0, 0xd7, + 0xe4, 0x1a, 0x98, 0x92, 0xb8, 0xb7, 0x17, 0xed, 0xf7, 0x47, 0x47, 0x69, 0x87, 0xa6, 0x53, 0xa7, + 0x0b, 0xf3, 0x35, 0x65, 0x47, 0x1b, 0x24, 0x24, 0xde, 0x34, 0xa5, 0x1a, 0x14, 0x0a, 0x06, 0xdf, + 0x7a, 0x84, 0x6e, 0x56, 0xd2, 0x31, 0x21, 0xb6, 0x36, 0x37, 0x1d, 0x5a, 0xab, 0xfd, 0x51, 0x12, + 0xec, 0x84, 0xf6, 0xd3, 0x57, 0xa1, 0xfd, 0x6c, 0xd7, 0xfe, 0x6d, 0x62, 0x3a, 0x27, 0xb7, 0xfc, + 0xea, 0xf2, 0x02, 0x85, 0x86, 0x4f, 0xa1, 0x9d, 0xc3, 0x4e, 0xed, 0x9c, 0x39, 0xc6, 0xb1, 0x43, + 0x64, 0x37, 0xd5, 0x1f, 0x31, 0x8d, 0xc9, 0x4e, 0x0d, 0x4a, 0xf1, 0x12, 0xe2, 0x2b, 0x76, 0x90, + 0x21, 0xa4, 0x67, 0x64, 0x27, 0xe8, 0x5e, 0xb5, 0xba, 0xe3, 0x4e, 0xba, 0x76, 0x08, 0x41, 0x35, + 0x90, 0x46, 0xbf, 0x22, 0x72, 0x67, 0x6d, 0x86, 0xca, 0xbb, 0xa3, 0x3f, 0x22, 0x72, 0xfb, 0xef, + 0xd9, 0xd2, 0x17, 0xff, 0xb1, 0xb7, 0x8d, 0x7b, 0x49, 0xa6, 0x97, 0xa4, 0xf8, 0x05, 0x1f, 0x7d, + 0xbd, 0xf8, 0x79, 0xde, 0x1b, 0x0f, 0x9e, 0xac, 0x4e, 0xfa, 0xf3, 
0xfa, 0x19, 0x3e, 0xf7, 0x81, + 0x62, 0x8f, 0xbe, 0x30, 0xbb, 0x44, 0x75, 0xe0, 0xe8, 0x07, 0xee, 0x7a, 0x26, 0x17, 0x11, 0x31, + 0xaf, 0xa0, 0x8b, 0x9b, 0x49, 0xbc, 0x65, 0x56, 0xa7, 0xe6, 0x6a, 0x4e, 0xa3, 0xb7, 0xef, 0x3c, + 0xa8, 0xc4, 0x05, 0x17, 0x65, 0x8a, 0xb2, 0x64, 0x25, 0x08, 0x7b, 0x53, 0xcc, 0xa5, 0x78, 0x53, + 0xa9, 0x7f, 0x7a, 0x9d, 0x87, 0x5b, 0x72, 0xdf, 0x7b, 0x0f, 0x4f, 0x9c, 0xc0, 0xb1, 0x49, 0xba, + 0x7d, 0x66, 0x2b, 0x87, 0x6f, 0x86, 0x13, 0x53, 0x39, 0xbb, 0x6e, 0x05, 0x9f, 0xfe, 0x0e, 0x00, + 0x00, 0xff, 0xff, 0x9d, 0xe5, 0xc6, 0x91, 0xa1, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2/profiler.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2/profiler.pb.go new file mode 100644 index 000000000..205d1a1d8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2/profiler.pb.go @@ -0,0 +1,464 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/cloudprofiler/v2/profiler.proto + +/* +Package cloudprofiler is a generated protocol buffer package. + +It is generated from these files: + google/devtools/cloudprofiler/v2/profiler.proto + +It has these top-level messages: + CreateProfileRequest + UpdateProfileRequest + Profile + Deployment +*/ +package cloudprofiler + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import _ "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// ProfileType is type of profiling data. +// NOTE: the enumeration member names are used (in lowercase) as unique string +// identifiers of profile types, so they must not be renamed. +type ProfileType int32 + +const ( + // Unspecified profile type. + ProfileType_PROFILE_TYPE_UNSPECIFIED ProfileType = 0 + // Thread CPU time sampling. + ProfileType_CPU ProfileType = 1 + // Wallclock time sampling. More expensive as stops all threads. + ProfileType_WALL ProfileType = 2 + // Heap allocation sampling. + ProfileType_HEAP ProfileType = 3 + // Single-shot collection of all thread stacks. + ProfileType_THREADS ProfileType = 4 + // Synchronization contention profile. + ProfileType_CONTENTION ProfileType = 5 +) + +var ProfileType_name = map[int32]string{ + 0: "PROFILE_TYPE_UNSPECIFIED", + 1: "CPU", + 2: "WALL", + 3: "HEAP", + 4: "THREADS", + 5: "CONTENTION", +} +var ProfileType_value = map[string]int32{ + "PROFILE_TYPE_UNSPECIFIED": 0, + "CPU": 1, + "WALL": 2, + "HEAP": 3, + "THREADS": 4, + "CONTENTION": 5, +} + +func (x ProfileType) String() string { + return proto.EnumName(ProfileType_name, int32(x)) +} +func (ProfileType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// CreateProfileRequest describes a profile resource creation request. +// Deployment field must be populated for both online and offline modes. +// For the online mode, profile field is not set and the profile_type specifies +// the list of profile types supported by the agent. The creation call will hang +// until a profile of one of these types needs to be collected. For offline +// mode, profile field must be set, profile_type must be empty, and deployment +// field must be identical to the deployment in the profile. +type CreateProfileRequest struct { + // Deployment details. 
+ Deployment *Deployment `protobuf:"bytes,1,opt,name=deployment" json:"deployment,omitempty"` + // Online mode: One or more profile types that the agent is capable of + // providing. + ProfileType []ProfileType `protobuf:"varint,2,rep,packed,name=profile_type,json=profileType,enum=google.devtools.cloudprofiler.v2.ProfileType" json:"profile_type,omitempty"` + // Offline mode: Contents of the profile to create. + Profile *Profile `protobuf:"bytes,3,opt,name=profile" json:"profile,omitempty"` +} + +func (m *CreateProfileRequest) Reset() { *m = CreateProfileRequest{} } +func (m *CreateProfileRequest) String() string { return proto.CompactTextString(m) } +func (*CreateProfileRequest) ProtoMessage() {} +func (*CreateProfileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *CreateProfileRequest) GetDeployment() *Deployment { + if m != nil { + return m.Deployment + } + return nil +} + +func (m *CreateProfileRequest) GetProfileType() []ProfileType { + if m != nil { + return m.ProfileType + } + return nil +} + +func (m *CreateProfileRequest) GetProfile() *Profile { + if m != nil { + return m.Profile + } + return nil +} + +// UpdateProfileRequest contains the profile to update. +type UpdateProfileRequest struct { + // Profile to update + Profile *Profile `protobuf:"bytes,1,opt,name=profile" json:"profile,omitempty"` +} + +func (m *UpdateProfileRequest) Reset() { *m = UpdateProfileRequest{} } +func (m *UpdateProfileRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateProfileRequest) ProtoMessage() {} +func (*UpdateProfileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *UpdateProfileRequest) GetProfile() *Profile { + if m != nil { + return m.Profile + } + return nil +} + +// Profile resource. +type Profile struct { + // Opaque, server-assigned, unique ID for this profile. + // Output only. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Type of profile. 
+ // Input (for the offline mode) or output (for the online mode). + ProfileType ProfileType `protobuf:"varint,2,opt,name=profile_type,json=profileType,enum=google.devtools.cloudprofiler.v2.ProfileType" json:"profile_type,omitempty"` + // Deployment this profile corresponds to. + Deployment *Deployment `protobuf:"bytes,3,opt,name=deployment" json:"deployment,omitempty"` + // Duration of the profiling session. + // Input (for the offline mode) or output (for the online mode). + // The field represents requested profiling duration. It may slightly differ + // from the effective profiling duration, which is recorded in the profile + // data, in case the profiling can't be stopped immediately (e.g. in case + // stopping the profiling is handled asynchronously). + Duration *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=duration" json:"duration,omitempty"` + // Profile bytes, as a gzip compressed serialized proto, the format is + // https://github.com/google/pprof/blob/master/proto/profile.proto. + ProfileBytes []byte `protobuf:"bytes,5,opt,name=profile_bytes,json=profileBytes,proto3" json:"profile_bytes,omitempty"` + // Labels associated to this specific profile. These labels will get merged + // with the deployment labels for the final data set. + // See documentation on deployment labels for validation rules and limits. + // Input only, will not be populated on responses. 
+ Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Profile) Reset() { *m = Profile{} } +func (m *Profile) String() string { return proto.CompactTextString(m) } +func (*Profile) ProtoMessage() {} +func (*Profile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Profile) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Profile) GetProfileType() ProfileType { + if m != nil { + return m.ProfileType + } + return ProfileType_PROFILE_TYPE_UNSPECIFIED +} + +func (m *Profile) GetDeployment() *Deployment { + if m != nil { + return m.Deployment + } + return nil +} + +func (m *Profile) GetDuration() *google_protobuf1.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *Profile) GetProfileBytes() []byte { + if m != nil { + return m.ProfileBytes + } + return nil +} + +func (m *Profile) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// Deployment contains the deployment identification information. +type Deployment struct { + // Project ID is the ID of a cloud project. + // Validation regex: `^[a-z][-a-z0-9:.]{4,61}[a-z0-9]$`. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Target is the service name used to group related deployments: + // * Service name for GAE Flex / Standard. + // * Cluster and container name for GKE. + // * User-specified string for direct GCE profiling (e.g. Java). + // * Job name for Dataflow. + // Validation regex: `^[a-z]([-a-z0-9_.]{0,253}[a-z0-9])?$`. + Target string `protobuf:"bytes,2,opt,name=target" json:"target,omitempty"` + // Labels identify the deployment within the user universe and same target. + // Validation regex for label names: `^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$`. 
+ // Value for an individual label must be <= 512 bytes, the total + // size of all label names and values must be <= 1024 bytes. + // + // Either "zone" or "region" label must be present describing the deployment + // location. An example of a zone is "us-central1-a", an example of a region + // is "us-central1" or "us-central". + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Deployment) Reset() { *m = Deployment{} } +func (m *Deployment) String() string { return proto.CompactTextString(m) } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Deployment) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *Deployment) GetTarget() string { + if m != nil { + return m.Target + } + return "" +} + +func (m *Deployment) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*CreateProfileRequest)(nil), "google.devtools.cloudprofiler.v2.CreateProfileRequest") + proto.RegisterType((*UpdateProfileRequest)(nil), "google.devtools.cloudprofiler.v2.UpdateProfileRequest") + proto.RegisterType((*Profile)(nil), "google.devtools.cloudprofiler.v2.Profile") + proto.RegisterType((*Deployment)(nil), "google.devtools.cloudprofiler.v2.Deployment") + proto.RegisterEnum("google.devtools.cloudprofiler.v2.ProfileType", ProfileType_name, ProfileType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for ProfilerService service + +type ProfilerServiceClient interface { + // CreateProfile creates a new profile resource. + // + // In the online creation mode: + // * The server ensures that the new profiles are created at a constant rate + // per deployment, so the creation request may hang for some time until the + // next profile session is available. + // * The request may fail with ABORTED error if the creation is not + // available within ~1m, the response will indicate the duration of the + // backoff the client should take before attempting creating a profile + // again. The backoff duration is returned in google.rpc.RetryInfo extension + // on the response status. To a gRPC client, the extension will be return as + // a binary-serialized proto in the trailing metadata item named + // "google.rpc.retryinfo-bin". + // + // In the offline creation mode: + // * The client provides the profile to create along with the profile bytes, + // the server records it. + CreateProfile(ctx context.Context, in *CreateProfileRequest, opts ...grpc.CallOption) (*Profile, error) + // UpdateProfile updates the profile bytes and labels on the profile resource + // created in the online mode. + UpdateProfile(ctx context.Context, in *UpdateProfileRequest, opts ...grpc.CallOption) (*Profile, error) +} + +type profilerServiceClient struct { + cc *grpc.ClientConn +} + +func NewProfilerServiceClient(cc *grpc.ClientConn) ProfilerServiceClient { + return &profilerServiceClient{cc} +} + +func (c *profilerServiceClient) CreateProfile(ctx context.Context, in *CreateProfileRequest, opts ...grpc.CallOption) (*Profile, error) { + out := new(Profile) + err := grpc.Invoke(ctx, "/google.devtools.cloudprofiler.v2.ProfilerService/CreateProfile", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *profilerServiceClient) UpdateProfile(ctx context.Context, in *UpdateProfileRequest, opts ...grpc.CallOption) (*Profile, error) { + out := new(Profile) + err := grpc.Invoke(ctx, "/google.devtools.cloudprofiler.v2.ProfilerService/UpdateProfile", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ProfilerService service + +type ProfilerServiceServer interface { + // CreateProfile creates a new profile resource. + // + // In the online creation mode: + // * The server ensures that the new profiles are created at a constant rate + // per deployment, so the creation request may hang for some time until the + // next profile session is available. + // * The request may fail with ABORTED error if the creation is not + // available within ~1m, the response will indicate the duration of the + // backoff the client should take before attempting creating a profile + // again. The backoff duration is returned in google.rpc.RetryInfo extension + // on the response status. To a gRPC client, the extension will be return as + // a binary-serialized proto in the trailing metadata item named + // "google.rpc.retryinfo-bin". + // + // In the offline creation mode: + // * The client provides the profile to create along with the profile bytes, + // the server records it. + CreateProfile(context.Context, *CreateProfileRequest) (*Profile, error) + // UpdateProfile updates the profile bytes and labels on the profile resource + // created in the online mode. 
+ UpdateProfile(context.Context, *UpdateProfileRequest) (*Profile, error) +} + +func RegisterProfilerServiceServer(s *grpc.Server, srv ProfilerServiceServer) { + s.RegisterService(&_ProfilerService_serviceDesc, srv) +} + +func _ProfilerService_CreateProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProfilerServiceServer).CreateProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudprofiler.v2.ProfilerService/CreateProfile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProfilerServiceServer).CreateProfile(ctx, req.(*CreateProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ProfilerService_UpdateProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProfilerServiceServer).UpdateProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudprofiler.v2.ProfilerService/UpdateProfile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProfilerServiceServer).UpdateProfile(ctx, req.(*UpdateProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ProfilerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.cloudprofiler.v2.ProfilerService", + HandlerType: (*ProfilerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateProfile", + Handler: _ProfilerService_CreateProfile_Handler, + }, + { + MethodName: "UpdateProfile", + Handler: 
_ProfilerService_UpdateProfile_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/cloudprofiler/v2/profiler.proto", +} + +func init() { proto.RegisterFile("google/devtools/cloudprofiler/v2/profiler.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 661 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0x66, 0xe3, 0x34, 0x69, 0x27, 0x6d, 0xb1, 0x56, 0x15, 0x32, 0x51, 0x81, 0x28, 0x5c, 0x42, + 0x44, 0x6d, 0xc9, 0x55, 0x51, 0x5b, 0xc4, 0xa1, 0x4d, 0x5c, 0x35, 0x52, 0x9a, 0x58, 0x6e, 0x2a, + 0x04, 0x1c, 0x22, 0xa7, 0xde, 0x5a, 0x06, 0xc7, 0x6b, 0xec, 0x4d, 0xa4, 0xa8, 0xea, 0x85, 0x57, + 0xe0, 0x11, 0xb8, 0xc2, 0xbb, 0x20, 0xf1, 0x0a, 0x3c, 0x00, 0x77, 0x2e, 0xc8, 0xf6, 0x3a, 0x3f, + 0x50, 0x94, 0x94, 0x72, 0xdb, 0x99, 0x9d, 0xef, 0xdb, 0x6f, 0x66, 0xd7, 0x9f, 0x41, 0xb1, 0x29, + 0xb5, 0x5d, 0xa2, 0x58, 0x64, 0xc8, 0x28, 0x75, 0x43, 0xe5, 0xdc, 0xa5, 0x03, 0xcb, 0x0f, 0xe8, + 0x85, 0xe3, 0x92, 0x40, 0x19, 0xaa, 0x4a, 0xba, 0x96, 0xfd, 0x80, 0x32, 0x8a, 0x4b, 0x09, 0x40, + 0x4e, 0x01, 0xf2, 0x0c, 0x40, 0x1e, 0xaa, 0xc5, 0x4d, 0x4e, 0x69, 0xfa, 0x8e, 0x62, 0x7a, 0x1e, + 0x65, 0x26, 0x73, 0xa8, 0x17, 0x26, 0xf8, 0xe2, 0x43, 0xbe, 0x1b, 0x47, 0xbd, 0xc1, 0x85, 0x62, + 0x0d, 0x82, 0xb8, 0x80, 0xef, 0x3f, 0xfa, 0x7d, 0x9f, 0x39, 0x7d, 0x12, 0x32, 0xb3, 0xef, 0x27, + 0x05, 0xe5, 0x9f, 0x08, 0x36, 0x6a, 0x01, 0x31, 0x19, 0xd1, 0x93, 0x43, 0x0d, 0xf2, 0x7e, 0x40, + 0x42, 0x86, 0x9b, 0x00, 0x16, 0xf1, 0x5d, 0x3a, 0xea, 0x13, 0x8f, 0x49, 0xa8, 0x84, 0x2a, 0x05, + 0xf5, 0xa9, 0x3c, 0x4f, 0xae, 0x5c, 0x1f, 0x63, 0x8c, 0x29, 0x3c, 0xd6, 0x61, 0x95, 0x57, 0x75, + 0xd9, 0xc8, 0x27, 0x52, 0xa6, 0x24, 0x54, 0xd6, 0xd5, 0xad, 0xf9, 0x7c, 0x5c, 0x55, 0x67, 0xe4, + 0x13, 0xa3, 0xe0, 0x4f, 0x02, 0x5c, 0x83, 0x3c, 0x0f, 0x25, 0x21, 0x16, 0xf7, 0x64, 0x61, 0x32, + 0x23, 0x45, 0x96, 0xdf, 0xc0, 0xc6, 0x99, 0x6f, 0xfd, 0xd9, 0xfc, 0x14, 0x39, 0xfa, 0x67, 
0xf2, + 0x4f, 0x02, 0xe4, 0x79, 0x12, 0x63, 0xc8, 0x7a, 0x66, 0x3f, 0x61, 0x5b, 0x31, 0xe2, 0xf5, 0x35, + 0x33, 0x41, 0xb7, 0x9c, 0xc9, 0xec, 0x9d, 0x09, 0xb7, 0xbc, 0xb3, 0x1d, 0x58, 0x4e, 0x5f, 0x93, + 0x94, 0x8d, 0xb9, 0xee, 0xa7, 0x5c, 0xe9, 0x73, 0x92, 0xeb, 0xbc, 0xc0, 0x18, 0x97, 0xe2, 0xc7, + 0xb0, 0x96, 0xb6, 0xd5, 0x1b, 0x31, 0x12, 0x4a, 0x4b, 0x25, 0x54, 0x59, 0x35, 0xd2, 0x5e, 0x0f, + 0xa3, 0x1c, 0x3e, 0x81, 0x9c, 0x6b, 0xf6, 0x88, 0x1b, 0x4a, 0xb9, 0x92, 0x50, 0x29, 0xa8, 0x3b, + 0x0b, 0x77, 0x2d, 0x37, 0x63, 0x9c, 0xe6, 0xb1, 0x60, 0x64, 0x70, 0x92, 0xe2, 0x1e, 0x14, 0xa6, + 0xd2, 0x58, 0x04, 0xe1, 0x1d, 0x19, 0xf1, 0x61, 0x47, 0x4b, 0xbc, 0x01, 0x4b, 0x43, 0xd3, 0x1d, + 0x24, 0x43, 0x5e, 0x31, 0x92, 0x60, 0x3f, 0xb3, 0x8b, 0xca, 0x5f, 0x11, 0xc0, 0x64, 0x00, 0xf8, + 0x01, 0x80, 0x1f, 0xd0, 0xb7, 0xe4, 0x9c, 0x75, 0x1d, 0x8b, 0x33, 0xac, 0xf0, 0x4c, 0xc3, 0xc2, + 0xf7, 0x20, 0xc7, 0xcc, 0xc0, 0x26, 0x8c, 0x13, 0xf1, 0x08, 0xeb, 0xe3, 0x7e, 0x84, 0xb8, 0x9f, + 0xdd, 0x9b, 0x4c, 0xfd, 0x3f, 0xb7, 0x54, 0x25, 0x50, 0x98, 0x7a, 0x22, 0x78, 0x13, 0x24, 0xdd, + 0x68, 0x1f, 0x35, 0x9a, 0x5a, 0xb7, 0xf3, 0x4a, 0xd7, 0xba, 0x67, 0xad, 0x53, 0x5d, 0xab, 0x35, + 0x8e, 0x1a, 0x5a, 0x5d, 0xbc, 0x83, 0xf3, 0x20, 0xd4, 0xf4, 0x33, 0x11, 0xe1, 0x65, 0xc8, 0xbe, + 0x3c, 0x68, 0x36, 0xc5, 0x4c, 0xb4, 0x3a, 0xd6, 0x0e, 0x74, 0x51, 0xc0, 0x05, 0xc8, 0x77, 0x8e, + 0x0d, 0xed, 0xa0, 0x7e, 0x2a, 0x66, 0xf1, 0x3a, 0x40, 0xad, 0xdd, 0xea, 0x68, 0xad, 0x4e, 0xa3, + 0xdd, 0x12, 0x97, 0xd4, 0x1f, 0x19, 0xb8, 0xcb, 0xcf, 0x09, 0x4e, 0x49, 0x30, 0x74, 0xce, 0x09, + 0xfe, 0x8c, 0x60, 0x6d, 0xc6, 0x4e, 0xf0, 0xb3, 0xf9, 0x93, 0xb8, 0xce, 0x7f, 0x8a, 0x8b, 0x7f, + 0x71, 0xe5, 0xdd, 0x0f, 0xdf, 0xbe, 0x7f, 0xcc, 0xa8, 0xe5, 0x2d, 0x6e, 0xb0, 0xd1, 0x5d, 0x85, + 0xca, 0xe5, 0xe4, 0x29, 0xcb, 0x93, 0x2b, 0xbd, 0x4a, 0x1d, 0x38, 0xdc, 0x47, 0x55, 0xfc, 0x05, + 0xc1, 0xda, 0x8c, 0x01, 0x2c, 0x22, 0xf7, 0x3a, 0xc7, 0xb8, 0x89, 0xdc, 0xbd, 0x58, 0xee, 0xb6, + 0x5a, 0x89, 0xe4, 0x5e, 0xf2, 
0x0a, 0x39, 0xb2, 0x84, 0x17, 0x63, 0xf1, 0xd5, 0xb1, 0x4c, 0xa5, + 0x7a, 0xb5, 0x9f, 0x5a, 0xca, 0x61, 0xfb, 0xf5, 0x09, 0x3f, 0xc6, 0xa6, 0xae, 0xe9, 0xd9, 0x32, + 0x0d, 0x6c, 0xc5, 0x26, 0x5e, 0xfc, 0x3d, 0xf2, 0x9f, 0x8f, 0xe9, 0x3b, 0xe1, 0xdf, 0x7f, 0x40, + 0xcf, 0x67, 0x12, 0xbd, 0x5c, 0x8c, 0xdc, 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x83, 0x3f, 0xa6, + 0xd9, 0xb9, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v1/trace.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v1/trace.pb.go new file mode 100644 index 000000000..79e13682c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v1/trace.pb.go @@ -0,0 +1,661 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/cloudtrace/v1/trace.proto + +/* +Package cloudtrace is a generated protocol buffer package. + +It is generated from these files: + google/devtools/cloudtrace/v1/trace.proto + +It has these top-level messages: + Trace + Traces + TraceSpan + ListTracesRequest + ListTracesResponse + GetTraceRequest + PatchTracesRequest +*/ +package cloudtrace + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type TraceSpan_SpanKind int32 + +const ( + // Unspecified. + TraceSpan_SPAN_KIND_UNSPECIFIED TraceSpan_SpanKind = 0 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + TraceSpan_RPC_SERVER TraceSpan_SpanKind = 1 + // Indicates that the span covers the client-side wrapper around an RPC or + // other remote request. + TraceSpan_RPC_CLIENT TraceSpan_SpanKind = 2 +) + +var TraceSpan_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "RPC_SERVER", + 2: "RPC_CLIENT", +} +var TraceSpan_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "RPC_SERVER": 1, + "RPC_CLIENT": 2, +} + +func (x TraceSpan_SpanKind) String() string { + return proto.EnumName(TraceSpan_SpanKind_name, int32(x)) +} +func (TraceSpan_SpanKind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// Type of data returned for traces in the list. +type ListTracesRequest_ViewType int32 + +const ( + // Default is `MINIMAL` if unspecified. + ListTracesRequest_VIEW_TYPE_UNSPECIFIED ListTracesRequest_ViewType = 0 + // Minimal view of the trace record that contains only the project + // and trace IDs. + ListTracesRequest_MINIMAL ListTracesRequest_ViewType = 1 + // Root span view of the trace record that returns the root spans along + // with the minimal trace data. + ListTracesRequest_ROOTSPAN ListTracesRequest_ViewType = 2 + // Complete view of the trace record that contains the actual trace data. + // This is equivalent to calling the REST `get` or RPC `GetTrace` method + // using the ID of each listed trace. 
+ ListTracesRequest_COMPLETE ListTracesRequest_ViewType = 3 +) + +var ListTracesRequest_ViewType_name = map[int32]string{ + 0: "VIEW_TYPE_UNSPECIFIED", + 1: "MINIMAL", + 2: "ROOTSPAN", + 3: "COMPLETE", +} +var ListTracesRequest_ViewType_value = map[string]int32{ + "VIEW_TYPE_UNSPECIFIED": 0, + "MINIMAL": 1, + "ROOTSPAN": 2, + "COMPLETE": 3, +} + +func (x ListTracesRequest_ViewType) String() string { + return proto.EnumName(ListTracesRequest_ViewType_name, int32(x)) +} +func (ListTracesRequest_ViewType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 0} +} + +// A trace describes how long it takes for an application to perform an +// operation. It consists of a set of spans, each of which represent a single +// timed event within the operation. +type Trace struct { + // Project ID of the Cloud project where the trace data is stored. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Globally unique identifier for the trace. This identifier is a 128-bit + // numeric value formatted as a 32-byte hex string. + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"` + // Collection of spans in the trace. + Spans []*TraceSpan `protobuf:"bytes,3,rep,name=spans" json:"spans,omitempty"` +} + +func (m *Trace) Reset() { *m = Trace{} } +func (m *Trace) String() string { return proto.CompactTextString(m) } +func (*Trace) ProtoMessage() {} +func (*Trace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Trace) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *Trace) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +func (m *Trace) GetSpans() []*TraceSpan { + if m != nil { + return m.Spans + } + return nil +} + +// List of new or updated traces. +type Traces struct { + // List of traces. 
+ Traces []*Trace `protobuf:"bytes,1,rep,name=traces" json:"traces,omitempty"` +} + +func (m *Traces) Reset() { *m = Traces{} } +func (m *Traces) String() string { return proto.CompactTextString(m) } +func (*Traces) ProtoMessage() {} +func (*Traces) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Traces) GetTraces() []*Trace { + if m != nil { + return m.Traces + } + return nil +} + +// A span represents a single timed event within a trace. Spans can be nested +// and form a trace tree. Often, a trace contains a root span that describes the +// end-to-end latency of an operation and, optionally, one or more subspans for +// its suboperations. Spans do not need to be contiguous. There may be gaps +// between spans in a trace. +type TraceSpan struct { + // Identifier for the span. Must be a 64-bit integer other than 0 and + // unique within a trace. + SpanId uint64 `protobuf:"fixed64,1,opt,name=span_id,json=spanId" json:"span_id,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `RPC_CLIENT` + // and `RPC_SERVER` to identify queueing latency associated with the span. + Kind TraceSpan_SpanKind `protobuf:"varint,2,opt,name=kind,enum=google.devtools.cloudtrace.v1.TraceSpan_SpanKind" json:"kind,omitempty"` + // Name of the trace. The trace name is sanitized and displayed in the + // Stackdriver Trace tool in the Google Developers Console. + // The name may be a method name or some other per-call site name. + // For the same executable and the same call point, a best practice is + // to use a consistent name, which makes it easier to correlate + // cross-trace spans. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // Start time of the span in nanoseconds from the UNIX epoch. 
+ StartTime *google_protobuf2.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // End time of the span in nanoseconds from the UNIX epoch. + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // ID of the parent span, if any. Optional. + ParentSpanId uint64 `protobuf:"fixed64,6,opt,name=parent_span_id,json=parentSpanId" json:"parent_span_id,omitempty"` + // Collection of labels associated with the span. + Labels map[string]string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *TraceSpan) Reset() { *m = TraceSpan{} } +func (m *TraceSpan) String() string { return proto.CompactTextString(m) } +func (*TraceSpan) ProtoMessage() {} +func (*TraceSpan) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *TraceSpan) GetSpanId() uint64 { + if m != nil { + return m.SpanId + } + return 0 +} + +func (m *TraceSpan) GetKind() TraceSpan_SpanKind { + if m != nil { + return m.Kind + } + return TraceSpan_SPAN_KIND_UNSPECIFIED +} + +func (m *TraceSpan) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TraceSpan) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *TraceSpan) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *TraceSpan) GetParentSpanId() uint64 { + if m != nil { + return m.ParentSpanId + } + return 0 +} + +func (m *TraceSpan) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// The request message for the `ListTraces` method. All fields are required +// unless specified. +type ListTracesRequest struct { + // ID of the Cloud project where the trace data is stored. 
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Type of data returned for traces in the list. Optional. Default is + // `MINIMAL`. + View ListTracesRequest_ViewType `protobuf:"varint,2,opt,name=view,enum=google.devtools.cloudtrace.v1.ListTracesRequest_ViewType" json:"view,omitempty"` + // Maximum number of traces to return. If not specified or <= 0, the + // implementation selects a reasonable value. The implementation may + // return fewer traces than the requested page size. Optional. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Token identifying the page of results to return. If provided, use the + // value of the `next_page_token` field from a previous request. Optional. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // End of the time interval (inclusive) during which the trace data was + // collected from the application. + StartTime *google_protobuf2.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Start of the time interval (inclusive) during which the trace data was + // collected from the application. + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // An optional filter for the request. + Filter string `protobuf:"bytes,7,opt,name=filter" json:"filter,omitempty"` + // Field used to sort the returned traces. Optional. + // Can be one of the following: + // + // * `trace_id` + // * `name` (`name` field of root span in the trace) + // * `duration` (difference between `end_time` and `start_time` fields of + // the root span) + // * `start` (`start_time` field of the root span) + // + // Descending order can be specified by appending `desc` to the sort field + // (for example, `name desc`). + // + // Only one sort field is permitted. 
+ OrderBy string `protobuf:"bytes,8,opt,name=order_by,json=orderBy" json:"order_by,omitempty"` +} + +func (m *ListTracesRequest) Reset() { *m = ListTracesRequest{} } +func (m *ListTracesRequest) String() string { return proto.CompactTextString(m) } +func (*ListTracesRequest) ProtoMessage() {} +func (*ListTracesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListTracesRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListTracesRequest) GetView() ListTracesRequest_ViewType { + if m != nil { + return m.View + } + return ListTracesRequest_VIEW_TYPE_UNSPECIFIED +} + +func (m *ListTracesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTracesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListTracesRequest) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *ListTracesRequest) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *ListTracesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListTracesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +// The response message for the `ListTraces` method. +type ListTracesResponse struct { + // List of trace records returned. + Traces []*Trace `protobuf:"bytes,1,rep,name=traces" json:"traces,omitempty"` + // If defined, indicates that there are more traces that match the request + // and that this value should be passed to the next request to continue + // retrieving additional traces. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListTracesResponse) Reset() { *m = ListTracesResponse{} } +func (m *ListTracesResponse) String() string { return proto.CompactTextString(m) } +func (*ListTracesResponse) ProtoMessage() {} +func (*ListTracesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ListTracesResponse) GetTraces() []*Trace { + if m != nil { + return m.Traces + } + return nil +} + +func (m *ListTracesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request message for the `GetTrace` method. +type GetTraceRequest struct { + // ID of the Cloud project where the trace data is stored. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // ID of the trace to return. + TraceId string `protobuf:"bytes,2,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"` +} + +func (m *GetTraceRequest) Reset() { *m = GetTraceRequest{} } +func (m *GetTraceRequest) String() string { return proto.CompactTextString(m) } +func (*GetTraceRequest) ProtoMessage() {} +func (*GetTraceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *GetTraceRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetTraceRequest) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +// The request message for the `PatchTraces` method. +type PatchTracesRequest struct { + // ID of the Cloud project where the trace data is stored. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The body of the message. 
+ Traces *Traces `protobuf:"bytes,2,opt,name=traces" json:"traces,omitempty"` +} + +func (m *PatchTracesRequest) Reset() { *m = PatchTracesRequest{} } +func (m *PatchTracesRequest) String() string { return proto.CompactTextString(m) } +func (*PatchTracesRequest) ProtoMessage() {} +func (*PatchTracesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PatchTracesRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *PatchTracesRequest) GetTraces() *Traces { + if m != nil { + return m.Traces + } + return nil +} + +func init() { + proto.RegisterType((*Trace)(nil), "google.devtools.cloudtrace.v1.Trace") + proto.RegisterType((*Traces)(nil), "google.devtools.cloudtrace.v1.Traces") + proto.RegisterType((*TraceSpan)(nil), "google.devtools.cloudtrace.v1.TraceSpan") + proto.RegisterType((*ListTracesRequest)(nil), "google.devtools.cloudtrace.v1.ListTracesRequest") + proto.RegisterType((*ListTracesResponse)(nil), "google.devtools.cloudtrace.v1.ListTracesResponse") + proto.RegisterType((*GetTraceRequest)(nil), "google.devtools.cloudtrace.v1.GetTraceRequest") + proto.RegisterType((*PatchTracesRequest)(nil), "google.devtools.cloudtrace.v1.PatchTracesRequest") + proto.RegisterEnum("google.devtools.cloudtrace.v1.TraceSpan_SpanKind", TraceSpan_SpanKind_name, TraceSpan_SpanKind_value) + proto.RegisterEnum("google.devtools.cloudtrace.v1.ListTracesRequest_ViewType", ListTracesRequest_ViewType_name, ListTracesRequest_ViewType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for TraceService service + +type TraceServiceClient interface { + // Returns of a list of traces that match the specified filter conditions. 
+ ListTraces(ctx context.Context, in *ListTracesRequest, opts ...grpc.CallOption) (*ListTracesResponse, error) + // Gets a single trace by its ID. + GetTrace(ctx context.Context, in *GetTraceRequest, opts ...grpc.CallOption) (*Trace, error) + // Sends new traces to Stackdriver Trace or updates existing traces. If the ID + // of a trace that you send matches that of an existing trace, any fields + // in the existing trace and its spans are overwritten by the provided values, + // and any new fields provided are merged with the existing trace data. If the + // ID does not match, a new trace is created. + PatchTraces(ctx context.Context, in *PatchTracesRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) ListTraces(ctx context.Context, in *ListTracesRequest, opts ...grpc.CallOption) (*ListTracesResponse, error) { + out := new(ListTracesResponse) + err := grpc.Invoke(ctx, "/google.devtools.cloudtrace.v1.TraceService/ListTraces", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *traceServiceClient) GetTrace(ctx context.Context, in *GetTraceRequest, opts ...grpc.CallOption) (*Trace, error) { + out := new(Trace) + err := grpc.Invoke(ctx, "/google.devtools.cloudtrace.v1.TraceService/GetTrace", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *traceServiceClient) PatchTraces(ctx context.Context, in *PatchTracesRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.devtools.cloudtrace.v1.TraceService/PatchTraces", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for TraceService service + +type TraceServiceServer interface { + // Returns of a list of traces that match the specified filter conditions. + ListTraces(context.Context, *ListTracesRequest) (*ListTracesResponse, error) + // Gets a single trace by its ID. + GetTrace(context.Context, *GetTraceRequest) (*Trace, error) + // Sends new traces to Stackdriver Trace or updates existing traces. If the ID + // of a trace that you send matches that of an existing trace, any fields + // in the existing trace and its spans are overwritten by the provided values, + // and any new fields provided are merged with the existing trace data. If the + // ID does not match, a new trace is created. + PatchTraces(context.Context, *PatchTracesRequest) (*google_protobuf1.Empty, error) +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_ListTraces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTracesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).ListTraces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v1.TraceService/ListTraces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).ListTraces(ctx, req.(*ListTracesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TraceService_GetTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTraceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).GetTrace(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v1.TraceService/GetTrace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).GetTrace(ctx, req.(*GetTraceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TraceService_PatchTraces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PatchTracesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).PatchTraces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v1.TraceService/PatchTraces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).PatchTraces(ctx, req.(*PatchTracesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.cloudtrace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListTraces", + Handler: _TraceService_ListTraces_Handler, + }, + { + MethodName: "GetTrace", + Handler: _TraceService_GetTrace_Handler, + }, + { + MethodName: "PatchTraces", + Handler: _TraceService_PatchTraces_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/cloudtrace/v1/trace.proto", +} + +func init() { proto.RegisterFile("google/devtools/cloudtrace/v1/trace.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 886 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0x66, 0xed, 0x78, 0x6d, 0x1f, 0x87, 0xd4, 0x8c, 0x68, 0x71, 0x5d, 0x2a, 0xc2, 0xaa, 0x20, + 0x03, 0x62, 0xb7, 0x76, 0x41, 0x22, 0xe5, 0x47, 0x6a, 0xdc, 0x6d, 0xb4, 0x8a, 0xe3, 0xac, 
0xd6, + 0x26, 0x08, 0x6e, 0x56, 0x13, 0xef, 0xd4, 0x2c, 0x59, 0xcf, 0x2c, 0x3b, 0x13, 0x17, 0xa7, 0xea, + 0x05, 0x5c, 0x72, 0x09, 0xe2, 0x8a, 0x37, 0xe0, 0x92, 0x37, 0x41, 0xbc, 0x02, 0x12, 0xaf, 0x81, + 0x66, 0x66, 0xb7, 0x89, 0x12, 0x35, 0x76, 0xe8, 0x4d, 0x34, 0xe7, 0xcc, 0xf9, 0xfd, 0xbe, 0x6f, + 0xb2, 0x86, 0xf7, 0xa6, 0x8c, 0x4d, 0x13, 0xe2, 0x44, 0x64, 0x2e, 0x18, 0x4b, 0xb8, 0x33, 0x49, + 0xd8, 0x71, 0x24, 0x32, 0x3c, 0x21, 0xce, 0xbc, 0xeb, 0xa8, 0x83, 0x9d, 0x66, 0x4c, 0x30, 0x74, + 0x5b, 0x87, 0xda, 0x45, 0xa8, 0x7d, 0x1a, 0x6a, 0xcf, 0xbb, 0xed, 0x37, 0xf3, 0x4a, 0x38, 0x8d, + 0x1d, 0x4c, 0x29, 0x13, 0x58, 0xc4, 0x8c, 0x72, 0x9d, 0xdc, 0xbe, 0x95, 0xdf, 0x2a, 0xeb, 0xf0, + 0xf8, 0xb1, 0x43, 0x66, 0xa9, 0x58, 0xe4, 0x97, 0x6f, 0x9d, 0xbf, 0x14, 0xf1, 0x8c, 0x70, 0x81, + 0x67, 0xa9, 0x0e, 0xb0, 0x7e, 0x34, 0xa0, 0x32, 0x96, 0x8d, 0xd0, 0x6d, 0x80, 0x34, 0x63, 0xdf, + 0x91, 0x89, 0x08, 0xe3, 0xa8, 0x65, 0x6c, 0x1a, 0x9d, 0x7a, 0x50, 0xcf, 0x3d, 0x5e, 0x84, 0x6e, + 0x42, 0x4d, 0x0d, 0x24, 0x2f, 0x4b, 0xea, 0xb2, 0xaa, 0x6c, 0x2f, 0x42, 0x5f, 0x40, 0x85, 0xa7, + 0x98, 0xf2, 0x56, 0x79, 0xb3, 0xdc, 0x69, 0xf4, 0x3a, 0xf6, 0xa5, 0xeb, 0xd8, 0xaa, 0xdd, 0x28, + 0xc5, 0x34, 0xd0, 0x69, 0xd6, 0x23, 0x30, 0x95, 0x8f, 0xa3, 0xcf, 0xc0, 0x54, 0x61, 0xbc, 0x65, + 0xa8, 0x52, 0x77, 0x56, 0x29, 0x15, 0xe4, 0x39, 0xd6, 0xbf, 0x65, 0xa8, 0x3f, 0x2f, 0x8e, 0xde, + 0x80, 0xaa, 0x2c, 0x5f, 0x2c, 0x63, 0x06, 0xa6, 0x34, 0xbd, 0x08, 0xb9, 0xb0, 0x76, 0x14, 0x53, + 0xbd, 0xc5, 0x46, 0xaf, 0xbb, 0xea, 0xb4, 0xb6, 0xfc, 0xb3, 0x1b, 0xd3, 0x28, 0x50, 0xe9, 0x08, + 0xc1, 0x1a, 0xc5, 0x33, 0xd2, 0x2a, 0x2b, 0x30, 0xd4, 0x19, 0x6d, 0x01, 0x70, 0x81, 0x33, 0x11, + 0x4a, 0x98, 0x5b, 0x6b, 0x9b, 0x46, 0xa7, 0xd1, 0x6b, 0x17, 0x0d, 0x0a, 0x0e, 0xec, 0x71, 0xc1, + 0x41, 0x50, 0x57, 0xd1, 0xd2, 0x46, 0x1f, 0x43, 0x8d, 0xd0, 0x48, 0x27, 0x56, 0x96, 0x26, 0x56, + 0x09, 0x8d, 0x54, 0xda, 0x1d, 0xd8, 0x48, 0x71, 0x46, 0xa8, 0x08, 0x8b, 0x65, 0x4d, 0xb5, 0xec, + 0xba, 0xf6, 0x8e, 0xf4, 0xca, 
0x03, 0x30, 0x13, 0x7c, 0x48, 0x12, 0xde, 0xaa, 0x2a, 0x5c, 0x3f, + 0x5a, 0x79, 0xe9, 0x81, 0x4a, 0x73, 0xa9, 0xc8, 0x16, 0x41, 0x5e, 0xa3, 0xbd, 0x05, 0x8d, 0x33, + 0x6e, 0xd4, 0x84, 0xf2, 0x11, 0x59, 0xe4, 0x8a, 0x91, 0x47, 0xf4, 0x3a, 0x54, 0xe6, 0x38, 0x39, + 0x26, 0xb9, 0x50, 0xb4, 0x71, 0xbf, 0xf4, 0x89, 0x61, 0xb9, 0x50, 0x2b, 0x60, 0x44, 0x37, 0xe1, + 0xfa, 0xc8, 0x7f, 0x30, 0x0c, 0x77, 0xbd, 0xe1, 0xc3, 0xf0, 0xcb, 0xe1, 0xc8, 0x77, 0xfb, 0xde, + 0x23, 0xcf, 0x7d, 0xd8, 0x7c, 0x05, 0x6d, 0x00, 0x04, 0x7e, 0x3f, 0x1c, 0xb9, 0xc1, 0x81, 0x1b, + 0x34, 0x8d, 0xc2, 0xee, 0x0f, 0x3c, 0x77, 0x38, 0x6e, 0x96, 0xac, 0x3f, 0xcb, 0xf0, 0xda, 0x20, + 0xe6, 0x42, 0xcb, 0x26, 0x20, 0xdf, 0x1f, 0x13, 0x2e, 0x96, 0x29, 0x78, 0x0f, 0xd6, 0xe6, 0x31, + 0x79, 0x92, 0xf3, 0xbe, 0xb5, 0x04, 0x82, 0x0b, 0xe5, 0xed, 0x83, 0x98, 0x3c, 0x19, 0x2f, 0x52, + 0x12, 0xa8, 0x32, 0xe8, 0x16, 0xd4, 0x53, 0x3c, 0x25, 0x21, 0x8f, 0x4f, 0xb4, 0x08, 0x2a, 0x41, + 0x4d, 0x3a, 0x46, 0xf1, 0x89, 0x7e, 0x4c, 0xf2, 0x52, 0xb0, 0x23, 0x42, 0x95, 0x10, 0xe4, 0x28, + 0x78, 0x4a, 0xc6, 0xd2, 0x71, 0x4e, 0x27, 0x95, 0xff, 0xab, 0x13, 0x73, 0x75, 0x9d, 0xdc, 0x00, + 0xf3, 0x71, 0x9c, 0x08, 0x92, 0xb5, 0xaa, 0x6a, 0x98, 0xdc, 0x92, 0xcf, 0x9a, 0x65, 0x11, 0xc9, + 0xc2, 0xc3, 0x45, 0xab, 0xa6, 0x9f, 0xb5, 0xb2, 0xb7, 0x17, 0xd6, 0x10, 0x6a, 0xc5, 0xca, 0x92, + 0xab, 0x03, 0xcf, 0xfd, 0x2a, 0x1c, 0x7f, 0xed, 0xbb, 0xe7, 0xb8, 0x6a, 0x40, 0x75, 0xcf, 0x1b, + 0x7a, 0x7b, 0x0f, 0x06, 0x4d, 0x03, 0xad, 0x43, 0x2d, 0xd8, 0xdf, 0x1f, 0x4b, 0x5e, 0x9b, 0x25, + 0x69, 0xf5, 0xf7, 0xf7, 0xfc, 0x81, 0x3b, 0x76, 0x9b, 0x65, 0xeb, 0x04, 0xd0, 0x59, 0x50, 0x79, + 0xca, 0x28, 0x27, 0x2f, 0xf7, 0xe4, 0xd1, 0xbb, 0x70, 0x8d, 0x92, 0x1f, 0x44, 0x78, 0x06, 0x6c, + 0xad, 0xb9, 0x57, 0xa5, 0xdb, 0x2f, 0x00, 0xb7, 0x76, 0xe1, 0xda, 0x0e, 0xd1, 0xad, 0x57, 0x54, + 0xcb, 0x8b, 0xff, 0xdf, 0x59, 0x19, 0x20, 0x1f, 0x8b, 0xc9, 0xb7, 0x57, 0x52, 0xdf, 0xe7, 0xcf, + 0xf7, 0x2c, 0x29, 0xd6, 0xde, 0x59, 0x65, 0x4f, 0x5e, 0x2c, 0xda, 
0xfb, 0xab, 0x0c, 0xeb, 0xfa, + 0x55, 0x92, 0x6c, 0x1e, 0x4f, 0x08, 0xfa, 0xdd, 0x00, 0x38, 0x85, 0x13, 0xdd, 0xbd, 0xaa, 0x9c, + 0xdb, 0xdd, 0x2b, 0x64, 0x68, 0xae, 0xac, 0xce, 0x4f, 0x7f, 0xff, 0xf3, 0x6b, 0xc9, 0x42, 0x9b, + 0xf2, 0x03, 0x96, 0xaf, 0xc6, 0x9d, 0xa7, 0xa7, 0x6b, 0x3f, 0x73, 0x72, 0x5e, 0x7e, 0x33, 0xa0, + 0x56, 0x00, 0x8e, 0xec, 0x25, 0x9d, 0xce, 0x31, 0xd3, 0x5e, 0x49, 0x02, 0xd6, 0x3d, 0x35, 0xcc, + 0x87, 0xe8, 0x83, 0x65, 0xc3, 0x38, 0x4f, 0x0b, 0x22, 0x9f, 0xa1, 0x9f, 0x0d, 0x68, 0x9c, 0xe1, + 0x0e, 0x2d, 0x03, 0xe1, 0x22, 0xcf, 0xed, 0x1b, 0x17, 0x9e, 0x9b, 0x2b, 0x3f, 0xb8, 0xd6, 0x5d, + 0x35, 0xcf, 0xfb, 0xbd, 0xa5, 0xe0, 0xdc, 0xcf, 0x39, 0xdd, 0xfe, 0xc5, 0x80, 0xb7, 0x27, 0x6c, + 0x76, 0xf9, 0x08, 0xdb, 0xa0, 0xda, 0xfb, 0xb2, 0x99, 0x6f, 0x7c, 0xb3, 0x93, 0x07, 0x4f, 0x59, + 0x82, 0xe9, 0xd4, 0x66, 0xd9, 0xd4, 0x99, 0x12, 0xaa, 0x46, 0x71, 0xf4, 0x15, 0x4e, 0x63, 0xfe, + 0x82, 0x1f, 0x1d, 0x9f, 0x9e, 0x5a, 0x7f, 0x94, 0xae, 0xef, 0xe8, 0x4a, 0x7d, 0xe9, 0xd3, 0x98, + 0xda, 0x07, 0xdd, 0x43, 0x53, 0xd5, 0xba, 0xf7, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xd5, + 0x26, 0xa6, 0xbf, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/remoteexecution/v1test/remote_execution.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/remoteexecution/v1test/remote_execution.pb.go new file mode 100644 index 000000000..904fa82a1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/remoteexecution/v1test/remote_execution.pb.go @@ -0,0 +1,2013 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/remoteexecution/v1test/remote_execution.proto + +/* +Package remoteexecution is a generated protocol buffer package. 
+ +It is generated from these files: + google/devtools/remoteexecution/v1test/remote_execution.proto + +It has these top-level messages: + Action + Command + Platform + Directory + FileNode + DirectoryNode + Digest + ActionResult + OutputFile + OutputDirectory + ExecuteRequest + ExecuteResponse + ExecuteOperationMetadata + GetActionResultRequest + UpdateActionResultRequest + FindMissingBlobsRequest + FindMissingBlobsResponse + UpdateBlobRequest + BatchUpdateBlobsRequest + BatchUpdateBlobsResponse + GetTreeRequest + GetTreeResponse + ToolDetails + RequestMetadata +*/ +package remoteexecution + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The current stage of execution. +type ExecuteOperationMetadata_Stage int32 + +const ( + ExecuteOperationMetadata_UNKNOWN ExecuteOperationMetadata_Stage = 0 + // Checking the result against the cache. + ExecuteOperationMetadata_CACHE_CHECK ExecuteOperationMetadata_Stage = 1 + // Currently idle, awaiting a free machine to execute. + ExecuteOperationMetadata_QUEUED ExecuteOperationMetadata_Stage = 2 + // Currently being executed by a worker. 
+ ExecuteOperationMetadata_EXECUTING ExecuteOperationMetadata_Stage = 3 + // Finished execution. + ExecuteOperationMetadata_COMPLETED ExecuteOperationMetadata_Stage = 4 +) + +var ExecuteOperationMetadata_Stage_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CACHE_CHECK", + 2: "QUEUED", + 3: "EXECUTING", + 4: "COMPLETED", +} +var ExecuteOperationMetadata_Stage_value = map[string]int32{ + "UNKNOWN": 0, + "CACHE_CHECK": 1, + "QUEUED": 2, + "EXECUTING": 3, + "COMPLETED": 4, +} + +func (x ExecuteOperationMetadata_Stage) String() string { + return proto.EnumName(ExecuteOperationMetadata_Stage_name, int32(x)) +} +func (ExecuteOperationMetadata_Stage) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{12, 0} +} + +// An `Action` captures all the information about an execution which is required +// to reproduce it. +// +// `Action`s are the core component of the [Execution] service. A single +// `Action` represents a repeatable action that can be performed by the +// execution service. `Action`s can be succinctly identified by the digest of +// their wire format encoding and, once an `Action` has been executed, will be +// cached in the action cache. Future requests can then use the cached result +// rather than needing to run afresh. +// +// When a server completes execution of an [Action][google.devtools.remoteexecution.v1test.Action], +// it MAY choose to cache the [result][google.devtools.remoteexecution.v1test.ActionResult] +// in the [ActionCache][google.devtools.remoteexecution.v1test.ActionCache] +// unless `do_not_cache` is `true`. Clients SHOULD expect the server to do so. +// By default, future calls to [Execute][] the same `Action` will also serve +// their results from the cache. Clients must take care to understand the +// caching behaviour. Ideally, all `Action`s will be reproducible so that +// serving a result from cache is always desirable and correct. 
+type Action struct { + // The digest of the [Command][google.devtools.remoteexecution.v1test.Command] + // to run, which MUST be present in the + // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]. + CommandDigest *Digest `protobuf:"bytes,1,opt,name=command_digest,json=commandDigest" json:"command_digest,omitempty"` + // The digest of the root [Directory][google.devtools.remoteexecution.v1test.Directory] + // for the input files. The files in the directory tree are available in the + // correct location on the build machine before the command is executed. The + // root directory, as well as every subdirectory and content blob referred to, + // MUST be in the [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]. + InputRootDigest *Digest `protobuf:"bytes,2,opt,name=input_root_digest,json=inputRootDigest" json:"input_root_digest,omitempty"` + // A list of the output files that the client expects to retrieve from the + // action. Only the listed files, as well as directories listed in + // `output_directories`, will be returned to the client as output. + // Other files that may be created during command execution are discarded. + // + // The paths are specified using forward slashes (`/`) as path separators, + // even if the execution platform natively uses a different separator. The + // path MUST NOT include a trailing slash. + // + // In order to ensure consistent hashing of the same Action, the output paths + // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8 + // bytes). + OutputFiles []string `protobuf:"bytes,3,rep,name=output_files,json=outputFiles" json:"output_files,omitempty"` + // A list of the output directories that the client expects to retrieve from + // the action. 
Only the contents of the indicated directories (recursively + // including the contents of their subdirectories) will be + // returned, as well as files listed in `output_files`. Other files that may + // be created during command execution are discarded. + // + // The paths are specified using forward slashes (`/`) as path separators, + // even if the execution platform natively uses a different separator. The + // path MUST NOT include a trailing slash, unless the path is `"/"` (which, + // although not recommended, can be used to capture the entire working + // directory tree, including inputs). + // + // In order to ensure consistent hashing of the same Action, the output paths + // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8 + // bytes). + OutputDirectories []string `protobuf:"bytes,4,rep,name=output_directories,json=outputDirectories" json:"output_directories,omitempty"` + // The platform requirements for the execution environment. The server MAY + // choose to execute the action on any worker satisfying the requirements, so + // the client SHOULD ensure that running the action on any such worker will + // have the same result. + Platform *Platform `protobuf:"bytes,5,opt,name=platform" json:"platform,omitempty"` + // A timeout after which the execution should be killed. If the timeout is + // absent, then the client is specifying that the execution should continue + // as long as the server will let it. The server SHOULD impose a timeout if + // the client does not specify one, however, if the client does specify a + // timeout that is longer than the server's maximum timeout, the server MUST + // reject the request. + // + // The timeout is a part of the + // [Action][google.devtools.remoteexecution.v1test.Action] message, and + // therefore two `Actions` with different timeouts are different, even if they + // are otherwise identical. 
This is because, if they were not, running an + // `Action` with a lower timeout than is required might result in a cache hit + // from an execution run with a longer timeout, hiding the fact that the + // timeout is too short. By encoding it directly in the `Action`, a lower + // timeout will result in a cache miss and the execution timeout will fail + // immediately, rather than whenever the cache entry gets evicted. + Timeout *google_protobuf3.Duration `protobuf:"bytes,6,opt,name=timeout" json:"timeout,omitempty"` + // If true, then the `Action`'s result cannot be cached. + DoNotCache bool `protobuf:"varint,7,opt,name=do_not_cache,json=doNotCache" json:"do_not_cache,omitempty"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Action) GetCommandDigest() *Digest { + if m != nil { + return m.CommandDigest + } + return nil +} + +func (m *Action) GetInputRootDigest() *Digest { + if m != nil { + return m.InputRootDigest + } + return nil +} + +func (m *Action) GetOutputFiles() []string { + if m != nil { + return m.OutputFiles + } + return nil +} + +func (m *Action) GetOutputDirectories() []string { + if m != nil { + return m.OutputDirectories + } + return nil +} + +func (m *Action) GetPlatform() *Platform { + if m != nil { + return m.Platform + } + return nil +} + +func (m *Action) GetTimeout() *google_protobuf3.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *Action) GetDoNotCache() bool { + if m != nil { + return m.DoNotCache + } + return false +} + +// A `Command` is the actual command executed by a worker running an +// [Action][google.devtools.remoteexecution.v1test.Action]. 
+// +// Except as otherwise required, the environment (such as which system +// libraries or binaries are available, and what filesystems are mounted where) +// is defined by and specific to the implementation of the remote execution API. +type Command struct { + // The arguments to the command. The first argument must be the path to the + // executable, which must be either a relative path, in which case it is + // evaluated with respect to the input root, or an absolute path. The `PATH` + // environment variable, or similar functionality on other systems, is not + // used to determine which executable to run. + // + // The working directory will always be the input root. + Arguments []string `protobuf:"bytes,1,rep,name=arguments" json:"arguments,omitempty"` + // The environment variables to set when running the program. The worker may + // provide its own default environment variables; these defaults can be + // overridden using this field. Additional variables can also be specified. + // + // In order to ensure that equivalent `Command`s always hash to the same + // value, the environment variables MUST be lexicographically sorted by name. + // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes. 
+ EnvironmentVariables []*Command_EnvironmentVariable `protobuf:"bytes,2,rep,name=environment_variables,json=environmentVariables" json:"environment_variables,omitempty"` +} + +func (m *Command) Reset() { *m = Command{} } +func (m *Command) String() string { return proto.CompactTextString(m) } +func (*Command) ProtoMessage() {} +func (*Command) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Command) GetArguments() []string { + if m != nil { + return m.Arguments + } + return nil +} + +func (m *Command) GetEnvironmentVariables() []*Command_EnvironmentVariable { + if m != nil { + return m.EnvironmentVariables + } + return nil +} + +// An `EnvironmentVariable` is one variable to set in the running program's +// environment. +type Command_EnvironmentVariable struct { + // The variable name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The variable value. + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Command_EnvironmentVariable) Reset() { *m = Command_EnvironmentVariable{} } +func (m *Command_EnvironmentVariable) String() string { return proto.CompactTextString(m) } +func (*Command_EnvironmentVariable) ProtoMessage() {} +func (*Command_EnvironmentVariable) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +func (m *Command_EnvironmentVariable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Command_EnvironmentVariable) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// A `Platform` is a set of requirements, such as hardware, operation system, or +// compiler toolchain, for an +// [Action][google.devtools.remoteexecution.v1test.Action]'s execution +// environment. A `Platform` is represented as a series of key-value pairs +// representing the properties that are required of the platform. 
+// +// This message is currently being redeveloped since it is an overly simplistic +// model of platforms. +type Platform struct { + // The properties that make up this platform. In order to ensure that + // equivalent `Platform`s always hash to the same value, the properties MUST + // be lexicographically sorted by name, and then by value. Sorting of strings + // is done by code point, equivalently, by the UTF-8 bytes. + Properties []*Platform_Property `protobuf:"bytes,1,rep,name=properties" json:"properties,omitempty"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (m *Platform) String() string { return proto.CompactTextString(m) } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Platform) GetProperties() []*Platform_Property { + if m != nil { + return m.Properties + } + return nil +} + +// A single property for the environment. The server is responsible for +// specifying the property `name`s that it accepts. If an unknown `name` is +// provided in the requirements for an +// [Action][google.devtools.remoteexecution.v1test.Action], the server SHOULD +// reject the execution request. If permitted by the server, the same `name` +// may occur multiple times. +// +// The server is also responsible for specifying the interpretation of +// property `value`s. For instance, a property describing how much RAM must be +// available may be interpreted as allowing a worker with 16GB to fulfill a +// request for 8GB, while a property describing the OS environment on which +// the action must be performed may require an exact match with the worker's +// OS. +// +// The server MAY use the `value` of one or more properties to determine how +// it sets up the execution environment, such as by making specific system +// files available to the worker. +type Platform_Property struct { + // The property name. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The property value. + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Platform_Property) Reset() { *m = Platform_Property{} } +func (m *Platform_Property) String() string { return proto.CompactTextString(m) } +func (*Platform_Property) ProtoMessage() {} +func (*Platform_Property) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *Platform_Property) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Platform_Property) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// A `Directory` represents a directory node in a file tree, containing zero or +// more children [FileNodes][google.devtools.remoteexecution.v1test.FileNode] and +// [DirectoryNodes][google.devtools.remoteexecution.v1test.DirectoryNode]. +// Each `Node` contains its name in the directory, the digest of its content +// (either a file blob or a `Directory` proto), as well as possibly some +// metadata about the file or directory. +// +// In order to ensure that two equivalent directory trees hash to the same +// value, the following restrictions MUST be obeyed when constructing a +// a `Directory`: +// - Every child in the directory must have a path of exactly one segment. +// Multiple levels of directory hierarchy may not be collapsed. +// - Each child in the directory must have a unique path segment (file name). +// - The files and directories in the directory must each be sorted in +// lexicographical order by path. The path strings must be sorted by code +// point, equivalently, by UTF-8 bytes. +// +// A `Directory` that obeys the restrictions is said to be in canonical form. 
+// +// As an example, the following could be used for a file named `bar` and a +// directory named `foo` with an executable file named `baz` (hashes shortened +// for readability): +// +// ```json +// // (Directory proto) +// { +// files: [ +// { +// name: "bar", +// digest: { +// hash: "4a73bc9d03...", +// size: 65534 +// } +// } +// ], +// directories: [ +// { +// name: "foo", +// digest: { +// hash: "4cf2eda940...", +// size: 43 +// } +// } +// ] +// } +// +// // (Directory proto with hash "4cf2eda940..." and size 43) +// { +// files: [ +// { +// name: "baz", +// digest: { +// hash: "b2c941073e...", +// size: 1294, +// }, +// is_executable: true +// } +// ] +// } +// ``` +type Directory struct { + // The files in the directory. + Files []*FileNode `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"` + // The subdirectories in the directory. + Directories []*DirectoryNode `protobuf:"bytes,2,rep,name=directories" json:"directories,omitempty"` +} + +func (m *Directory) Reset() { *m = Directory{} } +func (m *Directory) String() string { return proto.CompactTextString(m) } +func (*Directory) ProtoMessage() {} +func (*Directory) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Directory) GetFiles() []*FileNode { + if m != nil { + return m.Files + } + return nil +} + +func (m *Directory) GetDirectories() []*DirectoryNode { + if m != nil { + return m.Directories + } + return nil +} + +// A `FileNode` represents a single file and associated metadata. +type FileNode struct { + // The name of the file. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The digest of the file's content. + Digest *Digest `protobuf:"bytes,2,opt,name=digest" json:"digest,omitempty"` + // True if file is executable, false otherwise. 
+ IsExecutable bool `protobuf:"varint,4,opt,name=is_executable,json=isExecutable" json:"is_executable,omitempty"` +} + +func (m *FileNode) Reset() { *m = FileNode{} } +func (m *FileNode) String() string { return proto.CompactTextString(m) } +func (*FileNode) ProtoMessage() {} +func (*FileNode) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *FileNode) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FileNode) GetDigest() *Digest { + if m != nil { + return m.Digest + } + return nil +} + +func (m *FileNode) GetIsExecutable() bool { + if m != nil { + return m.IsExecutable + } + return false +} + +// A `DirectoryNode` represents a child of a +// [Directory][google.devtools.remoteexecution.v1test.Directory] which is itself +// a `Directory` and its associated metadata. +type DirectoryNode struct { + // The name of the directory. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The digest of the [Directory][google.devtools.remoteexecution.v1test.Directory] + // object represented. See [Digest][google.devtools.remoteexecution.v1test.Digest] + // for information about how to take the digest of a proto message. + Digest *Digest `protobuf:"bytes,2,opt,name=digest" json:"digest,omitempty"` +} + +func (m *DirectoryNode) Reset() { *m = DirectoryNode{} } +func (m *DirectoryNode) String() string { return proto.CompactTextString(m) } +func (*DirectoryNode) ProtoMessage() {} +func (*DirectoryNode) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *DirectoryNode) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DirectoryNode) GetDigest() *Digest { + if m != nil { + return m.Digest + } + return nil +} + +// A content digest. A digest for a given blob consists of the size of the blob +// and its hash. The hash algorithm to use is defined by the server, but servers +// SHOULD use SHA-256. 
+// +// The size is considered to be an integral part of the digest and cannot be +// separated. That is, even if the `hash` field is correctly specified but +// `size_bytes` is not, the server MUST reject the request. +// +// The reason for including the size in the digest is as follows: in a great +// many cases, the server needs to know the size of the blob it is about to work +// with prior to starting an operation with it, such as flattening Merkle tree +// structures or streaming it to a worker. Technically, the server could +// implement a separate metadata store, but this results in a significantly more +// complicated implementation as opposed to having the client specify the size +// up-front (or storing the size along with the digest in every message where +// digests are embedded). This does mean that the API leaks some implementation +// details of (what we consider to be) a reasonable server implementation, but +// we consider this to be a worthwhile tradeoff. +// +// When a `Digest` is used to refer to a proto message, it always refers to the +// message in binary encoded form. To ensure consistent hashing, clients and +// servers MUST ensure that they serialize messages according to the following +// rules, even if there are alternate valid encodings for the same message. +// - Fields are serialized in tag order. +// - There are no unknown fields. +// - There are no duplicate fields. +// - Fields are serialized according to the default semantics for their type. +// +// Most protocol buffer implementations will always follow these rules when +// serializing, but care should be taken to avoid shortcuts. For instance, +// concatenating two messages to merge them may produce duplicate fields. +type Digest struct { + // The hash. In the case of SHA-256, it will always be a lowercase hex string + // exactly 64 characters long. + Hash string `protobuf:"bytes,1,opt,name=hash" json:"hash,omitempty"` + // The size of the blob, in bytes. 
+ SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes" json:"size_bytes,omitempty"` +} + +func (m *Digest) Reset() { *m = Digest{} } +func (m *Digest) String() string { return proto.CompactTextString(m) } +func (*Digest) ProtoMessage() {} +func (*Digest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Digest) GetHash() string { + if m != nil { + return m.Hash + } + return "" +} + +func (m *Digest) GetSizeBytes() int64 { + if m != nil { + return m.SizeBytes + } + return 0 +} + +// An ActionResult represents the result of an +// [Action][google.devtools.remoteexecution.v1test.Action] being run. +type ActionResult struct { + // The output files of the action. For each output file requested, if the + // corresponding file existed after the action completed, a single entry will + // be present in the output list. + // + // If the action does not produce the requested output, or produces a + // directory where a regular file is expected or vice versa, then that output + // will be omitted from the list. The server is free to arrange the output + // list as desired; clients MUST NOT assume that the output list is sorted. + OutputFiles []*OutputFile `protobuf:"bytes,2,rep,name=output_files,json=outputFiles" json:"output_files,omitempty"` + // The output directories of the action. For each output directory requested, + // if the corresponding directory existed after the action completed, a single + // entry will be present in the output list. The client can retrieve the full + // [Directory][google.devtools.remoteexecution.v1test.Directory] structure + // using [ContentAddressableStorage.GetTree][google.devtools.remoteexecution.v1test.ContentAddressableStorage.GetTree]. + // + // If the action does not produce the requested output, or produces a + // directory where a regular file is expected or vice versa, then that output + // will be omitted from the list. 
The server is free to arrange the output + // list as desired; clients MUST NOT assume that the output list is sorted. + OutputDirectories []*OutputDirectory `protobuf:"bytes,3,rep,name=output_directories,json=outputDirectories" json:"output_directories,omitempty"` + // The exit code of the command. + ExitCode int32 `protobuf:"varint,4,opt,name=exit_code,json=exitCode" json:"exit_code,omitempty"` + // The standard output buffer of the action. The server will determine, based + // on the size of the buffer, whether to return it in raw form or to return + // a digest in `stdout_digest` that points to the buffer. If neither is set, + // then the buffer is empty. The client SHOULD NOT assume it will get one of + // the raw buffer or a digest on any given request and should be prepared to + // handle either. + StdoutRaw []byte `protobuf:"bytes,5,opt,name=stdout_raw,json=stdoutRaw,proto3" json:"stdout_raw,omitempty"` + // The digest for a blob containing the standard output of the action, which + // can be retrieved from the + // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]. + // See `stdout_raw` for when this will be set. + StdoutDigest *Digest `protobuf:"bytes,6,opt,name=stdout_digest,json=stdoutDigest" json:"stdout_digest,omitempty"` + // The standard error buffer of the action. The server will determine, based + // on the size of the buffer, whether to return it in raw form or to return + // a digest in `stderr_digest` that points to the buffer. If neither is set, + // then the buffer is empty. The client SHOULD NOT assume it will get one of + // the raw buffer or a digest on any given request and should be prepared to + // handle either. 
+ StderrRaw []byte `protobuf:"bytes,7,opt,name=stderr_raw,json=stderrRaw,proto3" json:"stderr_raw,omitempty"` + // The digest for a blob containing the standard error of the action, which + // can be retrieved from the + // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]. + // See `stderr_raw` for when this will be set. + StderrDigest *Digest `protobuf:"bytes,8,opt,name=stderr_digest,json=stderrDigest" json:"stderr_digest,omitempty"` +} + +func (m *ActionResult) Reset() { *m = ActionResult{} } +func (m *ActionResult) String() string { return proto.CompactTextString(m) } +func (*ActionResult) ProtoMessage() {} +func (*ActionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ActionResult) GetOutputFiles() []*OutputFile { + if m != nil { + return m.OutputFiles + } + return nil +} + +func (m *ActionResult) GetOutputDirectories() []*OutputDirectory { + if m != nil { + return m.OutputDirectories + } + return nil +} + +func (m *ActionResult) GetExitCode() int32 { + if m != nil { + return m.ExitCode + } + return 0 +} + +func (m *ActionResult) GetStdoutRaw() []byte { + if m != nil { + return m.StdoutRaw + } + return nil +} + +func (m *ActionResult) GetStdoutDigest() *Digest { + if m != nil { + return m.StdoutDigest + } + return nil +} + +func (m *ActionResult) GetStderrRaw() []byte { + if m != nil { + return m.StderrRaw + } + return nil +} + +func (m *ActionResult) GetStderrDigest() *Digest { + if m != nil { + return m.StderrDigest + } + return nil +} + +// An `OutputFile` is similar to a +// [FileNode][google.devtools.remoteexecution.v1test.FileNode], but it is +// tailored for output as part of an `ActionResult`. It allows a full file path +// rather than only a name, and allows the server to include content inline. +// +// `OutputFile` is binary-compatible with `FileNode`. +type OutputFile struct { + // The full path of the file relative to the input root, including the + // filename. 
The path separator is a forward slash `/`. + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + // The digest of the file's content. + Digest *Digest `protobuf:"bytes,2,opt,name=digest" json:"digest,omitempty"` + // The raw content of the file. + // + // This field may be used by the server to provide the content of a file + // inline in an [ActionResult][google.devtools.remoteexecution.v1test.ActionResult] + // and avoid requiring that the client make a separate call to + // [ContentAddressableStorage.GetBlob] to retrieve it. + // + // The client SHOULD NOT assume that it will get raw content with any request, + // and always be prepared to retrieve it via `digest`. + Content []byte `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"` + // True if file is executable, false otherwise. + IsExecutable bool `protobuf:"varint,4,opt,name=is_executable,json=isExecutable" json:"is_executable,omitempty"` +} + +func (m *OutputFile) Reset() { *m = OutputFile{} } +func (m *OutputFile) String() string { return proto.CompactTextString(m) } +func (*OutputFile) ProtoMessage() {} +func (*OutputFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *OutputFile) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *OutputFile) GetDigest() *Digest { + if m != nil { + return m.Digest + } + return nil +} + +func (m *OutputFile) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *OutputFile) GetIsExecutable() bool { + if m != nil { + return m.IsExecutable + } + return false +} + +// An `OutputDirectory` is similar to a +// [DirectoryNode][google.devtools.remoteexecution.v1test.DirectoryNode], +// but it is tailored for output as part of an `ActionResult`. It allows a full +// file path rather than only a name. 
It contains the digest of a +// [Directory][google.devtools.remoteexecution.v1test.Directory] which will meet +// all the usual requirements for a `Directory`. +// +// `OutputDirectory` is binary-compatible with `DirectoryNode`. +type OutputDirectory struct { + // The full path of the directory relative to the input root, including the + // filename. The path separator is a forward slash `/`. + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + // The digest of the [Directory][google.devtools.remoteexecution.v1test.Directory] + // proto describing the directory's contents. + Digest *Digest `protobuf:"bytes,2,opt,name=digest" json:"digest,omitempty"` +} + +func (m *OutputDirectory) Reset() { *m = OutputDirectory{} } +func (m *OutputDirectory) String() string { return proto.CompactTextString(m) } +func (*OutputDirectory) ProtoMessage() {} +func (*OutputDirectory) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *OutputDirectory) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *OutputDirectory) GetDigest() *Digest { + if m != nil { + return m.Digest + } + return nil +} + +// A request message for +// [Execution.Execute][google.devtools.remoteexecution.v1test.Execution.Execute]. +type ExecuteRequest struct { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName" json:"instance_name,omitempty"` + // The action to be performed. + Action *Action `protobuf:"bytes,2,opt,name=action" json:"action,omitempty"` + // If true, the action will be executed anew even if its result was already + // present in the cache. 
If false, the result may be served from the + // [ActionCache][google.devtools.remoteexecution.v1test.ActionCache]. + SkipCacheLookup bool `protobuf:"varint,3,opt,name=skip_cache_lookup,json=skipCacheLookup" json:"skip_cache_lookup,omitempty"` + // The total count of input files, not counting directories. This must be + // provided so that the server can do resource allocation and, on servers with + // quotas, quota checking. It is also used as a safety check: servers MUST + // return an error if the total number of input files described in the + // `action` is different. + TotalInputFileCount int32 `protobuf:"varint,4,opt,name=total_input_file_count,json=totalInputFileCount" json:"total_input_file_count,omitempty"` + // The total size of input file content, provided as a hint and check. This + // must be provided so that the server can do resource allocation and, on + // servers with quotas, quota checking. It is also used as a safety check: + // servers MUST return an error if the total size of input files described in + // the `action` is different. 
+ TotalInputFileBytes int64 `protobuf:"varint,5,opt,name=total_input_file_bytes,json=totalInputFileBytes" json:"total_input_file_bytes,omitempty"` +} + +func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } +func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteRequest) ProtoMessage() {} +func (*ExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ExecuteRequest) GetInstanceName() string { + if m != nil { + return m.InstanceName + } + return "" +} + +func (m *ExecuteRequest) GetAction() *Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *ExecuteRequest) GetSkipCacheLookup() bool { + if m != nil { + return m.SkipCacheLookup + } + return false +} + +func (m *ExecuteRequest) GetTotalInputFileCount() int32 { + if m != nil { + return m.TotalInputFileCount + } + return 0 +} + +func (m *ExecuteRequest) GetTotalInputFileBytes() int64 { + if m != nil { + return m.TotalInputFileBytes + } + return 0 +} + +// The response message for +// [Execution.Execute][google.devtools.remoteexecution.v1test.Execution.Execute], +// which will be contained in the [response field][google.longrunning.Operation.response] of the +// [Operation][google.longrunning.Operation]. +type ExecuteResponse struct { + // The result of the action. + Result *ActionResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` + // True if the result was served from cache, false if it was executed. 
+ CachedResult bool `protobuf:"varint,2,opt,name=cached_result,json=cachedResult" json:"cached_result,omitempty"` +} + +func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } +func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteResponse) ProtoMessage() {} +func (*ExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ExecuteResponse) GetResult() *ActionResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *ExecuteResponse) GetCachedResult() bool { + if m != nil { + return m.CachedResult + } + return false +} + +// Metadata about an ongoing [execution][google.devtools.remoteexecution.v1test.Execution.Execute], +// which will be +// contained in the [metadata field][google.longrunning.Operation.response] of +// the [Operation][google.longrunning.Operation]. +type ExecuteOperationMetadata struct { + Stage ExecuteOperationMetadata_Stage `protobuf:"varint,1,opt,name=stage,enum=google.devtools.remoteexecution.v1test.ExecuteOperationMetadata_Stage" json:"stage,omitempty"` + // The digest of the [Action][google.devtools.remoteexecution.v1test.Action] + // being executed. + ActionDigest *Digest `protobuf:"bytes,2,opt,name=action_digest,json=actionDigest" json:"action_digest,omitempty"` + // If set, the client can use this name with + // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the + // standard output. + StdoutStreamName string `protobuf:"bytes,3,opt,name=stdout_stream_name,json=stdoutStreamName" json:"stdout_stream_name,omitempty"` + // If set, the client can use this name with + // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the + // standard error. 
+ StderrStreamName string `protobuf:"bytes,4,opt,name=stderr_stream_name,json=stderrStreamName" json:"stderr_stream_name,omitempty"` +} + +func (m *ExecuteOperationMetadata) Reset() { *m = ExecuteOperationMetadata{} } +func (m *ExecuteOperationMetadata) String() string { return proto.CompactTextString(m) } +func (*ExecuteOperationMetadata) ProtoMessage() {} +func (*ExecuteOperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *ExecuteOperationMetadata) GetStage() ExecuteOperationMetadata_Stage { + if m != nil { + return m.Stage + } + return ExecuteOperationMetadata_UNKNOWN +} + +func (m *ExecuteOperationMetadata) GetActionDigest() *Digest { + if m != nil { + return m.ActionDigest + } + return nil +} + +func (m *ExecuteOperationMetadata) GetStdoutStreamName() string { + if m != nil { + return m.StdoutStreamName + } + return "" +} + +func (m *ExecuteOperationMetadata) GetStderrStreamName() string { + if m != nil { + return m.StderrStreamName + } + return "" +} + +// A request message for +// [ActionCache.GetActionResult][google.devtools.remoteexecution.v1test.ActionCache.GetActionResult]. +type GetActionResultRequest struct { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName" json:"instance_name,omitempty"` + // The digest of the [Action][google.devtools.remoteexecution.v1test.Action] + // whose result is requested. 
+ ActionDigest *Digest `protobuf:"bytes,2,opt,name=action_digest,json=actionDigest" json:"action_digest,omitempty"` +} + +func (m *GetActionResultRequest) Reset() { *m = GetActionResultRequest{} } +func (m *GetActionResultRequest) String() string { return proto.CompactTextString(m) } +func (*GetActionResultRequest) ProtoMessage() {} +func (*GetActionResultRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *GetActionResultRequest) GetInstanceName() string { + if m != nil { + return m.InstanceName + } + return "" +} + +func (m *GetActionResultRequest) GetActionDigest() *Digest { + if m != nil { + return m.ActionDigest + } + return nil +} + +// A request message for +// [ActionCache.UpdateActionResult][google.devtools.remoteexecution.v1test.ActionCache.UpdateActionResult]. +type UpdateActionResultRequest struct { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName" json:"instance_name,omitempty"` + // The digest of the [Action][google.devtools.remoteexecution.v1test.Action] + // whose result is being uploaded. + ActionDigest *Digest `protobuf:"bytes,2,opt,name=action_digest,json=actionDigest" json:"action_digest,omitempty"` + // The [ActionResult][google.devtools.remoteexecution.v1test.ActionResult] + // to store in the cache. 
+ ActionResult *ActionResult `protobuf:"bytes,3,opt,name=action_result,json=actionResult" json:"action_result,omitempty"` +} + +func (m *UpdateActionResultRequest) Reset() { *m = UpdateActionResultRequest{} } +func (m *UpdateActionResultRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateActionResultRequest) ProtoMessage() {} +func (*UpdateActionResultRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *UpdateActionResultRequest) GetInstanceName() string { + if m != nil { + return m.InstanceName + } + return "" +} + +func (m *UpdateActionResultRequest) GetActionDigest() *Digest { + if m != nil { + return m.ActionDigest + } + return nil +} + +func (m *UpdateActionResultRequest) GetActionResult() *ActionResult { + if m != nil { + return m.ActionResult + } + return nil +} + +// A request message for +// [ContentAddressableStorage.FindMissingBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.FindMissingBlobs]. +type FindMissingBlobsRequest struct { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName" json:"instance_name,omitempty"` + // A list of the blobs to check. 
+ BlobDigests []*Digest `protobuf:"bytes,2,rep,name=blob_digests,json=blobDigests" json:"blob_digests,omitempty"` +} + +func (m *FindMissingBlobsRequest) Reset() { *m = FindMissingBlobsRequest{} } +func (m *FindMissingBlobsRequest) String() string { return proto.CompactTextString(m) } +func (*FindMissingBlobsRequest) ProtoMessage() {} +func (*FindMissingBlobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *FindMissingBlobsRequest) GetInstanceName() string { + if m != nil { + return m.InstanceName + } + return "" +} + +func (m *FindMissingBlobsRequest) GetBlobDigests() []*Digest { + if m != nil { + return m.BlobDigests + } + return nil +} + +// A response message for +// [ContentAddressableStorage.FindMissingBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.FindMissingBlobs]. +type FindMissingBlobsResponse struct { + // A list of the blobs requested *not* present in the storage. + MissingBlobDigests []*Digest `protobuf:"bytes,2,rep,name=missing_blob_digests,json=missingBlobDigests" json:"missing_blob_digests,omitempty"` +} + +func (m *FindMissingBlobsResponse) Reset() { *m = FindMissingBlobsResponse{} } +func (m *FindMissingBlobsResponse) String() string { return proto.CompactTextString(m) } +func (*FindMissingBlobsResponse) ProtoMessage() {} +func (*FindMissingBlobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *FindMissingBlobsResponse) GetMissingBlobDigests() []*Digest { + if m != nil { + return m.MissingBlobDigests + } + return nil +} + +// A single request message for +// [ContentAddressableStorage.BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs]. +type UpdateBlobRequest struct { + // The digest of the blob. This MUST be the digest of `data`. + ContentDigest *Digest `protobuf:"bytes,1,opt,name=content_digest,json=contentDigest" json:"content_digest,omitempty"` + // The raw binary data. 
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *UpdateBlobRequest) Reset() { *m = UpdateBlobRequest{} } +func (m *UpdateBlobRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateBlobRequest) ProtoMessage() {} +func (*UpdateBlobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *UpdateBlobRequest) GetContentDigest() *Digest { + if m != nil { + return m.ContentDigest + } + return nil +} + +func (m *UpdateBlobRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// A request message for +// [ContentAddressableStorage.BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs]. +type BatchUpdateBlobsRequest struct { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName" json:"instance_name,omitempty"` + // The individual upload requests. 
+ Requests []*UpdateBlobRequest `protobuf:"bytes,2,rep,name=requests" json:"requests,omitempty"` +} + +func (m *BatchUpdateBlobsRequest) Reset() { *m = BatchUpdateBlobsRequest{} } +func (m *BatchUpdateBlobsRequest) String() string { return proto.CompactTextString(m) } +func (*BatchUpdateBlobsRequest) ProtoMessage() {} +func (*BatchUpdateBlobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *BatchUpdateBlobsRequest) GetInstanceName() string { + if m != nil { + return m.InstanceName + } + return "" +} + +func (m *BatchUpdateBlobsRequest) GetRequests() []*UpdateBlobRequest { + if m != nil { + return m.Requests + } + return nil +} + +// A response message for +// [ContentAddressableStorage.BatchUpdateBlobs][google.devtools.remoteexecution.v1test.ContentAddressableStorage.BatchUpdateBlobs]. +type BatchUpdateBlobsResponse struct { + // The responses to the requests. + Responses []*BatchUpdateBlobsResponse_Response `protobuf:"bytes,1,rep,name=responses" json:"responses,omitempty"` +} + +func (m *BatchUpdateBlobsResponse) Reset() { *m = BatchUpdateBlobsResponse{} } +func (m *BatchUpdateBlobsResponse) String() string { return proto.CompactTextString(m) } +func (*BatchUpdateBlobsResponse) ProtoMessage() {} +func (*BatchUpdateBlobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *BatchUpdateBlobsResponse) GetResponses() []*BatchUpdateBlobsResponse_Response { + if m != nil { + return m.Responses + } + return nil +} + +// A response corresponding to a single blob that the client tried to upload. +type BatchUpdateBlobsResponse_Response struct { + // The digest to which this response corresponds. + BlobDigest *Digest `protobuf:"bytes,1,opt,name=blob_digest,json=blobDigest" json:"blob_digest,omitempty"` + // The result of attempting to upload that blob. 
+ Status *google_rpc.Status `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` +} + +func (m *BatchUpdateBlobsResponse_Response) Reset() { *m = BatchUpdateBlobsResponse_Response{} } +func (m *BatchUpdateBlobsResponse_Response) String() string { return proto.CompactTextString(m) } +func (*BatchUpdateBlobsResponse_Response) ProtoMessage() {} +func (*BatchUpdateBlobsResponse_Response) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{19, 0} +} + +func (m *BatchUpdateBlobsResponse_Response) GetBlobDigest() *Digest { + if m != nil { + return m.BlobDigest + } + return nil +} + +func (m *BatchUpdateBlobsResponse_Response) GetStatus() *google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +// A request message for +// [ContentAddressableStorage.GetTree][google.devtools.remoteexecution.v1test.ContentAddressableStorage.GetTree]. +type GetTreeRequest struct { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName" json:"instance_name,omitempty"` + // The digest of the root, which must be an encoded + // [Directory][google.devtools.remoteexecution.v1test.Directory] message + // stored in the [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]. + RootDigest *Digest `protobuf:"bytes,2,opt,name=root_digest,json=rootDigest" json:"root_digest,omitempty"` + // A maximum page size to request. If present, the server will request no more + // than this many items. 
Regardless of whether a page size is specified, the + // server may place its own limit on the number of items to be returned and + // require the client to retrieve more items using a subsequent request. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // A page token, which must be a value received in a previous + // [GetTreeResponse][google.devtools.remoteexecution.v1test.GetTreeResponse]. + // If present, the server will use it to return the following page of results. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *GetTreeRequest) Reset() { *m = GetTreeRequest{} } +func (m *GetTreeRequest) String() string { return proto.CompactTextString(m) } +func (*GetTreeRequest) ProtoMessage() {} +func (*GetTreeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *GetTreeRequest) GetInstanceName() string { + if m != nil { + return m.InstanceName + } + return "" +} + +func (m *GetTreeRequest) GetRootDigest() *Digest { + if m != nil { + return m.RootDigest + } + return nil +} + +func (m *GetTreeRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *GetTreeRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// A response message for +// [ContentAddressableStorage.GetTree][google.devtools.remoteexecution.v1test.ContentAddressableStorage.GetTree]. +type GetTreeResponse struct { + // The directories descended from the requested root. + Directories []*Directory `protobuf:"bytes,1,rep,name=directories" json:"directories,omitempty"` + // If present, signifies that there are more results which the client can + // retrieve by passing this as the page_token in a subsequent + // [request][google.devtools.remoteexecution.v1test.GetTreeRequest]. + // If empty, signifies that this is the last page of results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *GetTreeResponse) Reset() { *m = GetTreeResponse{} } +func (m *GetTreeResponse) String() string { return proto.CompactTextString(m) } +func (*GetTreeResponse) ProtoMessage() {} +func (*GetTreeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *GetTreeResponse) GetDirectories() []*Directory { + if m != nil { + return m.Directories + } + return nil +} + +func (m *GetTreeResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Details for the tool used to call the API. +type ToolDetails struct { + // Name of the tool, e.g. bazel. + ToolName string `protobuf:"bytes,1,opt,name=tool_name,json=toolName" json:"tool_name,omitempty"` + // Version of the tool used for the request, e.g. 5.0.3. + ToolVersion string `protobuf:"bytes,2,opt,name=tool_version,json=toolVersion" json:"tool_version,omitempty"` +} + +func (m *ToolDetails) Reset() { *m = ToolDetails{} } +func (m *ToolDetails) String() string { return proto.CompactTextString(m) } +func (*ToolDetails) ProtoMessage() {} +func (*ToolDetails) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *ToolDetails) GetToolName() string { + if m != nil { + return m.ToolName + } + return "" +} + +func (m *ToolDetails) GetToolVersion() string { + if m != nil { + return m.ToolVersion + } + return "" +} + +// An optional Metadata to attach to a RPC request to tell the server about an +// external context of the request. +// If used, the header name should be remote-request-metadata-bin and the +// contents the base64 encoded binary RequestMetadata message. +type RequestMetadata struct { + // The details for the tool invoking the requests. 
+ ToolDetails *ToolDetails `protobuf:"bytes,1,opt,name=tool_details,json=toolDetails" json:"tool_details,omitempty"` + // An identifier that ties multiple requests to the same action. + // For example, multiple requests to the CAS, Action Cache, and Execution + // API are used in order to compile foo.cc. + ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId" json:"action_id,omitempty"` + // An identifier that ties multiple actions together to a final result. + // For example, multiple actions are required to build and run foo_test. + ToolInvocationId string `protobuf:"bytes,3,opt,name=tool_invocation_id,json=toolInvocationId" json:"tool_invocation_id,omitempty"` + // An identifier to tie multiple tool invocations together. For example, + // runs of foo_test, bar_test and baz_test on a post-submit of a given patch. + CorrelatedInvocationsId string `protobuf:"bytes,4,opt,name=correlated_invocations_id,json=correlatedInvocationsId" json:"correlated_invocations_id,omitempty"` +} + +func (m *RequestMetadata) Reset() { *m = RequestMetadata{} } +func (m *RequestMetadata) String() string { return proto.CompactTextString(m) } +func (*RequestMetadata) ProtoMessage() {} +func (*RequestMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *RequestMetadata) GetToolDetails() *ToolDetails { + if m != nil { + return m.ToolDetails + } + return nil +} + +func (m *RequestMetadata) GetActionId() string { + if m != nil { + return m.ActionId + } + return "" +} + +func (m *RequestMetadata) GetToolInvocationId() string { + if m != nil { + return m.ToolInvocationId + } + return "" +} + +func (m *RequestMetadata) GetCorrelatedInvocationsId() string { + if m != nil { + return m.CorrelatedInvocationsId + } + return "" +} + +func init() { + proto.RegisterType((*Action)(nil), "google.devtools.remoteexecution.v1test.Action") + proto.RegisterType((*Command)(nil), "google.devtools.remoteexecution.v1test.Command") + 
proto.RegisterType((*Command_EnvironmentVariable)(nil), "google.devtools.remoteexecution.v1test.Command.EnvironmentVariable") + proto.RegisterType((*Platform)(nil), "google.devtools.remoteexecution.v1test.Platform") + proto.RegisterType((*Platform_Property)(nil), "google.devtools.remoteexecution.v1test.Platform.Property") + proto.RegisterType((*Directory)(nil), "google.devtools.remoteexecution.v1test.Directory") + proto.RegisterType((*FileNode)(nil), "google.devtools.remoteexecution.v1test.FileNode") + proto.RegisterType((*DirectoryNode)(nil), "google.devtools.remoteexecution.v1test.DirectoryNode") + proto.RegisterType((*Digest)(nil), "google.devtools.remoteexecution.v1test.Digest") + proto.RegisterType((*ActionResult)(nil), "google.devtools.remoteexecution.v1test.ActionResult") + proto.RegisterType((*OutputFile)(nil), "google.devtools.remoteexecution.v1test.OutputFile") + proto.RegisterType((*OutputDirectory)(nil), "google.devtools.remoteexecution.v1test.OutputDirectory") + proto.RegisterType((*ExecuteRequest)(nil), "google.devtools.remoteexecution.v1test.ExecuteRequest") + proto.RegisterType((*ExecuteResponse)(nil), "google.devtools.remoteexecution.v1test.ExecuteResponse") + proto.RegisterType((*ExecuteOperationMetadata)(nil), "google.devtools.remoteexecution.v1test.ExecuteOperationMetadata") + proto.RegisterType((*GetActionResultRequest)(nil), "google.devtools.remoteexecution.v1test.GetActionResultRequest") + proto.RegisterType((*UpdateActionResultRequest)(nil), "google.devtools.remoteexecution.v1test.UpdateActionResultRequest") + proto.RegisterType((*FindMissingBlobsRequest)(nil), "google.devtools.remoteexecution.v1test.FindMissingBlobsRequest") + proto.RegisterType((*FindMissingBlobsResponse)(nil), "google.devtools.remoteexecution.v1test.FindMissingBlobsResponse") + proto.RegisterType((*UpdateBlobRequest)(nil), "google.devtools.remoteexecution.v1test.UpdateBlobRequest") + proto.RegisterType((*BatchUpdateBlobsRequest)(nil), 
"google.devtools.remoteexecution.v1test.BatchUpdateBlobsRequest") + proto.RegisterType((*BatchUpdateBlobsResponse)(nil), "google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse") + proto.RegisterType((*BatchUpdateBlobsResponse_Response)(nil), "google.devtools.remoteexecution.v1test.BatchUpdateBlobsResponse.Response") + proto.RegisterType((*GetTreeRequest)(nil), "google.devtools.remoteexecution.v1test.GetTreeRequest") + proto.RegisterType((*GetTreeResponse)(nil), "google.devtools.remoteexecution.v1test.GetTreeResponse") + proto.RegisterType((*ToolDetails)(nil), "google.devtools.remoteexecution.v1test.ToolDetails") + proto.RegisterType((*RequestMetadata)(nil), "google.devtools.remoteexecution.v1test.RequestMetadata") + proto.RegisterEnum("google.devtools.remoteexecution.v1test.ExecuteOperationMetadata_Stage", ExecuteOperationMetadata_Stage_name, ExecuteOperationMetadata_Stage_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Execution service + +type ExecutionClient interface { + // Execute an action remotely. + // + // In order to execute an action, the client must first upload all of the + // inputs, as well as the [Command][google.devtools.remoteexecution.v1test.Command] + // to run, into the [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]. + // It then calls `Execute` with an [Action][google.devtools.remoteexecution.v1test.Action] + // referring to them. The server will run the action and eventually return the + // result. 
+ // + // The input `Action`'s fields MUST meet the various canonicalization + // requirements specified in the documentation for their types so that it has + // the same digest as other logically equivalent `Action`s. The server MAY + // enforce the requirements and return errors if a non-canonical input is + // received. It MAY also proceed without verifying some or all of the + // requirements, such as for performance reasons. If the server does not + // verify the requirement, then it will treat the `Action` as distinct from + // another logically equivalent action if they hash differently. + // + // Returns a [google.longrunning.Operation][google.longrunning.Operation] + // describing the resulting execution, with eventual `response` + // [ExecuteResponse][google.devtools.remoteexecution.v1test.ExecuteResponse]. + // The `metadata` on the operation is of type + // [ExecuteOperationMetadata][google.devtools.remoteexecution.v1test.ExecuteOperationMetadata]. + // + // To query the operation, you can use the + // [Operations API][google.longrunning.Operations.GetOperation]. If you wish + // to allow the server to stream operations updates, rather than requiring + // client polling, you can use the + // [Watcher API][google.watcher.v1.Watcher.Watch] with the Operation's `name` + // as the `target`. + // + // When using the Watcher API, the initial `data` will be the `Operation` at + // the time of the request. Updates will be provided periodically by the + // server until the `Operation` completes, at which point the response message + // will (assuming no error) be at `data.response`. + // + // The server NEED NOT implement other methods or functionality of the + // Operation and Watcher APIs. + // + // Errors discovered during creation of the `Operation` will be reported + // as gRPC Status errors, while errors that occurred while running the + // `Operation` will be reported in the `Operation` error field. 
+ // The possible errors include: + // * `INVALID_ARGUMENT`: One or more arguments are invalid. + // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the + // action requested, such as a missing input or no worker being available. + // The client may be able to fix the errors and retry. + // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run + // the action. + // * `UNAVAILABLE`: Due to a transient condition, such as all workers being + // occupied (and the server does not support a queue), the action could not + // be started. The client should retry. + // * `INTERNAL`: An internal error occurred in the execution engine or the + // worker. + // * `DEADLINE_EXCEEDED`: The execution timed out. + Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type executionClient struct { + cc *grpc.ClientConn +} + +func NewExecutionClient(cc *grpc.ClientConn) ExecutionClient { + return &executionClient{cc} +} + +func (c *executionClient) Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.devtools.remoteexecution.v1test.Execution/Execute", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Execution service + +type ExecutionServer interface { + // Execute an action remotely. + // + // In order to execute an action, the client must first upload all of the + // inputs, as well as the [Command][google.devtools.remoteexecution.v1test.Command] + // to run, into the [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage]. + // It then calls `Execute` with an [Action][google.devtools.remoteexecution.v1test.Action] + // referring to them. The server will run the action and eventually return the + // result. 
+ // + // The input `Action`'s fields MUST meet the various canonicalization + // requirements specified in the documentation for their types so that it has + // the same digest as other logically equivalent `Action`s. The server MAY + // enforce the requirements and return errors if a non-canonical input is + // received. It MAY also proceed without verifying some or all of the + // requirements, such as for performance reasons. If the server does not + // verify the requirement, then it will treat the `Action` as distinct from + // another logically equivalent action if they hash differently. + // + // Returns a [google.longrunning.Operation][google.longrunning.Operation] + // describing the resulting execution, with eventual `response` + // [ExecuteResponse][google.devtools.remoteexecution.v1test.ExecuteResponse]. + // The `metadata` on the operation is of type + // [ExecuteOperationMetadata][google.devtools.remoteexecution.v1test.ExecuteOperationMetadata]. + // + // To query the operation, you can use the + // [Operations API][google.longrunning.Operations.GetOperation]. If you wish + // to allow the server to stream operations updates, rather than requiring + // client polling, you can use the + // [Watcher API][google.watcher.v1.Watcher.Watch] with the Operation's `name` + // as the `target`. + // + // When using the Watcher API, the initial `data` will be the `Operation` at + // the time of the request. Updates will be provided periodically by the + // server until the `Operation` completes, at which point the response message + // will (assuming no error) be at `data.response`. + // + // The server NEED NOT implement other methods or functionality of the + // Operation and Watcher APIs. + // + // Errors discovered during creation of the `Operation` will be reported + // as gRPC Status errors, while errors that occurred while running the + // `Operation` will be reported in the `Operation` error field. 
+ // The possible errors include: + // * `INVALID_ARGUMENT`: One or more arguments are invalid. + // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the + // action requested, such as a missing input or no worker being available. + // The client may be able to fix the errors and retry. + // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run + // the action. + // * `UNAVAILABLE`: Due to a transient condition, such as all workers being + // occupied (and the server does not support a queue), the action could not + // be started. The client should retry. + // * `INTERNAL`: An internal error occurred in the execution engine or the + // worker. + // * `DEADLINE_EXCEEDED`: The execution timed out. + Execute(context.Context, *ExecuteRequest) (*google_longrunning.Operation, error) +} + +func RegisterExecutionServer(s *grpc.Server, srv ExecutionServer) { + s.RegisterService(&_Execution_serviceDesc, srv) +} + +func _Execution_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecuteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExecutionServer).Execute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.remoteexecution.v1test.Execution/Execute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExecutionServer).Execute(ctx, req.(*ExecuteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Execution_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.remoteexecution.v1test.Execution", + HandlerType: (*ExecutionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Execute", + Handler: _Execution_Execute_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/remoteexecution/v1test/remote_execution.proto", +} + +// Client 
API for ActionCache service + +type ActionCacheClient interface { + // Retrieve a cached execution result. + // + // Errors: + // * `NOT_FOUND`: The requested `ActionResult` is not in the cache. + GetActionResult(ctx context.Context, in *GetActionResultRequest, opts ...grpc.CallOption) (*ActionResult, error) + // Upload a new execution result. + // + // This method is intended for servers which implement the distributed cache + // independently of the [Execution][google.devtools.remoteexecution.v1test.Execution] + // API. As a result, it is OPTIONAL for servers to implement. + // + // Errors: + // * `NOT_IMPLEMENTED`: This method is not supported by the server. + // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the + // entry to the cache. + UpdateActionResult(ctx context.Context, in *UpdateActionResultRequest, opts ...grpc.CallOption) (*ActionResult, error) +} + +type actionCacheClient struct { + cc *grpc.ClientConn +} + +func NewActionCacheClient(cc *grpc.ClientConn) ActionCacheClient { + return &actionCacheClient{cc} +} + +func (c *actionCacheClient) GetActionResult(ctx context.Context, in *GetActionResultRequest, opts ...grpc.CallOption) (*ActionResult, error) { + out := new(ActionResult) + err := grpc.Invoke(ctx, "/google.devtools.remoteexecution.v1test.ActionCache/GetActionResult", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *actionCacheClient) UpdateActionResult(ctx context.Context, in *UpdateActionResultRequest, opts ...grpc.CallOption) (*ActionResult, error) { + out := new(ActionResult) + err := grpc.Invoke(ctx, "/google.devtools.remoteexecution.v1test.ActionCache/UpdateActionResult", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ActionCache service + +type ActionCacheServer interface { + // Retrieve a cached execution result. + // + // Errors: + // * `NOT_FOUND`: The requested `ActionResult` is not in the cache. 
+ GetActionResult(context.Context, *GetActionResultRequest) (*ActionResult, error) + // Upload a new execution result. + // + // This method is intended for servers which implement the distributed cache + // independently of the [Execution][google.devtools.remoteexecution.v1test.Execution] + // API. As a result, it is OPTIONAL for servers to implement. + // + // Errors: + // * `NOT_IMPLEMENTED`: This method is not supported by the server. + // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the + // entry to the cache. + UpdateActionResult(context.Context, *UpdateActionResultRequest) (*ActionResult, error) +} + +func RegisterActionCacheServer(s *grpc.Server, srv ActionCacheServer) { + s.RegisterService(&_ActionCache_serviceDesc, srv) +} + +func _ActionCache_GetActionResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetActionResultRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActionCacheServer).GetActionResult(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.remoteexecution.v1test.ActionCache/GetActionResult", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActionCacheServer).GetActionResult(ctx, req.(*GetActionResultRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActionCache_UpdateActionResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateActionResultRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActionCacheServer).UpdateActionResult(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.remoteexecution.v1test.ActionCache/UpdateActionResult", + } + handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActionCacheServer).UpdateActionResult(ctx, req.(*UpdateActionResultRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ActionCache_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.remoteexecution.v1test.ActionCache", + HandlerType: (*ActionCacheServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetActionResult", + Handler: _ActionCache_GetActionResult_Handler, + }, + { + MethodName: "UpdateActionResult", + Handler: _ActionCache_UpdateActionResult_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/remoteexecution/v1test/remote_execution.proto", +} + +// Client API for ContentAddressableStorage service + +type ContentAddressableStorageClient interface { + // Determine if blobs are present in the CAS. + // + // Clients can use this API before uploading blobs to determine which ones are + // already present in the CAS and do not need to be uploaded again. + // + // There are no method-specific errors. + FindMissingBlobs(ctx context.Context, in *FindMissingBlobsRequest, opts ...grpc.CallOption) (*FindMissingBlobsResponse, error) + // Upload many blobs at once. + // + // The client MUST NOT upload blobs with a combined total size of more than 10 + // MiB using this API. Such requests should either be split into smaller + // chunks or uploaded using the + // [ByteStream API][google.bytestream.ByteStream], as appropriate. + // + // This request is equivalent to calling [UpdateBlob][] on each individual + // blob, in parallel. The requests may succeed or fail independently. + // + // Errors: + // * `INVALID_ARGUMENT`: The client attempted to upload more than 10 MiB of + // data. + // + // Individual requests may return the following errors, additionally: + // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob. 
+ // * `INVALID_ARGUMENT`: The [Digest][google.devtools.remoteexecution.v1test.Digest] + // does not match the provided data. + BatchUpdateBlobs(ctx context.Context, in *BatchUpdateBlobsRequest, opts ...grpc.CallOption) (*BatchUpdateBlobsResponse, error) + // Fetch the entire directory tree rooted at a node. + // + // This request must be targeted at a + // [Directory][google.devtools.remoteexecution.v1test.Directory] stored in the + // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage] + // (CAS). The server will enumerate the `Directory` tree recursively and + // return every node descended from the root. + // The exact traversal order is unspecified and, unless retrieving subsequent + // pages from an earlier request, is not guaranteed to be stable across + // multiple invocations of `GetTree`. + // + // If part of the tree is missing from the CAS, the server will return the + // portion present and omit the rest. + // + // * `NOT_FOUND`: The requested tree root is not present in the CAS. + GetTree(ctx context.Context, in *GetTreeRequest, opts ...grpc.CallOption) (*GetTreeResponse, error) +} + +type contentAddressableStorageClient struct { + cc *grpc.ClientConn +} + +func NewContentAddressableStorageClient(cc *grpc.ClientConn) ContentAddressableStorageClient { + return &contentAddressableStorageClient{cc} +} + +func (c *contentAddressableStorageClient) FindMissingBlobs(ctx context.Context, in *FindMissingBlobsRequest, opts ...grpc.CallOption) (*FindMissingBlobsResponse, error) { + out := new(FindMissingBlobsResponse) + err := grpc.Invoke(ctx, "/google.devtools.remoteexecution.v1test.ContentAddressableStorage/FindMissingBlobs", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentAddressableStorageClient) BatchUpdateBlobs(ctx context.Context, in *BatchUpdateBlobsRequest, opts ...grpc.CallOption) (*BatchUpdateBlobsResponse, error) { + out := new(BatchUpdateBlobsResponse) + err := grpc.Invoke(ctx, "/google.devtools.remoteexecution.v1test.ContentAddressableStorage/BatchUpdateBlobs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentAddressableStorageClient) GetTree(ctx context.Context, in *GetTreeRequest, opts ...grpc.CallOption) (*GetTreeResponse, error) { + out := new(GetTreeResponse) + err := grpc.Invoke(ctx, "/google.devtools.remoteexecution.v1test.ContentAddressableStorage/GetTree", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ContentAddressableStorage service + +type ContentAddressableStorageServer interface { + // Determine if blobs are present in the CAS. + // + // Clients can use this API before uploading blobs to determine which ones are + // already present in the CAS and do not need to be uploaded again. + // + // There are no method-specific errors. + FindMissingBlobs(context.Context, *FindMissingBlobsRequest) (*FindMissingBlobsResponse, error) + // Upload many blobs at once. + // + // The client MUST NOT upload blobs with a combined total size of more than 10 + // MiB using this API. Such requests should either be split into smaller + // chunks or uploaded using the + // [ByteStream API][google.bytestream.ByteStream], as appropriate. + // + // This request is equivalent to calling [UpdateBlob][] on each individual + // blob, in parallel. The requests may succeed or fail independently. + // + // Errors: + // * `INVALID_ARGUMENT`: The client attempted to upload more than 10 MiB of + // data. 
+ // + // Individual requests may return the following errors, additionally: + // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob. + // * `INVALID_ARGUMENT`: The [Digest][google.devtools.remoteexecution.v1test.Digest] + // does not match the provided data. + BatchUpdateBlobs(context.Context, *BatchUpdateBlobsRequest) (*BatchUpdateBlobsResponse, error) + // Fetch the entire directory tree rooted at a node. + // + // This request must be targeted at a + // [Directory][google.devtools.remoteexecution.v1test.Directory] stored in the + // [ContentAddressableStorage][google.devtools.remoteexecution.v1test.ContentAddressableStorage] + // (CAS). The server will enumerate the `Directory` tree recursively and + // return every node descended from the root. + // The exact traversal order is unspecified and, unless retrieving subsequent + // pages from an earlier request, is not guaranteed to be stable across + // multiple invocations of `GetTree`. + // + // If part of the tree is missing from the CAS, the server will return the + // portion present and omit the rest. + // + // * `NOT_FOUND`: The requested tree root is not present in the CAS. 
+ GetTree(context.Context, *GetTreeRequest) (*GetTreeResponse, error) +} + +func RegisterContentAddressableStorageServer(s *grpc.Server, srv ContentAddressableStorageServer) { + s.RegisterService(&_ContentAddressableStorage_serviceDesc, srv) +} + +func _ContentAddressableStorage_FindMissingBlobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FindMissingBlobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentAddressableStorageServer).FindMissingBlobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.remoteexecution.v1test.ContentAddressableStorage/FindMissingBlobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentAddressableStorageServer).FindMissingBlobs(ctx, req.(*FindMissingBlobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ContentAddressableStorage_BatchUpdateBlobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchUpdateBlobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentAddressableStorageServer).BatchUpdateBlobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.remoteexecution.v1test.ContentAddressableStorage/BatchUpdateBlobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentAddressableStorageServer).BatchUpdateBlobs(ctx, req.(*BatchUpdateBlobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ContentAddressableStorage_GetTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTreeRequest) + 
if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentAddressableStorageServer).GetTree(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.remoteexecution.v1test.ContentAddressableStorage/GetTree", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentAddressableStorageServer).GetTree(ctx, req.(*GetTreeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ContentAddressableStorage_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.remoteexecution.v1test.ContentAddressableStorage", + HandlerType: (*ContentAddressableStorageServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FindMissingBlobs", + Handler: _ContentAddressableStorage_FindMissingBlobs_Handler, + }, + { + MethodName: "BatchUpdateBlobs", + Handler: _ContentAddressableStorage_BatchUpdateBlobs_Handler, + }, + { + MethodName: "GetTree", + Handler: _ContentAddressableStorage_GetTree_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/remoteexecution/v1test/remote_execution.proto", +} + +func init() { + proto.RegisterFile("google/devtools/remoteexecution/v1test/remote_execution.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 1868 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcf, 0x6f, 0x1c, 0x57, + 0x1d, 0x67, 0x76, 0xbd, 0xeb, 0xdd, 0xef, 0xae, 0x63, 0xe7, 0x35, 0x34, 0x9b, 0x6d, 0x83, 0xdc, + 0xa9, 0x54, 0x59, 0x56, 0x3b, 0x4b, 0x9c, 0x96, 0x08, 0x57, 0xa5, 0xc4, 0xeb, 0x75, 0x6a, 0x35, + 0xb1, 0xdd, 0xb1, 0xd7, 0x6d, 0x4a, 0xa5, 0xe1, 0xed, 0xcc, 0xf3, 0x66, 0xc8, 0xee, 0xbc, 0xe1, + 0xbd, 0xb7, 0x6e, 0xd2, 0x10, 0x0e, 0x08, 0x89, 0x03, 0x02, 0x09, 0x2a, 0x04, 0x12, 0x37, 0x24, + 0x84, 0x84, 0x38, 0xf1, 0x1f, 0xc0, 0x1f, 0xc0, 0x01, 0x2e, 0x9c, 0x11, 0x27, 0x4e, 0x9c, 0x11, + 0x12, 0xe8, 0xfd, 0x98, 0x99, 0xdd, 0xb5, 
0x43, 0x66, 0xed, 0x44, 0xe2, 0xb6, 0xf3, 0x7d, 0xef, + 0xfb, 0xf9, 0xfe, 0xfe, 0xf1, 0x6c, 0x78, 0xa7, 0x4f, 0x69, 0x7f, 0x40, 0x5a, 0x01, 0x39, 0x16, + 0x94, 0x0e, 0x78, 0x8b, 0x91, 0x21, 0x15, 0x84, 0x3c, 0x20, 0xfe, 0x48, 0x84, 0x34, 0x6a, 0x1d, + 0x5f, 0x13, 0x84, 0x0b, 0x43, 0xf6, 0x52, 0xba, 0x13, 0x33, 0x2a, 0x28, 0x7a, 0x4d, 0xb3, 0x3b, + 0x09, 0xbb, 0x33, 0xc5, 0xee, 0x68, 0xf6, 0xe6, 0xcb, 0x46, 0x0c, 0x8e, 0xc3, 0x16, 0x8e, 0x22, + 0x2a, 0xb0, 0x3c, 0xe5, 0x1a, 0xa5, 0xf9, 0xaa, 0x39, 0x1d, 0xd0, 0xa8, 0xcf, 0x46, 0x51, 0x14, + 0x46, 0xfd, 0x16, 0x8d, 0x09, 0x9b, 0xb8, 0xf4, 0x25, 0x73, 0x49, 0x7d, 0xf5, 0x46, 0x47, 0xad, + 0x60, 0xa4, 0x2f, 0x98, 0xf3, 0xcb, 0xe6, 0x9c, 0xc5, 0x7e, 0x8b, 0x0b, 0x2c, 0x46, 0x86, 0xd1, + 0xfe, 0x7d, 0x11, 0xca, 0x37, 0x7d, 0x79, 0x13, 0x75, 0xe1, 0x82, 0x4f, 0x87, 0x43, 0x1c, 0x05, + 0x5e, 0x10, 0xf6, 0x09, 0x17, 0x0d, 0x6b, 0xd9, 0x5a, 0xa9, 0xad, 0x39, 0x4e, 0x3e, 0x3b, 0x9c, + 0x4d, 0xc5, 0xe5, 0x2e, 0x18, 0x14, 0xfd, 0x89, 0x3e, 0x86, 0x8b, 0x61, 0x14, 0x8f, 0x84, 0xc7, + 0x28, 0x15, 0x09, 0x72, 0xe1, 0x4c, 0xc8, 0x8b, 0x0a, 0xc8, 0xa5, 0x54, 0x18, 0xec, 0x57, 0xa0, + 0x4e, 0x47, 0x42, 0x82, 0x1f, 0x85, 0x03, 0xc2, 0x1b, 0xc5, 0xe5, 0xe2, 0x4a, 0xd5, 0xad, 0x69, + 0xda, 0x96, 0x24, 0xa1, 0x37, 0x00, 0x99, 0x2b, 0x41, 0xc8, 0x88, 0x2f, 0x28, 0x0b, 0x09, 0x6f, + 0xcc, 0xa9, 0x8b, 0x17, 0xf5, 0xc9, 0x66, 0x76, 0x80, 0x6e, 0x43, 0x25, 0x1e, 0x60, 0x71, 0x44, + 0xd9, 0xb0, 0x51, 0x52, 0x4a, 0x7e, 0x39, 0xaf, 0x92, 0x7b, 0x86, 0xcf, 0x4d, 0x11, 0xd0, 0x75, + 0x98, 0x17, 0xe1, 0x90, 0xd0, 0x91, 0x68, 0x94, 0x15, 0xd8, 0x95, 0x04, 0x2c, 0x09, 0x94, 0xb3, + 0x69, 0x02, 0xe5, 0x26, 0x37, 0xd1, 0x32, 0xd4, 0x03, 0xea, 0x45, 0x54, 0x78, 0x3e, 0xf6, 0xef, + 0x91, 0xc6, 0xfc, 0xb2, 0xb5, 0x52, 0x71, 0x21, 0xa0, 0x3b, 0x54, 0xb4, 0x25, 0xc5, 0xfe, 0x9b, + 0x05, 0xf3, 0x6d, 0xed, 0x64, 0xf4, 0x32, 0x54, 0x31, 0xeb, 0x8f, 0x86, 0x24, 0x12, 0xbc, 0x61, + 0x29, 0xb3, 0x32, 0x02, 0x7a, 0x00, 0x5f, 0x24, 0xd1, 0x71, 0xc8, 0x68, 0x24, 
0xbf, 0xbd, 0x63, + 0xcc, 0x42, 0xdc, 0x93, 0x9e, 0x2a, 0x2c, 0x17, 0x57, 0x6a, 0x6b, 0xed, 0xbc, 0xb6, 0x19, 0x69, + 0x4e, 0x27, 0x03, 0x3b, 0x34, 0x58, 0xee, 0x25, 0x72, 0x92, 0xc8, 0x9b, 0xef, 0xc2, 0x0b, 0xa7, + 0x5c, 0x46, 0x08, 0xe6, 0x22, 0x3c, 0x24, 0x2a, 0xb5, 0xaa, 0xae, 0xfa, 0x8d, 0x2e, 0x41, 0xe9, + 0x18, 0x0f, 0x46, 0x44, 0x65, 0x45, 0xd5, 0xd5, 0x1f, 0xf6, 0x2f, 0x2d, 0xa8, 0x24, 0x2e, 0x45, + 0x77, 0x01, 0x62, 0x26, 0xb3, 0x5e, 0xc8, 0xe8, 0x59, 0x4a, 0xf9, 0xaf, 0xce, 0x1a, 0x18, 0x67, + 0x4f, 0x43, 0x3c, 0x74, 0xc7, 0xc0, 0x9a, 0x6f, 0x42, 0x25, 0xa1, 0xcf, 0xa0, 0xdd, 0xef, 0x2c, + 0xa8, 0x26, 0x79, 0xf3, 0x10, 0x6d, 0x41, 0x49, 0x27, 0xa0, 0xd6, 0x2c, 0x77, 0xca, 0xc8, 0x14, + 0xdd, 0xa1, 0x01, 0x71, 0x35, 0x3b, 0xfa, 0x10, 0x6a, 0xe3, 0x59, 0xaa, 0x83, 0xf4, 0x56, 0xfe, + 0x2a, 0x31, 0xfa, 0x28, 0xc8, 0x71, 0x24, 0xfb, 0x87, 0x16, 0x54, 0x12, 0x61, 0xa7, 0x5a, 0xb9, + 0x05, 0xe5, 0x73, 0x95, 0xa6, 0xe1, 0x46, 0xaf, 0xc2, 0x42, 0xc8, 0x4d, 0x27, 0x94, 0x01, 0x6f, + 0xcc, 0xa9, 0xec, 0xad, 0x87, 0xbc, 0x93, 0xd2, 0xec, 0xfb, 0xb0, 0x30, 0xa1, 0xeb, 0xf3, 0xd4, + 0xc8, 0x7e, 0x1b, 0xca, 0xa6, 0x5b, 0x20, 0x98, 0xbb, 0x87, 0xf9, 0xbd, 0x44, 0x8a, 0xfc, 0x8d, + 0xae, 0x02, 0xf0, 0xf0, 0x33, 0xe2, 0xf5, 0x1e, 0x0a, 0xe5, 0x70, 0x6b, 0xa5, 0xe8, 0x56, 0x25, + 0x65, 0x43, 0x12, 0xec, 0x3f, 0x17, 0xa1, 0xae, 0xdb, 0xa3, 0x4b, 0xf8, 0x68, 0x20, 0x50, 0x77, + 0xaa, 0xe3, 0xe8, 0x10, 0xad, 0xe5, 0xd5, 0x6d, 0x37, 0xed, 0x4c, 0x93, 0x5d, 0xea, 0xe8, 0xd4, + 0x2e, 0x55, 0x54, 0xe0, 0x37, 0x66, 0x03, 0x4f, 0x3d, 0x7b, 0x5a, 0x7b, 0x7b, 0x09, 0xaa, 0xe4, + 0x41, 0x28, 0x3c, 0x9f, 0x06, 0x3a, 0x34, 0x25, 0xb7, 0x22, 0x09, 0x6d, 0x19, 0x05, 0xe9, 0x0b, + 0x11, 0x50, 0xd9, 0xaa, 0xf1, 0xa7, 0xaa, 0xfb, 0xd5, 0xdd, 0xaa, 0xa6, 0xb8, 0xf8, 0x53, 0xb4, + 0x0f, 0x0b, 0xe6, 0xd8, 0xc4, 0xa5, 0x7c, 0xa6, 0xb8, 0xd4, 0x35, 0x88, 0x89, 0x89, 0x96, 0x49, + 0x18, 0x53, 0x32, 0xe7, 0x53, 0x99, 0x84, 0xb1, 0x4c, 0xa6, 0x3c, 0x36, 0x32, 0x2b, 0x67, 0x96, + 0x49, 0x18, 0xd3, 
0x5f, 0xf6, 0x6f, 0x2c, 0x80, 0x2c, 0x10, 0x32, 0x2d, 0x62, 0x2c, 0xd2, 0xb4, + 0x90, 0xbf, 0x9f, 0x59, 0x39, 0x34, 0x60, 0xde, 0xa7, 0x91, 0x20, 0x91, 0x68, 0x14, 0x95, 0x6d, + 0xc9, 0x67, 0xbe, 0x42, 0x19, 0xc2, 0xe2, 0x54, 0x50, 0x9f, 0xa7, 0xb6, 0xf6, 0xe7, 0x05, 0xb8, + 0xa0, 0xa5, 0x13, 0x97, 0x7c, 0x7b, 0x94, 0xd4, 0x73, 0xc4, 0x05, 0x8e, 0x7c, 0xe2, 0x8d, 0x95, + 0x68, 0x3d, 0x21, 0xee, 0x98, 0x52, 0xc5, 0xaa, 0x48, 0x66, 0x95, 0x6f, 0x4a, 0xcb, 0x70, 0xa3, + 0x55, 0xb8, 0xc8, 0xef, 0x87, 0xb1, 0x9e, 0x7b, 0xde, 0x80, 0xd2, 0xfb, 0xa3, 0x58, 0xf9, 0xad, + 0xe2, 0x2e, 0xca, 0x03, 0x35, 0xfd, 0x6e, 0x2b, 0x32, 0xba, 0x0e, 0x2f, 0x0a, 0x2a, 0xf0, 0xc0, + 0xd3, 0xcb, 0x85, 0xac, 0x46, 0xcf, 0xa7, 0xa3, 0x48, 0x98, 0xb4, 0x7e, 0x41, 0x9d, 0x6e, 0x47, + 0x26, 0xca, 0x6d, 0x79, 0x74, 0x2a, 0x93, 0xae, 0xfc, 0x92, 0xaa, 0xfc, 0x29, 0x26, 0xdd, 0x03, + 0xbe, 0x6f, 0xc1, 0x62, 0xea, 0x15, 0x1e, 0xd3, 0x88, 0x13, 0x74, 0x1b, 0xca, 0x4c, 0x35, 0x04, + 0xb3, 0x23, 0xbd, 0x39, 0xa3, 0xc5, 0x8a, 0xd7, 0x35, 0x18, 0xd2, 0xc9, 0xca, 0xe4, 0xc0, 0x33, + 0xa0, 0x05, 0x9d, 0x0b, 0x9a, 0xa8, 0x2f, 0xdb, 0xff, 0x2a, 0x40, 0xc3, 0xa8, 0xb1, 0x9b, 0xac, + 0x7f, 0x77, 0x88, 0xc0, 0x01, 0x16, 0x18, 0x7d, 0x02, 0x25, 0x2e, 0x70, 0x5f, 0x87, 0xe7, 0xc2, + 0xda, 0x56, 0x5e, 0x75, 0x9e, 0x04, 0xe8, 0xec, 0x4b, 0x34, 0x57, 0x83, 0xca, 0x2a, 0xd4, 0x11, + 0x3a, 0xdf, 0xfa, 0x56, 0xd7, 0x20, 0xa6, 0xf2, 0x5f, 0x07, 0x64, 0xda, 0x09, 0x17, 0x8c, 0xe0, + 0xa1, 0x4e, 0xaf, 0xa2, 0x4a, 0xaf, 0x25, 0x7d, 0xb2, 0xaf, 0x0e, 0x54, 0x8a, 0xe9, 0xdb, 0xb2, + 0x11, 0x8c, 0xdf, 0x9e, 0x4b, 0x6f, 0x13, 0xc6, 0xb2, 0xdb, 0xf6, 0x2e, 0x94, 0x94, 0x01, 0xa8, + 0x06, 0xf3, 0xdd, 0x9d, 0xf7, 0x77, 0x76, 0x3f, 0xdc, 0x59, 0xfa, 0x02, 0x5a, 0x84, 0x5a, 0xfb, + 0x66, 0xfb, 0xbd, 0x8e, 0xd7, 0x7e, 0xaf, 0xd3, 0x7e, 0x7f, 0xc9, 0x42, 0x00, 0xe5, 0x0f, 0xba, + 0x9d, 0x6e, 0x67, 0x73, 0xa9, 0x80, 0x16, 0xa0, 0xda, 0xf9, 0xa8, 0xd3, 0xee, 0x1e, 0x6c, 0xef, + 0xdc, 0x5a, 0x2a, 0xca, 0xcf, 0xf6, 0xee, 0x9d, 0xbd, 
0xdb, 0x9d, 0x83, 0xce, 0xe6, 0xd2, 0x9c, + 0xfd, 0x53, 0x0b, 0x5e, 0xbc, 0x45, 0xc4, 0x44, 0xf4, 0x66, 0xa9, 0x90, 0xe7, 0xe1, 0x41, 0xfb, + 0x9f, 0x16, 0x5c, 0xe9, 0xc6, 0x01, 0x16, 0xe4, 0xff, 0x4a, 0x2f, 0x74, 0x37, 0x05, 0x35, 0xe9, + 0x5c, 0x3c, 0x47, 0x8d, 0x18, 0x68, 0x53, 0x04, 0x3f, 0xb1, 0xe0, 0xf2, 0x56, 0x18, 0x05, 0x77, + 0x42, 0xce, 0xc3, 0xa8, 0xbf, 0x31, 0xa0, 0x3d, 0x3e, 0x93, 0xc1, 0x1f, 0x40, 0xbd, 0x37, 0xa0, + 0x3d, 0x63, 0x6e, 0x32, 0xbf, 0x67, 0xb5, 0xb7, 0x26, 0x31, 0xf4, 0x6f, 0x6e, 0x7f, 0x07, 0x1a, + 0x27, 0x55, 0x32, 0x7d, 0xe2, 0x9b, 0x70, 0x69, 0xa8, 0xe9, 0xde, 0x33, 0x10, 0x8b, 0x86, 0x99, + 0x8c, 0x44, 0xfa, 0x77, 0xe1, 0xa2, 0xce, 0x01, 0x49, 0x4c, 0x5c, 0xa1, 0x9e, 0x72, 0x6a, 0xce, + 0x9c, 0xfb, 0x29, 0xa7, 0x50, 0xb2, 0x05, 0x4a, 0x36, 0x07, 0x95, 0x24, 0x75, 0x57, 0xfd, 0xb6, + 0x7f, 0x66, 0xc1, 0xe5, 0x0d, 0x2c, 0xfc, 0x7b, 0x99, 0x16, 0xb3, 0x45, 0xa4, 0x0b, 0x15, 0xa6, + 0xef, 0x27, 0x6e, 0xc9, 0xbd, 0xd8, 0x9f, 0x30, 0xdc, 0x4d, 0xa1, 0xec, 0x1f, 0x15, 0xa0, 0x71, + 0x52, 0x2f, 0x13, 0x96, 0x3e, 0x54, 0x99, 0xf9, 0x9d, 0xec, 0xec, 0xdb, 0x79, 0x85, 0x3e, 0x09, + 0xd4, 0x49, 0x7e, 0xb8, 0x19, 0x76, 0xf3, 0x07, 0x16, 0x54, 0x52, 0xa9, 0xbb, 0x50, 0x1b, 0x4b, + 0x82, 0x33, 0x86, 0x04, 0xb2, 0xd4, 0x43, 0xab, 0x50, 0xd6, 0x8f, 0x79, 0x53, 0xb6, 0x28, 0xc1, + 0x62, 0xb1, 0x2f, 0x3b, 0xb8, 0x18, 0x71, 0xd7, 0xdc, 0xb0, 0xff, 0x68, 0xc1, 0x85, 0x5b, 0x44, + 0x1c, 0x30, 0x32, 0xdb, 0x6c, 0xdf, 0x85, 0xda, 0xf9, 0x1f, 0xee, 0xc0, 0xb2, 0x37, 0xfb, 0x4b, + 0x50, 0x8d, 0x71, 0x9f, 0x78, 0x72, 0xc9, 0x56, 0x9d, 0xa1, 0xe4, 0x56, 0x24, 0x61, 0x3f, 0xfc, + 0x4c, 0xad, 0xa0, 0xea, 0x50, 0xd0, 0xfb, 0x24, 0x32, 0xed, 0x5d, 0x5d, 0x3f, 0x90, 0x04, 0xfb, + 0xc7, 0x16, 0x2c, 0xa6, 0x46, 0x18, 0xaf, 0xee, 0x4f, 0xbe, 0x99, 0x74, 0x34, 0xaf, 0xcd, 0xfc, + 0x66, 0x9a, 0x78, 0x2f, 0xa1, 0xd7, 0x60, 0x31, 0x22, 0x0f, 0x84, 0x37, 0xa6, 0x8c, 0x7e, 0xfe, + 0x2d, 0x48, 0xf2, 0x5e, 0xaa, 0xd0, 0x1d, 0xa8, 0x1d, 0x50, 0x3a, 0xd8, 0x24, 0x02, 0x87, 
0x03, + 0xb5, 0x5e, 0x4b, 0x69, 0xe3, 0xde, 0xac, 0x48, 0x82, 0xf2, 0xe4, 0x2b, 0x50, 0x57, 0x87, 0xc7, + 0x84, 0xf1, 0x64, 0x57, 0xaa, 0xba, 0x35, 0x49, 0x3b, 0xd4, 0x24, 0xd9, 0xd1, 0x17, 0x4d, 0x74, + 0xd2, 0xd1, 0x7e, 0x68, 0xd8, 0x02, 0x2d, 0xc3, 0xa4, 0xcd, 0xf5, 0xbc, 0x06, 0x8e, 0xa9, 0xa7, + 0x65, 0x8d, 0xe9, 0x6a, 0xba, 0x74, 0x18, 0x18, 0x5d, 0x2a, 0x9a, 0xb0, 0x1d, 0xc8, 0x71, 0xab, + 0x84, 0x86, 0xd1, 0x31, 0xf5, 0x71, 0x72, 0xcb, 0x0c, 0x67, 0x79, 0xb2, 0x9d, 0x1e, 0x6c, 0x07, + 0x68, 0x1d, 0xae, 0xf8, 0x94, 0x31, 0x32, 0xc0, 0x82, 0x04, 0x63, 0x3c, 0x5c, 0x32, 0xe9, 0x20, + 0x5e, 0xce, 0x2e, 0x64, 0xac, 0x7c, 0x3b, 0x58, 0xfb, 0xb5, 0x05, 0xd5, 0x4e, 0xa2, 0x34, 0xfa, + 0xb9, 0x05, 0xf3, 0x66, 0x27, 0x41, 0x5f, 0x99, 0x71, 0x89, 0x31, 0x8e, 0x6b, 0x5e, 0x4d, 0xf8, + 0xc6, 0xfe, 0x62, 0xe6, 0xa4, 0x1b, 0x8e, 0xfd, 0xd6, 0xf7, 0xfe, 0xf2, 0xf7, 0xcf, 0x0b, 0x2d, + 0x7b, 0x35, 0xf9, 0xeb, 0xdd, 0xa3, 0x89, 0x22, 0x78, 0x67, 0x75, 0xf5, 0x71, 0x4b, 0xfb, 0x81, + 0xaf, 0x6b, 0x51, 0x64, 0xdd, 0x5a, 0x5d, 0xfb, 0x77, 0x11, 0x6a, 0x7a, 0x30, 0xa9, 0x2d, 0x14, + 0xfd, 0x43, 0xa7, 0xe2, 0xc4, 0xe3, 0xf0, 0x6b, 0x79, 0x35, 0x3e, 0x7d, 0x95, 0x68, 0x9e, 0x69, + 0x42, 0xda, 0x58, 0x19, 0xf4, 0x0d, 0x74, 0xf7, 0xa9, 0x06, 0xbd, 0xa1, 0xe7, 0x31, 0x6f, 0x3d, + 0x9a, 0x18, 0xfa, 0x8e, 0x7c, 0x09, 0x3f, 0x9e, 0x26, 0x66, 0xcf, 0xe2, 0xc7, 0xe8, 0x3f, 0x16, + 0xa0, 0x93, 0x9b, 0x06, 0xba, 0x39, 0x5b, 0xa3, 0x7e, 0x76, 0x26, 0xc7, 0xca, 0xe4, 0x6f, 0x35, + 0x9f, 0x9f, 0xc9, 0xeb, 0x93, 0x2b, 0xcc, 0xda, 0x2f, 0x4a, 0x70, 0xa5, 0xad, 0x87, 0xe1, 0xcd, + 0x20, 0x60, 0x84, 0x73, 0xf9, 0x40, 0xdb, 0x17, 0x94, 0xc9, 0x35, 0xf3, 0x4f, 0x16, 0x2c, 0x4d, + 0xef, 0x00, 0xe8, 0xdd, 0xfc, 0x7f, 0x05, 0x3a, 0x75, 0xa1, 0x69, 0x7e, 0xfd, 0xec, 0x00, 0xba, + 0x37, 0xda, 0x37, 0x94, 0x9f, 0xae, 0xd9, 0xaf, 0xff, 0x0f, 0x3f, 0xc9, 0x79, 0xc2, 0xd7, 0x8f, + 0x32, 0x88, 0x75, 0x6b, 0x55, 0x19, 0x34, 0x3d, 0xe8, 0xf2, 0x1b, 0xf4, 0x84, 0x7d, 0x20, 0xbf, + 0x41, 0x4f, 0x9a, 0xb1, 0x33, 
0x18, 0xd4, 0xcb, 0x20, 0xa4, 0x41, 0x7f, 0xb5, 0x60, 0xde, 0x4c, + 0x8e, 0xfc, 0x8d, 0x65, 0x72, 0x5e, 0x36, 0x6f, 0xcc, 0xcc, 0x67, 0xb4, 0xfe, 0x44, 0x69, 0x7d, + 0x88, 0x0e, 0x9e, 0xa6, 0x75, 0xeb, 0xd1, 0xd8, 0xac, 0x4d, 0x72, 0x74, 0x9c, 0x34, 0x9e, 0xa1, + 0x7d, 0x2d, 0x65, 0xe3, 0x0f, 0x16, 0xac, 0xfa, 0x74, 0x98, 0x53, 0xb9, 0x8d, 0x4b, 0xae, 0xa2, + 0xa7, 0x3d, 0x77, 0x8f, 0x51, 0x41, 0xf7, 0xac, 0x8f, 0xbb, 0x86, 0xbf, 0x4f, 0x07, 0x38, 0xea, + 0x3b, 0x94, 0xf5, 0x5b, 0x7d, 0x12, 0xa9, 0xbf, 0x53, 0xb7, 0xf4, 0x11, 0x8e, 0x43, 0xfe, 0xb4, + 0xff, 0x85, 0xbc, 0x3d, 0x45, 0xfe, 0x55, 0xa1, 0xe8, 0x76, 0x3e, 0xfa, 0x6d, 0xe1, 0xea, 0x2d, + 0x8d, 0x3e, 0x25, 0xdc, 0x39, 0xbc, 0x76, 0x40, 0xb8, 0xe8, 0x95, 0x95, 0x9c, 0xeb, 0xff, 0x0d, + 0x00, 0x00, 0xff, 0xff, 0xe9, 0x9c, 0x6f, 0xd3, 0x72, 0x19, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/source/v1/source_context.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/source/v1/source_context.pb.go new file mode 100644 index 000000000..e67b5c20e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/source/v1/source_context.pb.go @@ -0,0 +1,945 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/source/v1/source_context.proto + +/* +Package source is a generated protocol buffer package. + +It is generated from these files: + google/devtools/source/v1/source_context.proto + +It has these top-level messages: + SourceContext + ExtendedSourceContext + AliasContext + CloudRepoSourceContext + CloudWorkspaceSourceContext + GerritSourceContext + GitSourceContext + RepoId + ProjectRepoId + CloudWorkspaceId +*/ +package source + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The type of an Alias. +type AliasContext_Kind int32 + +const ( + // Do not use. + AliasContext_ANY AliasContext_Kind = 0 + // Git tag + AliasContext_FIXED AliasContext_Kind = 1 + // Git branch + AliasContext_MOVABLE AliasContext_Kind = 2 + // OTHER is used to specify non-standard aliases, those not of the kinds + // above. For example, if a Git repo has a ref named "refs/foo/bar", it + // is considered to be of kind OTHER. + AliasContext_OTHER AliasContext_Kind = 4 +) + +var AliasContext_Kind_name = map[int32]string{ + 0: "ANY", + 1: "FIXED", + 2: "MOVABLE", + 4: "OTHER", +} +var AliasContext_Kind_value = map[string]int32{ + "ANY": 0, + "FIXED": 1, + "MOVABLE": 2, + "OTHER": 4, +} + +func (x AliasContext_Kind) String() string { + return proto.EnumName(AliasContext_Kind_name, int32(x)) +} +func (AliasContext_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// A SourceContext is a reference to a tree of files. A SourceContext together +// with a path point to a unique revision of a single file or directory. +type SourceContext struct { + // A SourceContext can refer any one of the following types of repositories. 
+ // + // Types that are valid to be assigned to Context: + // *SourceContext_CloudRepo + // *SourceContext_CloudWorkspace + // *SourceContext_Gerrit + // *SourceContext_Git + Context isSourceContext_Context `protobuf_oneof:"context"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (m *SourceContext) String() string { return proto.CompactTextString(m) } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isSourceContext_Context interface { + isSourceContext_Context() +} + +type SourceContext_CloudRepo struct { + CloudRepo *CloudRepoSourceContext `protobuf:"bytes,1,opt,name=cloud_repo,json=cloudRepo,oneof"` +} +type SourceContext_CloudWorkspace struct { + CloudWorkspace *CloudWorkspaceSourceContext `protobuf:"bytes,2,opt,name=cloud_workspace,json=cloudWorkspace,oneof"` +} +type SourceContext_Gerrit struct { + Gerrit *GerritSourceContext `protobuf:"bytes,3,opt,name=gerrit,oneof"` +} +type SourceContext_Git struct { + Git *GitSourceContext `protobuf:"bytes,6,opt,name=git,oneof"` +} + +func (*SourceContext_CloudRepo) isSourceContext_Context() {} +func (*SourceContext_CloudWorkspace) isSourceContext_Context() {} +func (*SourceContext_Gerrit) isSourceContext_Context() {} +func (*SourceContext_Git) isSourceContext_Context() {} + +func (m *SourceContext) GetContext() isSourceContext_Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *SourceContext) GetCloudRepo() *CloudRepoSourceContext { + if x, ok := m.GetContext().(*SourceContext_CloudRepo); ok { + return x.CloudRepo + } + return nil +} + +func (m *SourceContext) GetCloudWorkspace() *CloudWorkspaceSourceContext { + if x, ok := m.GetContext().(*SourceContext_CloudWorkspace); ok { + return x.CloudWorkspace + } + return nil +} + +func (m *SourceContext) GetGerrit() *GerritSourceContext { + if x, ok := m.GetContext().(*SourceContext_Gerrit); ok { + return x.Gerrit + } + return nil +} + +func (m 
*SourceContext) GetGit() *GitSourceContext { + if x, ok := m.GetContext().(*SourceContext_Git); ok { + return x.Git + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SourceContext) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SourceContext_OneofMarshaler, _SourceContext_OneofUnmarshaler, _SourceContext_OneofSizer, []interface{}{ + (*SourceContext_CloudRepo)(nil), + (*SourceContext_CloudWorkspace)(nil), + (*SourceContext_Gerrit)(nil), + (*SourceContext_Git)(nil), + } +} + +func _SourceContext_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SourceContext) + // context + switch x := m.Context.(type) { + case *SourceContext_CloudRepo: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CloudRepo); err != nil { + return err + } + case *SourceContext_CloudWorkspace: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CloudWorkspace); err != nil { + return err + } + case *SourceContext_Gerrit: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Gerrit); err != nil { + return err + } + case *SourceContext_Git: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Git); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SourceContext.Context has unexpected type %T", x) + } + return nil +} + +func _SourceContext_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SourceContext) + switch tag { + case 1: // context.cloud_repo + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CloudRepoSourceContext) + err := b.DecodeMessage(msg) + m.Context = &SourceContext_CloudRepo{msg} + return true, err + case 2: // context.cloud_workspace + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + msg := new(CloudWorkspaceSourceContext) + err := b.DecodeMessage(msg) + m.Context = &SourceContext_CloudWorkspace{msg} + return true, err + case 3: // context.gerrit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GerritSourceContext) + err := b.DecodeMessage(msg) + m.Context = &SourceContext_Gerrit{msg} + return true, err + case 6: // context.git + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GitSourceContext) + err := b.DecodeMessage(msg) + m.Context = &SourceContext_Git{msg} + return true, err + default: + return false, nil + } +} + +func _SourceContext_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SourceContext) + // context + switch x := m.Context.(type) { + case *SourceContext_CloudRepo: + s := proto.Size(x.CloudRepo) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SourceContext_CloudWorkspace: + s := proto.Size(x.CloudWorkspace) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SourceContext_Gerrit: + s := proto.Size(x.Gerrit) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SourceContext_Git: + s := proto.Size(x.Git) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// An ExtendedSourceContext is a SourceContext combined with additional +// details describing the context. +type ExtendedSourceContext struct { + // Any source context. + Context *SourceContext `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + // Labels with user defined metadata. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ExtendedSourceContext) Reset() { *m = ExtendedSourceContext{} } +func (m *ExtendedSourceContext) String() string { return proto.CompactTextString(m) } +func (*ExtendedSourceContext) ProtoMessage() {} +func (*ExtendedSourceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtendedSourceContext) GetContext() *SourceContext { + if m != nil { + return m.Context + } + return nil +} + +func (m *ExtendedSourceContext) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// An alias to a repo revision. +type AliasContext struct { + // The alias kind. + Kind AliasContext_Kind `protobuf:"varint,1,opt,name=kind,enum=google.devtools.source.v1.AliasContext_Kind" json:"kind,omitempty"` + // The alias name. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *AliasContext) Reset() { *m = AliasContext{} } +func (m *AliasContext) String() string { return proto.CompactTextString(m) } +func (*AliasContext) ProtoMessage() {} +func (*AliasContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *AliasContext) GetKind() AliasContext_Kind { + if m != nil { + return m.Kind + } + return AliasContext_ANY +} + +func (m *AliasContext) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A CloudRepoSourceContext denotes a particular revision in a cloud +// repo (a repo hosted by the Google Cloud Platform). +type CloudRepoSourceContext struct { + // The ID of the repo. + RepoId *RepoId `protobuf:"bytes,1,opt,name=repo_id,json=repoId" json:"repo_id,omitempty"` + // A revision in a cloud repository can be identified by either its revision + // ID or its Alias. 
+ // + // Types that are valid to be assigned to Revision: + // *CloudRepoSourceContext_RevisionId + // *CloudRepoSourceContext_AliasName + // *CloudRepoSourceContext_AliasContext + Revision isCloudRepoSourceContext_Revision `protobuf_oneof:"revision"` +} + +func (m *CloudRepoSourceContext) Reset() { *m = CloudRepoSourceContext{} } +func (m *CloudRepoSourceContext) String() string { return proto.CompactTextString(m) } +func (*CloudRepoSourceContext) ProtoMessage() {} +func (*CloudRepoSourceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type isCloudRepoSourceContext_Revision interface { + isCloudRepoSourceContext_Revision() +} + +type CloudRepoSourceContext_RevisionId struct { + RevisionId string `protobuf:"bytes,2,opt,name=revision_id,json=revisionId,oneof"` +} +type CloudRepoSourceContext_AliasName struct { + AliasName string `protobuf:"bytes,3,opt,name=alias_name,json=aliasName,oneof"` +} +type CloudRepoSourceContext_AliasContext struct { + AliasContext *AliasContext `protobuf:"bytes,4,opt,name=alias_context,json=aliasContext,oneof"` +} + +func (*CloudRepoSourceContext_RevisionId) isCloudRepoSourceContext_Revision() {} +func (*CloudRepoSourceContext_AliasName) isCloudRepoSourceContext_Revision() {} +func (*CloudRepoSourceContext_AliasContext) isCloudRepoSourceContext_Revision() {} + +func (m *CloudRepoSourceContext) GetRevision() isCloudRepoSourceContext_Revision { + if m != nil { + return m.Revision + } + return nil +} + +func (m *CloudRepoSourceContext) GetRepoId() *RepoId { + if m != nil { + return m.RepoId + } + return nil +} + +func (m *CloudRepoSourceContext) GetRevisionId() string { + if x, ok := m.GetRevision().(*CloudRepoSourceContext_RevisionId); ok { + return x.RevisionId + } + return "" +} + +func (m *CloudRepoSourceContext) GetAliasName() string { + if x, ok := m.GetRevision().(*CloudRepoSourceContext_AliasName); ok { + return x.AliasName + } + return "" +} + +func (m *CloudRepoSourceContext) GetAliasContext() 
*AliasContext { + if x, ok := m.GetRevision().(*CloudRepoSourceContext_AliasContext); ok { + return x.AliasContext + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CloudRepoSourceContext) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CloudRepoSourceContext_OneofMarshaler, _CloudRepoSourceContext_OneofUnmarshaler, _CloudRepoSourceContext_OneofSizer, []interface{}{ + (*CloudRepoSourceContext_RevisionId)(nil), + (*CloudRepoSourceContext_AliasName)(nil), + (*CloudRepoSourceContext_AliasContext)(nil), + } +} + +func _CloudRepoSourceContext_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CloudRepoSourceContext) + // revision + switch x := m.Revision.(type) { + case *CloudRepoSourceContext_RevisionId: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.RevisionId) + case *CloudRepoSourceContext_AliasName: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AliasName) + case *CloudRepoSourceContext_AliasContext: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AliasContext); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CloudRepoSourceContext.Revision has unexpected type %T", x) + } + return nil +} + +func _CloudRepoSourceContext_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CloudRepoSourceContext) + switch tag { + case 2: // revision.revision_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Revision = &CloudRepoSourceContext_RevisionId{x} + return true, err + case 3: // revision.alias_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Revision = &CloudRepoSourceContext_AliasName{x} + return 
true, err + case 4: // revision.alias_context + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AliasContext) + err := b.DecodeMessage(msg) + m.Revision = &CloudRepoSourceContext_AliasContext{msg} + return true, err + default: + return false, nil + } +} + +func _CloudRepoSourceContext_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CloudRepoSourceContext) + // revision + switch x := m.Revision.(type) { + case *CloudRepoSourceContext_RevisionId: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RevisionId))) + n += len(x.RevisionId) + case *CloudRepoSourceContext_AliasName: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AliasName))) + n += len(x.AliasName) + case *CloudRepoSourceContext_AliasContext: + s := proto.Size(x.AliasContext) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A CloudWorkspaceSourceContext denotes a workspace at a particular snapshot. +type CloudWorkspaceSourceContext struct { + // The ID of the workspace. + WorkspaceId *CloudWorkspaceId `protobuf:"bytes,1,opt,name=workspace_id,json=workspaceId" json:"workspace_id,omitempty"` + // The ID of the snapshot. + // An empty snapshot_id refers to the most recent snapshot. 
+ SnapshotId string `protobuf:"bytes,2,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` +} + +func (m *CloudWorkspaceSourceContext) Reset() { *m = CloudWorkspaceSourceContext{} } +func (m *CloudWorkspaceSourceContext) String() string { return proto.CompactTextString(m) } +func (*CloudWorkspaceSourceContext) ProtoMessage() {} +func (*CloudWorkspaceSourceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *CloudWorkspaceSourceContext) GetWorkspaceId() *CloudWorkspaceId { + if m != nil { + return m.WorkspaceId + } + return nil +} + +func (m *CloudWorkspaceSourceContext) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +// A SourceContext referring to a Gerrit project. +type GerritSourceContext struct { + // The URI of a running Gerrit instance. + HostUri string `protobuf:"bytes,1,opt,name=host_uri,json=hostUri" json:"host_uri,omitempty"` + // The full project name within the host. Projects may be nested, so + // "project/subproject" is a valid project name. + // The "repo name" is hostURI/project. + GerritProject string `protobuf:"bytes,2,opt,name=gerrit_project,json=gerritProject" json:"gerrit_project,omitempty"` + // A revision in a Gerrit project can be identified by either its revision ID + // or its alias. 
+ // + // Types that are valid to be assigned to Revision: + // *GerritSourceContext_RevisionId + // *GerritSourceContext_AliasName + // *GerritSourceContext_AliasContext + Revision isGerritSourceContext_Revision `protobuf_oneof:"revision"` +} + +func (m *GerritSourceContext) Reset() { *m = GerritSourceContext{} } +func (m *GerritSourceContext) String() string { return proto.CompactTextString(m) } +func (*GerritSourceContext) ProtoMessage() {} +func (*GerritSourceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type isGerritSourceContext_Revision interface { + isGerritSourceContext_Revision() +} + +type GerritSourceContext_RevisionId struct { + RevisionId string `protobuf:"bytes,3,opt,name=revision_id,json=revisionId,oneof"` +} +type GerritSourceContext_AliasName struct { + AliasName string `protobuf:"bytes,4,opt,name=alias_name,json=aliasName,oneof"` +} +type GerritSourceContext_AliasContext struct { + AliasContext *AliasContext `protobuf:"bytes,5,opt,name=alias_context,json=aliasContext,oneof"` +} + +func (*GerritSourceContext_RevisionId) isGerritSourceContext_Revision() {} +func (*GerritSourceContext_AliasName) isGerritSourceContext_Revision() {} +func (*GerritSourceContext_AliasContext) isGerritSourceContext_Revision() {} + +func (m *GerritSourceContext) GetRevision() isGerritSourceContext_Revision { + if m != nil { + return m.Revision + } + return nil +} + +func (m *GerritSourceContext) GetHostUri() string { + if m != nil { + return m.HostUri + } + return "" +} + +func (m *GerritSourceContext) GetGerritProject() string { + if m != nil { + return m.GerritProject + } + return "" +} + +func (m *GerritSourceContext) GetRevisionId() string { + if x, ok := m.GetRevision().(*GerritSourceContext_RevisionId); ok { + return x.RevisionId + } + return "" +} + +func (m *GerritSourceContext) GetAliasName() string { + if x, ok := m.GetRevision().(*GerritSourceContext_AliasName); ok { + return x.AliasName + } + return "" +} + +func (m 
*GerritSourceContext) GetAliasContext() *AliasContext { + if x, ok := m.GetRevision().(*GerritSourceContext_AliasContext); ok { + return x.AliasContext + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GerritSourceContext) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GerritSourceContext_OneofMarshaler, _GerritSourceContext_OneofUnmarshaler, _GerritSourceContext_OneofSizer, []interface{}{ + (*GerritSourceContext_RevisionId)(nil), + (*GerritSourceContext_AliasName)(nil), + (*GerritSourceContext_AliasContext)(nil), + } +} + +func _GerritSourceContext_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GerritSourceContext) + // revision + switch x := m.Revision.(type) { + case *GerritSourceContext_RevisionId: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.RevisionId) + case *GerritSourceContext_AliasName: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AliasName) + case *GerritSourceContext_AliasContext: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AliasContext); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GerritSourceContext.Revision has unexpected type %T", x) + } + return nil +} + +func _GerritSourceContext_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GerritSourceContext) + switch tag { + case 3: // revision.revision_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Revision = &GerritSourceContext_RevisionId{x} + return true, err + case 4: // revision.alias_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Revision = &GerritSourceContext_AliasName{x} + return true, err + 
case 5: // revision.alias_context + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AliasContext) + err := b.DecodeMessage(msg) + m.Revision = &GerritSourceContext_AliasContext{msg} + return true, err + default: + return false, nil + } +} + +func _GerritSourceContext_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GerritSourceContext) + // revision + switch x := m.Revision.(type) { + case *GerritSourceContext_RevisionId: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RevisionId))) + n += len(x.RevisionId) + case *GerritSourceContext_AliasName: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AliasName))) + n += len(x.AliasName) + case *GerritSourceContext_AliasContext: + s := proto.Size(x.AliasContext) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A GitSourceContext denotes a particular revision in a third party Git +// repository (e.g. GitHub). +type GitSourceContext struct { + // Git repository URL. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // Git commit hash. + // required. + RevisionId string `protobuf:"bytes,2,opt,name=revision_id,json=revisionId" json:"revision_id,omitempty"` +} + +func (m *GitSourceContext) Reset() { *m = GitSourceContext{} } +func (m *GitSourceContext) String() string { return proto.CompactTextString(m) } +func (*GitSourceContext) ProtoMessage() {} +func (*GitSourceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *GitSourceContext) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *GitSourceContext) GetRevisionId() string { + if m != nil { + return m.RevisionId + } + return "" +} + +// A unique identifier for a cloud repo. 
+type RepoId struct { + // A cloud repository can be identified by either its project ID and + // repository name combination, or its globally unique identifier. + // + // Types that are valid to be assigned to Id: + // *RepoId_ProjectRepoId + // *RepoId_Uid + Id isRepoId_Id `protobuf_oneof:"id"` +} + +func (m *RepoId) Reset() { *m = RepoId{} } +func (m *RepoId) String() string { return proto.CompactTextString(m) } +func (*RepoId) ProtoMessage() {} +func (*RepoId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isRepoId_Id interface { + isRepoId_Id() +} + +type RepoId_ProjectRepoId struct { + ProjectRepoId *ProjectRepoId `protobuf:"bytes,1,opt,name=project_repo_id,json=projectRepoId,oneof"` +} +type RepoId_Uid struct { + Uid string `protobuf:"bytes,2,opt,name=uid,oneof"` +} + +func (*RepoId_ProjectRepoId) isRepoId_Id() {} +func (*RepoId_Uid) isRepoId_Id() {} + +func (m *RepoId) GetId() isRepoId_Id { + if m != nil { + return m.Id + } + return nil +} + +func (m *RepoId) GetProjectRepoId() *ProjectRepoId { + if x, ok := m.GetId().(*RepoId_ProjectRepoId); ok { + return x.ProjectRepoId + } + return nil +} + +func (m *RepoId) GetUid() string { + if x, ok := m.GetId().(*RepoId_Uid); ok { + return x.Uid + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RepoId) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RepoId_OneofMarshaler, _RepoId_OneofUnmarshaler, _RepoId_OneofSizer, []interface{}{ + (*RepoId_ProjectRepoId)(nil), + (*RepoId_Uid)(nil), + } +} + +func _RepoId_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RepoId) + // id + switch x := m.Id.(type) { + case *RepoId_ProjectRepoId: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ProjectRepoId); err != nil { + return err + } + case *RepoId_Uid: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Uid) + case nil: + default: + return fmt.Errorf("RepoId.Id has unexpected type %T", x) + } + return nil +} + +func _RepoId_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RepoId) + switch tag { + case 1: // id.project_repo_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ProjectRepoId) + err := b.DecodeMessage(msg) + m.Id = &RepoId_ProjectRepoId{msg} + return true, err + case 2: // id.uid + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Id = &RepoId_Uid{x} + return true, err + default: + return false, nil + } +} + +func _RepoId_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RepoId) + // id + switch x := m.Id.(type) { + case *RepoId_ProjectRepoId: + s := proto.Size(x.ProjectRepoId) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RepoId_Uid: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Uid))) + n += len(x.Uid) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Selects a repo using a Google Cloud Platform project ID +// (e.g. 
winged-cargo-31) and a repo name within that project. +type ProjectRepoId struct { + // The ID of the project. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The name of the repo. Leave empty for the default repo. + RepoName string `protobuf:"bytes,2,opt,name=repo_name,json=repoName" json:"repo_name,omitempty"` +} + +func (m *ProjectRepoId) Reset() { *m = ProjectRepoId{} } +func (m *ProjectRepoId) String() string { return proto.CompactTextString(m) } +func (*ProjectRepoId) ProtoMessage() {} +func (*ProjectRepoId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ProjectRepoId) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ProjectRepoId) GetRepoName() string { + if m != nil { + return m.RepoName + } + return "" +} + +// A CloudWorkspaceId is a unique identifier for a cloud workspace. +// A cloud workspace is a place associated with a repo where modified files +// can be stored before they are committed. +type CloudWorkspaceId struct { + // The ID of the repo containing the workspace. + RepoId *RepoId `protobuf:"bytes,1,opt,name=repo_id,json=repoId" json:"repo_id,omitempty"` + // The unique name of the workspace within the repo. This is the name + // chosen by the client in the Source API's CreateWorkspace method. 
+ Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *CloudWorkspaceId) Reset() { *m = CloudWorkspaceId{} } +func (m *CloudWorkspaceId) String() string { return proto.CompactTextString(m) } +func (*CloudWorkspaceId) ProtoMessage() {} +func (*CloudWorkspaceId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *CloudWorkspaceId) GetRepoId() *RepoId { + if m != nil { + return m.RepoId + } + return nil +} + +func (m *CloudWorkspaceId) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*SourceContext)(nil), "google.devtools.source.v1.SourceContext") + proto.RegisterType((*ExtendedSourceContext)(nil), "google.devtools.source.v1.ExtendedSourceContext") + proto.RegisterType((*AliasContext)(nil), "google.devtools.source.v1.AliasContext") + proto.RegisterType((*CloudRepoSourceContext)(nil), "google.devtools.source.v1.CloudRepoSourceContext") + proto.RegisterType((*CloudWorkspaceSourceContext)(nil), "google.devtools.source.v1.CloudWorkspaceSourceContext") + proto.RegisterType((*GerritSourceContext)(nil), "google.devtools.source.v1.GerritSourceContext") + proto.RegisterType((*GitSourceContext)(nil), "google.devtools.source.v1.GitSourceContext") + proto.RegisterType((*RepoId)(nil), "google.devtools.source.v1.RepoId") + proto.RegisterType((*ProjectRepoId)(nil), "google.devtools.source.v1.ProjectRepoId") + proto.RegisterType((*CloudWorkspaceId)(nil), "google.devtools.source.v1.CloudWorkspaceId") + proto.RegisterEnum("google.devtools.source.v1.AliasContext_Kind", AliasContext_Kind_name, AliasContext_Kind_value) +} + +func init() { proto.RegisterFile("google/devtools/source/v1/source_context.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 780 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcb, 0x4e, 0xdb, 0x4c, + 0x14, 0x8e, 0xe3, 0x90, 0xe0, 0x13, 0x02, 0xd1, 0xfc, 0x7f, 0xab, 0x00, 
0x45, 0x80, 0xa5, 0xaa, + 0x48, 0x54, 0x8e, 0x92, 0x4a, 0x55, 0x4b, 0x2b, 0x51, 0x02, 0x29, 0x89, 0xa0, 0x01, 0x4d, 0x29, + 0xbd, 0x6c, 0x22, 0x63, 0x8f, 0x8c, 0x8b, 0xf1, 0x58, 0xb6, 0x13, 0xe0, 0x25, 0xba, 0xe6, 0x19, + 0xfa, 0x4c, 0x7d, 0x84, 0x2e, 0x2b, 0x75, 0x5b, 0xcd, 0xc5, 0x90, 0x84, 0x60, 0x90, 0xda, 0x95, + 0x67, 0x8e, 0xbf, 0xef, 0x3b, 0x67, 0xce, 0x65, 0x06, 0x0c, 0x87, 0x52, 0xc7, 0x23, 0x55, 0x9b, + 0xf4, 0x63, 0x4a, 0xbd, 0xa8, 0x1a, 0xd1, 0x5e, 0x68, 0x91, 0x6a, 0xbf, 0x26, 0x57, 0x5d, 0x8b, + 0xfa, 0x31, 0x39, 0x8f, 0x8d, 0x20, 0xa4, 0x31, 0x45, 0xb3, 0x02, 0x6f, 0x24, 0x78, 0x43, 0xa0, + 0x8c, 0x7e, 0x6d, 0xee, 0x91, 0x94, 0x32, 0x03, 0xb7, 0x6a, 0xfa, 0x3e, 0x8d, 0xcd, 0xd8, 0xa5, + 0x7e, 0x24, 0x88, 0xfa, 0x8f, 0x2c, 0x94, 0xde, 0x73, 0xec, 0xa6, 0x10, 0x44, 0x18, 0xc0, 0xf2, + 0x68, 0xcf, 0xee, 0x86, 0x24, 0xa0, 0x15, 0x65, 0x49, 0x59, 0x29, 0xd6, 0x6b, 0xc6, 0xad, 0xfa, + 0xc6, 0x26, 0x03, 0x63, 0x12, 0xd0, 0x21, 0x99, 0x56, 0x06, 0x6b, 0x56, 0xf2, 0x07, 0x99, 0x30, + 0x23, 0x34, 0xcf, 0x68, 0x78, 0x12, 0x05, 0xa6, 0x45, 0x2a, 0x59, 0x2e, 0xfc, 0xfc, 0x2e, 0xe1, + 0x8f, 0x09, 0x61, 0x54, 0x7d, 0xda, 0x1a, 0xfa, 0x8d, 0x5a, 0x90, 0x77, 0x48, 0x18, 0xba, 0x71, + 0x45, 0xe5, 0xca, 0x46, 0x8a, 0xf2, 0x36, 0x07, 0x8e, 0x2a, 0x4a, 0x3e, 0x5a, 0x07, 0xd5, 0x71, + 0xe3, 0x4a, 0x9e, 0xcb, 0xac, 0xa6, 0xc9, 0xdc, 0xd4, 0x60, 0xcc, 0x86, 0x06, 0x05, 0x59, 0x1d, + 0xfd, 0xa7, 0x02, 0x0f, 0x9a, 0xe7, 0x31, 0xf1, 0x6d, 0x62, 0x0f, 0xa7, 0xb9, 0x71, 0x05, 0x92, + 0x39, 0x5e, 0x49, 0xf1, 0x34, 0x44, 0xc5, 0x09, 0x11, 0x1d, 0x40, 0xde, 0x33, 0x8f, 0x88, 0x17, + 0x55, 0xb2, 0x4b, 0xea, 0x4a, 0xb1, 0xfe, 0x3a, 0x45, 0x62, 0x6c, 0x14, 0xc6, 0x2e, 0xa7, 0x37, + 0xfd, 0x38, 0xbc, 0xc0, 0x52, 0x6b, 0xee, 0x25, 0x14, 0x07, 0xcc, 0xa8, 0x0c, 0xea, 0x09, 0xb9, + 0xe0, 0x41, 0x6a, 0x98, 0x2d, 0xd1, 0xff, 0x30, 0xd1, 0x37, 0xbd, 0x9e, 0xa8, 0xa1, 0x86, 0xc5, + 0x66, 0x2d, 0xfb, 0x42, 0xd1, 0x2f, 0x15, 0x98, 0xda, 0xf0, 0x5c, 0x33, 0x4a, 0x4e, 0xf9, 0x06, + 0x72, 0x27, 
0xae, 0x6f, 0x73, 0xf6, 0x74, 0xfd, 0x69, 0x4a, 0x7c, 0x83, 0x34, 0x63, 0xc7, 0xf5, + 0x6d, 0xcc, 0x99, 0x08, 0x41, 0xce, 0x37, 0x4f, 0x13, 0x5f, 0x7c, 0xad, 0xd7, 0x21, 0xc7, 0x10, + 0xa8, 0x00, 0xea, 0x46, 0xe7, 0x73, 0x39, 0x83, 0x34, 0x98, 0x78, 0xdb, 0xfe, 0xd4, 0xdc, 0x2a, + 0x2b, 0xa8, 0x08, 0x85, 0x77, 0x7b, 0x87, 0x1b, 0x8d, 0xdd, 0x66, 0x39, 0xcb, 0xec, 0x7b, 0x07, + 0xad, 0x26, 0x2e, 0xe7, 0xf4, 0x5f, 0x0a, 0x3c, 0x1c, 0xdf, 0xaa, 0x68, 0x0d, 0x0a, 0xac, 0xd7, + 0xbb, 0xae, 0x2d, 0x4b, 0xb1, 0x9c, 0x12, 0x27, 0xa3, 0xb7, 0x6d, 0x9c, 0x0f, 0xf9, 0x17, 0x2d, + 0x43, 0x31, 0x24, 0x7d, 0x37, 0x72, 0xa9, 0xcf, 0xf8, 0x3c, 0xca, 0x56, 0x06, 0x43, 0x62, 0x6c, + 0xdb, 0x68, 0x11, 0xc0, 0x64, 0x87, 0xeb, 0xf2, 0x73, 0xa8, 0x12, 0xa1, 0x71, 0x5b, 0xc7, 0x3c, + 0x25, 0xa8, 0x03, 0x25, 0x01, 0x48, 0x1a, 0x22, 0xc7, 0xa3, 0x78, 0x72, 0xcf, 0x6c, 0xb5, 0x32, + 0x78, 0xca, 0x1c, 0xd8, 0x37, 0x00, 0x26, 0x13, 0xf7, 0xfa, 0x37, 0x05, 0xe6, 0x53, 0x06, 0x09, + 0x75, 0x60, 0xea, 0x6a, 0x26, 0xaf, 0x13, 0xb0, 0x7a, 0xef, 0xb1, 0x6c, 0xdb, 0xb8, 0x78, 0x76, + 0xbd, 0x41, 0x8b, 0x50, 0x8c, 0x7c, 0x33, 0x88, 0x8e, 0x69, 0x7c, 0x95, 0x0f, 0x0c, 0x89, 0xa9, + 0x6d, 0xeb, 0xbf, 0x15, 0xf8, 0x6f, 0xcc, 0xfc, 0xa1, 0x59, 0x98, 0x3c, 0xa6, 0x51, 0xdc, 0xed, + 0x85, 0xae, 0xec, 0xb5, 0x02, 0xdb, 0x7f, 0x08, 0x5d, 0xf4, 0x18, 0xa6, 0xc5, 0x68, 0x76, 0x83, + 0x90, 0x7e, 0x25, 0x56, 0x2c, 0x65, 0x4b, 0xc2, 0xba, 0x2f, 0x8c, 0xa3, 0xa5, 0x50, 0xef, 0x2c, + 0x45, 0xee, 0x1e, 0xa5, 0x98, 0xf8, 0x77, 0xa5, 0x68, 0x42, 0x79, 0xf4, 0xc6, 0x60, 0xc3, 0xd5, + 0x0b, 0xbd, 0x64, 0xb8, 0x7a, 0xa1, 0xc7, 0x12, 0x78, 0xa3, 0xa1, 0x06, 0xcf, 0xa0, 0xf7, 0x21, + 0x2f, 0x7a, 0x10, 0x61, 0x98, 0x91, 0x09, 0xe9, 0x0e, 0xf7, 0x6f, 0xda, 0x55, 0x22, 0xb3, 0x25, + 0x24, 0x5a, 0x19, 0x5c, 0x0a, 0x06, 0x0d, 0x08, 0x81, 0xda, 0x1b, 0xe8, 0x63, 0xb6, 0x69, 0xe4, + 0x20, 0xeb, 0xda, 0xfa, 0x0e, 0x94, 0x86, 0xb8, 0x68, 0x01, 0x20, 0x71, 0x2f, 0x3d, 0x6b, 0x58, + 0x93, 0x96, 0xb6, 0x8d, 0xe6, 0x41, 0xe3, 0x51, 
0x0d, 0x4c, 0xef, 0x24, 0x33, 0xb0, 0x3c, 0xeb, + 0x47, 0x50, 0x1e, 0xed, 0xa3, 0xbf, 0x1a, 0xc3, 0x31, 0xb7, 0x44, 0xe3, 0x52, 0x81, 0x05, 0x8b, + 0x9e, 0xde, 0x2e, 0xd2, 0x40, 0x43, 0xc5, 0xd8, 0x67, 0x0f, 0xe2, 0xbe, 0xf2, 0x65, 0x5d, 0x12, + 0x1c, 0xea, 0x99, 0xbe, 0x63, 0xd0, 0xd0, 0xa9, 0x3a, 0xc4, 0xe7, 0xcf, 0x65, 0x55, 0xfc, 0x32, + 0x03, 0x37, 0x1a, 0xf3, 0x34, 0xbf, 0x12, 0xab, 0xef, 0xd9, 0xc5, 0x6d, 0xa1, 0xc0, 0x8f, 0x68, + 0x6c, 0x91, 0xfe, 0x01, 0x77, 0x2c, 0xbc, 0x19, 0x87, 0xb5, 0xa3, 0x3c, 0x57, 0x7b, 0xf6, 0x27, + 0x00, 0x00, 0xff, 0xff, 0x9e, 0xd0, 0x5c, 0x10, 0xe7, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/sourcerepo/v1/sourcerepo.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/sourcerepo/v1/sourcerepo.pb.go new file mode 100644 index 000000000..56422f581 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/sourcerepo/v1/sourcerepo.pb.go @@ -0,0 +1,634 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/sourcerepo/v1/sourcerepo.proto + +/* +Package sourcerepo is a generated protocol buffer package. + +It is generated from these files: + google/devtools/sourcerepo/v1/sourcerepo.proto + +It has these top-level messages: + Repo + MirrorConfig + GetRepoRequest + ListReposRequest + ListReposResponse + CreateRepoRequest + DeleteRepoRequest +*/ +package sourcerepo + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_iam_v11 "google.golang.org/genproto/googleapis/iam/v1" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A repository (or repo) is a Git repository storing versioned source content. +type Repo struct { + // Resource name of the repository, of the form + // `projects//repos/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The disk usage of the repo, in bytes. + // Only returned by GetRepo. + Size int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + // URL to clone the repository from Google Cloud Source Repositories. + Url string `protobuf:"bytes,3,opt,name=url" json:"url,omitempty"` + // How this repository mirrors a repository managed by another service. + MirrorConfig *MirrorConfig `protobuf:"bytes,4,opt,name=mirror_config,json=mirrorConfig" json:"mirror_config,omitempty"` +} + +func (m *Repo) Reset() { *m = Repo{} } +func (m *Repo) String() string { return proto.CompactTextString(m) } +func (*Repo) ProtoMessage() {} +func (*Repo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Repo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Repo) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *Repo) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Repo) GetMirrorConfig() *MirrorConfig { + if m != nil { + return m.MirrorConfig + } + return nil +} + +// Configuration to automatically mirror a repository from another +// hosting service, for example GitHub or BitBucket. +type MirrorConfig struct { + // URL of the main repository at the other hosting service. 
+ Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // ID of the webhook listening to updates to trigger mirroring. + // Removing this webook from the other hosting service will stop + // Google Cloud Source Repositories from receiving notifications, + // and thereby disabling mirroring. + WebhookId string `protobuf:"bytes,2,opt,name=webhook_id,json=webhookId" json:"webhook_id,omitempty"` + // ID of the SSH deploy key at the other hosting service. + // Removing this key from the other service would deauthorize + // Google Cloud Source Repositories from mirroring. + DeployKeyId string `protobuf:"bytes,3,opt,name=deploy_key_id,json=deployKeyId" json:"deploy_key_id,omitempty"` +} + +func (m *MirrorConfig) Reset() { *m = MirrorConfig{} } +func (m *MirrorConfig) String() string { return proto.CompactTextString(m) } +func (*MirrorConfig) ProtoMessage() {} +func (*MirrorConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *MirrorConfig) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *MirrorConfig) GetWebhookId() string { + if m != nil { + return m.WebhookId + } + return "" +} + +func (m *MirrorConfig) GetDeployKeyId() string { + if m != nil { + return m.DeployKeyId + } + return "" +} + +// Request for GetRepo. +type GetRepoRequest struct { + // The name of the requested repository. Values are of the form + // `projects//repos/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetRepoRequest) Reset() { *m = GetRepoRequest{} } +func (m *GetRepoRequest) String() string { return proto.CompactTextString(m) } +func (*GetRepoRequest) ProtoMessage() {} +func (*GetRepoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GetRepoRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for ListRepos. +type ListReposRequest struct { + // The project ID whose repos should be listed. 
Values are of the form + // `projects/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Maximum number of repositories to return; between 1 and 500. + // If not set or zero, defaults to 100 at the server. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Resume listing repositories where a prior ListReposResponse + // left off. This is an opaque token that must be obtained from + // a recent, prior ListReposResponse's next_page_token field. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListReposRequest) Reset() { *m = ListReposRequest{} } +func (m *ListReposRequest) String() string { return proto.CompactTextString(m) } +func (*ListReposRequest) ProtoMessage() {} +func (*ListReposRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListReposRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListReposRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListReposRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for ListRepos. The size is not set in the returned repositories. +type ListReposResponse struct { + // The listed repos. + Repos []*Repo `protobuf:"bytes,1,rep,name=repos" json:"repos,omitempty"` + // If non-empty, additional repositories exist within the project. These + // can be retrieved by including this value in the next ListReposRequest's + // page_token field. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListReposResponse) Reset() { *m = ListReposResponse{} } +func (m *ListReposResponse) String() string { return proto.CompactTextString(m) } +func (*ListReposResponse) ProtoMessage() {} +func (*ListReposResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ListReposResponse) GetRepos() []*Repo { + if m != nil { + return m.Repos + } + return nil +} + +func (m *ListReposResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for CreateRepo +type CreateRepoRequest struct { + // The project in which to create the repo. Values are of the form + // `projects/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The repo to create. Only name should be set; setting other fields + // is an error. The project in the name should match the parent field. + Repo *Repo `protobuf:"bytes,2,opt,name=repo" json:"repo,omitempty"` +} + +func (m *CreateRepoRequest) Reset() { *m = CreateRepoRequest{} } +func (m *CreateRepoRequest) String() string { return proto.CompactTextString(m) } +func (*CreateRepoRequest) ProtoMessage() {} +func (*CreateRepoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateRepoRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateRepoRequest) GetRepo() *Repo { + if m != nil { + return m.Repo + } + return nil +} + +// Request for DeleteRepo. +type DeleteRepoRequest struct { + // The name of the repo to delete. Values are of the form + // `projects//repos/`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteRepoRequest) Reset() { *m = DeleteRepoRequest{} } +func (m *DeleteRepoRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRepoRequest) ProtoMessage() {} +func (*DeleteRepoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DeleteRepoRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*Repo)(nil), "google.devtools.sourcerepo.v1.Repo") + proto.RegisterType((*MirrorConfig)(nil), "google.devtools.sourcerepo.v1.MirrorConfig") + proto.RegisterType((*GetRepoRequest)(nil), "google.devtools.sourcerepo.v1.GetRepoRequest") + proto.RegisterType((*ListReposRequest)(nil), "google.devtools.sourcerepo.v1.ListReposRequest") + proto.RegisterType((*ListReposResponse)(nil), "google.devtools.sourcerepo.v1.ListReposResponse") + proto.RegisterType((*CreateRepoRequest)(nil), "google.devtools.sourcerepo.v1.CreateRepoRequest") + proto.RegisterType((*DeleteRepoRequest)(nil), "google.devtools.sourcerepo.v1.DeleteRepoRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for SourceRepo service + +type SourceRepoClient interface { + // Returns all repos belonging to a project. The sizes of the repos are + // not set by ListRepos. To get the size of a repo, use GetRepo. + ListRepos(ctx context.Context, in *ListReposRequest, opts ...grpc.CallOption) (*ListReposResponse, error) + // Returns information about a repo. + GetRepo(ctx context.Context, in *GetRepoRequest, opts ...grpc.CallOption) (*Repo, error) + // Creates a repo in the given project with the given name. 
+ // + // If the named repository already exists, `CreateRepo` returns + // `ALREADY_EXISTS`. + CreateRepo(ctx context.Context, in *CreateRepoRequest, opts ...grpc.CallOption) (*Repo, error) + // Deletes a repo. + DeleteRepo(ctx context.Context, in *DeleteRepoRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a NOT_FOUND error. + TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +type sourceRepoClient struct { + cc *grpc.ClientConn +} + +func NewSourceRepoClient(cc *grpc.ClientConn) SourceRepoClient { + return &sourceRepoClient{cc} +} + +func (c *sourceRepoClient) ListRepos(ctx context.Context, in *ListReposRequest, opts ...grpc.CallOption) (*ListReposResponse, error) { + out := new(ListReposResponse) + err := grpc.Invoke(ctx, "/google.devtools.sourcerepo.v1.SourceRepo/ListRepos", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sourceRepoClient) GetRepo(ctx context.Context, in *GetRepoRequest, opts ...grpc.CallOption) (*Repo, error) { + out := new(Repo) + err := grpc.Invoke(ctx, "/google.devtools.sourcerepo.v1.SourceRepo/GetRepo", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *sourceRepoClient) CreateRepo(ctx context.Context, in *CreateRepoRequest, opts ...grpc.CallOption) (*Repo, error) { + out := new(Repo) + err := grpc.Invoke(ctx, "/google.devtools.sourcerepo.v1.SourceRepo/CreateRepo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sourceRepoClient) DeleteRepo(ctx context.Context, in *DeleteRepoRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.devtools.sourcerepo.v1.SourceRepo/DeleteRepo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sourceRepoClient) SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.devtools.sourcerepo.v1.SourceRepo/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sourceRepoClient) GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.devtools.sourcerepo.v1.SourceRepo/GetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sourceRepoClient) TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) { + out := new(google_iam_v11.TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.devtools.sourcerepo.v1.SourceRepo/TestIamPermissions", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for SourceRepo service + +type SourceRepoServer interface { + // Returns all repos belonging to a project. The sizes of the repos are + // not set by ListRepos. To get the size of a repo, use GetRepo. + ListRepos(context.Context, *ListReposRequest) (*ListReposResponse, error) + // Returns information about a repo. + GetRepo(context.Context, *GetRepoRequest) (*Repo, error) + // Creates a repo in the given project with the given name. + // + // If the named repository already exists, `CreateRepo` returns + // `ALREADY_EXISTS`. + CreateRepo(context.Context, *CreateRepoRequest) (*Repo, error) + // Deletes a repo. + DeleteRepo(context.Context, *DeleteRepoRequest) (*google_protobuf1.Empty, error) + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + SetIamPolicy(context.Context, *google_iam_v11.SetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(context.Context, *google_iam_v11.GetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a NOT_FOUND error. 
+ TestIamPermissions(context.Context, *google_iam_v11.TestIamPermissionsRequest) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +func RegisterSourceRepoServer(s *grpc.Server, srv SourceRepoServer) { + s.RegisterService(&_SourceRepo_serviceDesc, srv) +} + +func _SourceRepo_ListRepos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListReposRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SourceRepoServer).ListRepos(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.sourcerepo.v1.SourceRepo/ListRepos", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SourceRepoServer).ListRepos(ctx, req.(*ListReposRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SourceRepo_GetRepo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRepoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SourceRepoServer).GetRepo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.sourcerepo.v1.SourceRepo/GetRepo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SourceRepoServer).GetRepo(ctx, req.(*GetRepoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SourceRepo_CreateRepo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateRepoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SourceRepoServer).CreateRepo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.devtools.sourcerepo.v1.SourceRepo/CreateRepo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SourceRepoServer).CreateRepo(ctx, req.(*CreateRepoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SourceRepo_DeleteRepo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRepoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SourceRepoServer).DeleteRepo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.sourcerepo.v1.SourceRepo/DeleteRepo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SourceRepoServer).DeleteRepo(ctx, req.(*DeleteRepoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SourceRepo_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SourceRepoServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.sourcerepo.v1.SourceRepo/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SourceRepoServer).SetIamPolicy(ctx, req.(*google_iam_v11.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SourceRepo_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SourceRepoServer).GetIamPolicy(ctx, in) + } 
+ info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.sourcerepo.v1.SourceRepo/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SourceRepoServer).GetIamPolicy(ctx, req.(*google_iam_v11.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SourceRepo_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SourceRepoServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.sourcerepo.v1.SourceRepo/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SourceRepoServer).TestIamPermissions(ctx, req.(*google_iam_v11.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SourceRepo_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.sourcerepo.v1.SourceRepo", + HandlerType: (*SourceRepoServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListRepos", + Handler: _SourceRepo_ListRepos_Handler, + }, + { + MethodName: "GetRepo", + Handler: _SourceRepo_GetRepo_Handler, + }, + { + MethodName: "CreateRepo", + Handler: _SourceRepo_CreateRepo_Handler, + }, + { + MethodName: "DeleteRepo", + Handler: _SourceRepo_DeleteRepo_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _SourceRepo_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _SourceRepo_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _SourceRepo_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/sourcerepo/v1/sourcerepo.proto", +} + +func init() { 
proto.RegisterFile("google/devtools/sourcerepo/v1/sourcerepo.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 748 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xd1, 0x4e, 0x13, 0x4d, + 0x14, 0xce, 0xd0, 0x02, 0x7f, 0x0f, 0xe5, 0x07, 0x26, 0x81, 0x34, 0xc5, 0x92, 0xba, 0x28, 0xd6, + 0x12, 0x77, 0x05, 0x35, 0xc4, 0x1a, 0x13, 0x03, 0x9a, 0x86, 0xa8, 0x49, 0x53, 0xb8, 0xf2, 0xa6, + 0xd9, 0xb6, 0x87, 0x65, 0xa5, 0xbb, 0xb3, 0xee, 0x4c, 0xab, 0xd5, 0xa0, 0x09, 0x09, 0xf7, 0x46, + 0x1e, 0xc3, 0xc7, 0xf1, 0x15, 0x7c, 0x08, 0x2f, 0xcd, 0xcc, 0xee, 0xd2, 0x2d, 0xad, 0xed, 0xde, + 0xcd, 0x9c, 0xf3, 0x9d, 0xf3, 0x7d, 0xf3, 0xcd, 0xd9, 0x59, 0xd0, 0x2d, 0xc6, 0xac, 0x0e, 0x1a, + 0x6d, 0xec, 0x09, 0xc6, 0x3a, 0xdc, 0xe0, 0xac, 0xeb, 0xb7, 0xd0, 0x47, 0x8f, 0x19, 0xbd, 0x9d, + 0xd8, 0x4e, 0xf7, 0x7c, 0x26, 0x18, 0x2d, 0x04, 0x78, 0x3d, 0xc2, 0xeb, 0x31, 0x44, 0x6f, 0x27, + 0x7f, 0x2b, 0x6c, 0x67, 0x7a, 0xb6, 0x61, 0xba, 0x2e, 0x13, 0xa6, 0xb0, 0x99, 0xcb, 0x83, 0xe2, + 0xfc, 0x6a, 0x3c, 0xdb, 0x15, 0xa7, 0x61, 0x78, 0x23, 0x0c, 0xdb, 0xa6, 0x23, 0x39, 0x6d, 0xd3, + 0x69, 0x78, 0xac, 0x63, 0xb7, 0xfa, 0x61, 0x3e, 0x3f, 0x9c, 0x1f, 0xca, 0xad, 0x87, 0x39, 0xb5, + 0x6b, 0x76, 0x4f, 0x0c, 0x74, 0x3c, 0x11, 0x26, 0xb5, 0x1f, 0x04, 0xd2, 0x75, 0xf4, 0x18, 0xa5, + 0x90, 0x76, 0x4d, 0x07, 0x73, 0xa4, 0x48, 0x4a, 0x99, 0xba, 0x5a, 0xcb, 0x18, 0xb7, 0x3f, 0x63, + 0x6e, 0xa6, 0x48, 0x4a, 0xa9, 0xba, 0x5a, 0xd3, 0x65, 0x48, 0x75, 0xfd, 0x4e, 0x2e, 0xa5, 0x60, + 0x72, 0x49, 0x6b, 0xb0, 0xe8, 0xd8, 0xbe, 0xcf, 0xfc, 0x46, 0x8b, 0xb9, 0x27, 0xb6, 0x95, 0x4b, + 0x17, 0x49, 0x69, 0x61, 0x77, 0x5b, 0x9f, 0xe8, 0x83, 0xfe, 0x56, 0xd5, 0x1c, 0xa8, 0x92, 0x7a, + 0xd6, 0x89, 0xed, 0xb4, 0x16, 0x64, 0xe3, 0xd9, 0x88, 0x93, 0x0c, 0x38, 0x0b, 0x00, 0x1f, 0xb1, + 0x79, 0xca, 0xd8, 0x59, 0xc3, 0x6e, 0x2b, 0x7d, 0x99, 0x7a, 0x26, 0x8c, 0x1c, 0xb6, 0xa9, 0x06, + 0x8b, 0x6d, 0xf4, 0x3a, 0xac, 0xdf, 0x38, 0xc3, 0xbe, 0x44, 
0x04, 0x72, 0x17, 0x82, 0xe0, 0x6b, + 0xec, 0x1f, 0xb6, 0xb5, 0x3b, 0xf0, 0x7f, 0x15, 0x85, 0x3c, 0x7b, 0x1d, 0x3f, 0x74, 0x91, 0x8b, + 0x71, 0x16, 0x68, 0x4d, 0x58, 0x7e, 0x63, 0x73, 0x05, 0xe3, 0x13, 0x70, 0x74, 0x1d, 0x32, 0x9e, + 0x69, 0x61, 0xe3, 0xda, 0xaf, 0xd9, 0xfa, 0x7f, 0x32, 0x70, 0x24, 0x3d, 0x2b, 0x00, 0xa8, 0xa4, + 0x60, 0x67, 0xe8, 0x86, 0x5a, 0x14, 0xfc, 0x58, 0x06, 0xb4, 0x1e, 0xac, 0xc4, 0x38, 0xb8, 0xc7, + 0x5c, 0x8e, 0xf4, 0x29, 0xcc, 0x4a, 0xa7, 0x78, 0x8e, 0x14, 0x53, 0xa5, 0x85, 0xdd, 0xcd, 0x29, + 0x6e, 0xaa, 0x73, 0x04, 0x15, 0x74, 0x0b, 0x96, 0x5c, 0xfc, 0x24, 0x1a, 0x31, 0xce, 0xc0, 0xa1, + 0x45, 0x19, 0xae, 0x5d, 0xf3, 0xb6, 0x61, 0xe5, 0xc0, 0x47, 0x53, 0x60, 0xdc, 0x84, 0x35, 0x98, + 0xf3, 0x4c, 0x1f, 0x5d, 0x11, 0x1e, 0x2f, 0xdc, 0xd1, 0x3d, 0x48, 0xcb, 0xee, 0xaa, 0x53, 0x42, + 0x39, 0xaa, 0x40, 0xbb, 0x07, 0x2b, 0x2f, 0xb1, 0x83, 0xc3, 0x2c, 0x63, 0x2c, 0xdc, 0xfd, 0x33, + 0x0f, 0x70, 0xa4, 0xba, 0xa8, 0x81, 0xbc, 0x22, 0x90, 0xb9, 0xb6, 0x85, 0x1a, 0x53, 0x08, 0x6f, + 0x5e, 0x52, 0xfe, 0x61, 0xf2, 0x82, 0xc0, 0x71, 0x6d, 0xf3, 0xe2, 0xd7, 0xef, 0xab, 0x99, 0x02, + 0x5d, 0x97, 0x5f, 0xd0, 0x17, 0x29, 0xe9, 0xb9, 0xe7, 0xb3, 0xf7, 0xd8, 0x12, 0xdc, 0x28, 0x9f, + 0x1b, 0x81, 0xb7, 0x97, 0x04, 0xe6, 0xc3, 0xb1, 0xa1, 0x0f, 0xa6, 0x50, 0x0c, 0x8f, 0x57, 0x3e, + 0x89, 0x67, 0xda, 0x96, 0x12, 0x51, 0xa4, 0x1b, 0xe3, 0x44, 0x04, 0x1a, 0x8c, 0x72, 0xf9, 0x9c, + 0x7e, 0x27, 0x00, 0x83, 0xcb, 0xa3, 0xd3, 0x4e, 0x3b, 0x72, 0xcf, 0xc9, 0xd4, 0x6c, 0x2b, 0x35, + 0x77, 0xb5, 0x82, 0x52, 0x13, 0x4c, 0xc2, 0xa8, 0x29, 0x15, 0x75, 0xd1, 0xf4, 0x2b, 0xc0, 0xe0, + 0xa2, 0xa7, 0x2a, 0x1a, 0x99, 0x89, 0xfc, 0x5a, 0x54, 0x11, 0x3d, 0x54, 0xfa, 0x2b, 0xf9, 0x50, + 0x45, 0x96, 0x94, 0xa7, 0x59, 0x72, 0x49, 0x20, 0x7b, 0x84, 0xe2, 0xd0, 0x74, 0x6a, 0xea, 0xf9, + 0xa3, 0x5a, 0xd4, 0xd0, 0x36, 0x1d, 0x49, 0x19, 0x4f, 0x46, 0xa4, 0xab, 0x37, 0x30, 0x41, 0x56, + 0xab, 0x28, 0xce, 0xc7, 0x9a, 0xa1, 0x38, 0x7d, 0x0c, 0xb4, 0x8f, 0xe5, 0xad, 0xf0, 0x58, 0xdb, + 
0x0a, 0x29, 0xd3, 0x0b, 0x02, 0xd9, 0xea, 0x24, 0x1d, 0xd5, 0xe4, 0x3a, 0xf6, 0x94, 0x8e, 0x1d, + 0x9a, 0x44, 0x87, 0x15, 0xe7, 0xfc, 0x49, 0x80, 0x1e, 0x23, 0x57, 0x11, 0xf4, 0x1d, 0x9b, 0x73, + 0xf9, 0x93, 0xa1, 0xa5, 0x1b, 0x34, 0xa3, 0x90, 0x48, 0xd0, 0xfd, 0x04, 0xc8, 0xf0, 0xc3, 0x79, + 0xa1, 0x44, 0x56, 0xb4, 0x27, 0x09, 0x44, 0x8a, 0x91, 0x36, 0x15, 0x52, 0xde, 0xff, 0x06, 0xb7, + 0x5b, 0xcc, 0x99, 0x3c, 0x31, 0xfb, 0x4b, 0x83, 0xc7, 0xa1, 0x26, 0x27, 0xa4, 0x46, 0xde, 0x55, + 0xc3, 0x0a, 0x8b, 0x75, 0x4c, 0xd7, 0xd2, 0x99, 0x6f, 0x19, 0x16, 0xba, 0x6a, 0x7e, 0x8c, 0x20, + 0x65, 0x7a, 0x36, 0xff, 0xc7, 0x9f, 0xfb, 0xd9, 0x60, 0xd7, 0x9c, 0x53, 0x35, 0x8f, 0xfe, 0x06, + 0x00, 0x00, 0xff, 0xff, 0x30, 0x80, 0x85, 0x9e, 0xec, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/example/library/v1/library.pb.go b/vendor/google.golang.org/genproto/googleapis/example/library/v1/library.pb.go new file mode 100644 index 000000000..5fa997526 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/example/library/v1/library.pb.go @@ -0,0 +1,981 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/example/library/v1/library.proto + +/* +Package library is a generated protocol buffer package. 
+ +It is generated from these files: + google/example/library/v1/library.proto + +It has these top-level messages: + Book + Shelf + CreateShelfRequest + GetShelfRequest + ListShelvesRequest + ListShelvesResponse + DeleteShelfRequest + MergeShelvesRequest + CreateBookRequest + GetBookRequest + ListBooksRequest + ListBooksResponse + UpdateBookRequest + DeleteBookRequest + MoveBookRequest +*/ +package library + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A single book in the library. +type Book struct { + // The resource name of the book. + // Book names have the form `shelves/{shelf_id}/books/{book_id}`. + // The name is ignored when creating a book. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of the book author. + Author string `protobuf:"bytes,2,opt,name=author" json:"author,omitempty"` + // The title of the book. + Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` + // Value indicating whether the book has been read. 
+ Read bool `protobuf:"varint,4,opt,name=read" json:"read,omitempty"` +} + +func (m *Book) Reset() { *m = Book{} } +func (m *Book) String() string { return proto.CompactTextString(m) } +func (*Book) ProtoMessage() {} +func (*Book) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Book) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Book) GetAuthor() string { + if m != nil { + return m.Author + } + return "" +} + +func (m *Book) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Book) GetRead() bool { + if m != nil { + return m.Read + } + return false +} + +// A Shelf contains a collection of books with a theme. +type Shelf struct { + // The resource name of the shelf. + // Shelf names have the form `shelves/{shelf_id}`. + // The name is ignored when creating a shelf. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The theme of the shelf + Theme string `protobuf:"bytes,2,opt,name=theme" json:"theme,omitempty"` +} + +func (m *Shelf) Reset() { *m = Shelf{} } +func (m *Shelf) String() string { return proto.CompactTextString(m) } +func (*Shelf) ProtoMessage() {} +func (*Shelf) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Shelf) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Shelf) GetTheme() string { + if m != nil { + return m.Theme + } + return "" +} + +// Request message for LibraryService.CreateShelf. +type CreateShelfRequest struct { + // The shelf to create. 
+ Shelf *Shelf `protobuf:"bytes,1,opt,name=shelf" json:"shelf,omitempty"` +} + +func (m *CreateShelfRequest) Reset() { *m = CreateShelfRequest{} } +func (m *CreateShelfRequest) String() string { return proto.CompactTextString(m) } +func (*CreateShelfRequest) ProtoMessage() {} +func (*CreateShelfRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *CreateShelfRequest) GetShelf() *Shelf { + if m != nil { + return m.Shelf + } + return nil +} + +// Request message for LibraryService.GetShelf. +type GetShelfRequest struct { + // The name of the shelf to retrieve. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetShelfRequest) Reset() { *m = GetShelfRequest{} } +func (m *GetShelfRequest) String() string { return proto.CompactTextString(m) } +func (*GetShelfRequest) ProtoMessage() {} +func (*GetShelfRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GetShelfRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for LibraryService.ListShelves. +type ListShelvesRequest struct { + // Requested page size. Server may return fewer shelves than requested. + // If unspecified, server will pick an appropriate default. + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // A token identifying a page of results the server should return. + // Typically, this is the value of + // [ListShelvesResponse.next_page_token][google.example.library.v1.ListShelvesResponse.next_page_token] + // returned from the previous call to `ListShelves` method. 
+ PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListShelvesRequest) Reset() { *m = ListShelvesRequest{} } +func (m *ListShelvesRequest) String() string { return proto.CompactTextString(m) } +func (*ListShelvesRequest) ProtoMessage() {} +func (*ListShelvesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ListShelvesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListShelvesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for LibraryService.ListShelves. +type ListShelvesResponse struct { + // The list of shelves. + Shelves []*Shelf `protobuf:"bytes,1,rep,name=shelves" json:"shelves,omitempty"` + // A token to retrieve next page of results. + // Pass this value in the + // [ListShelvesRequest.page_token][google.example.library.v1.ListShelvesRequest.page_token] + // field in the subsequent call to `ListShelves` method to retrieve the next + // page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListShelvesResponse) Reset() { *m = ListShelvesResponse{} } +func (m *ListShelvesResponse) String() string { return proto.CompactTextString(m) } +func (*ListShelvesResponse) ProtoMessage() {} +func (*ListShelvesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListShelvesResponse) GetShelves() []*Shelf { + if m != nil { + return m.Shelves + } + return nil +} + +func (m *ListShelvesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for LibraryService.DeleteShelf. +type DeleteShelfRequest struct { + // The name of the shelf to delete. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteShelfRequest) Reset() { *m = DeleteShelfRequest{} } +func (m *DeleteShelfRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteShelfRequest) ProtoMessage() {} +func (*DeleteShelfRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DeleteShelfRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Describes the shelf being removed (other_shelf_name) and updated +// (name) in this merge. +type MergeShelvesRequest struct { + // The name of the shelf we're adding books to. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of the shelf we're removing books from and deleting. + OtherShelfName string `protobuf:"bytes,2,opt,name=other_shelf_name,json=otherShelfName" json:"other_shelf_name,omitempty"` +} + +func (m *MergeShelvesRequest) Reset() { *m = MergeShelvesRequest{} } +func (m *MergeShelvesRequest) String() string { return proto.CompactTextString(m) } +func (*MergeShelvesRequest) ProtoMessage() {} +func (*MergeShelvesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *MergeShelvesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MergeShelvesRequest) GetOtherShelfName() string { + if m != nil { + return m.OtherShelfName + } + return "" +} + +// Request message for LibraryService.CreateBook. +type CreateBookRequest struct { + // The name of the shelf in which the book is created. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The book to create. 
+ Book *Book `protobuf:"bytes,2,opt,name=book" json:"book,omitempty"` +} + +func (m *CreateBookRequest) Reset() { *m = CreateBookRequest{} } +func (m *CreateBookRequest) String() string { return proto.CompactTextString(m) } +func (*CreateBookRequest) ProtoMessage() {} +func (*CreateBookRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CreateBookRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateBookRequest) GetBook() *Book { + if m != nil { + return m.Book + } + return nil +} + +// Request message for LibraryService.GetBook. +type GetBookRequest struct { + // The name of the book to retrieve. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetBookRequest) Reset() { *m = GetBookRequest{} } +func (m *GetBookRequest) String() string { return proto.CompactTextString(m) } +func (*GetBookRequest) ProtoMessage() {} +func (*GetBookRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *GetBookRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for LibraryService.ListBooks. +type ListBooksRequest struct { + // The name of the shelf whose books we'd like to list. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Requested page size. Server may return fewer books than requested. + // If unspecified, server will pick an appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // A token identifying a page of results the server should return. + // Typically, this is the value of + // [ListBooksResponse.next_page_token][google.example.library.v1.ListBooksResponse.next_page_token]. + // returned from the previous call to `ListBooks` method. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListBooksRequest) Reset() { *m = ListBooksRequest{} } +func (m *ListBooksRequest) String() string { return proto.CompactTextString(m) } +func (*ListBooksRequest) ProtoMessage() {} +func (*ListBooksRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ListBooksRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListBooksRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListBooksRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response message for LibraryService.ListBooks. +type ListBooksResponse struct { + // The list of books. + Books []*Book `protobuf:"bytes,1,rep,name=books" json:"books,omitempty"` + // A token to retrieve next page of results. + // Pass this value in the + // [ListBooksRequest.page_token][google.example.library.v1.ListBooksRequest.page_token] + // field in the subsequent call to `ListBooks` method to retrieve the next + // page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListBooksResponse) Reset() { *m = ListBooksResponse{} } +func (m *ListBooksResponse) String() string { return proto.CompactTextString(m) } +func (*ListBooksResponse) ProtoMessage() {} +func (*ListBooksResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ListBooksResponse) GetBooks() []*Book { + if m != nil { + return m.Books + } + return nil +} + +func (m *ListBooksResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for LibraryService.UpdateBook. +type UpdateBookRequest struct { + // The name of the book to update. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The book to update with. The name must match or be empty. + Book *Book `protobuf:"bytes,2,opt,name=book" json:"book,omitempty"` +} + +func (m *UpdateBookRequest) Reset() { *m = UpdateBookRequest{} } +func (m *UpdateBookRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateBookRequest) ProtoMessage() {} +func (*UpdateBookRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *UpdateBookRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateBookRequest) GetBook() *Book { + if m != nil { + return m.Book + } + return nil +} + +// Request message for LibraryService.DeleteBook. +type DeleteBookRequest struct { + // The name of the book to delete. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteBookRequest) Reset() { *m = DeleteBookRequest{} } +func (m *DeleteBookRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteBookRequest) ProtoMessage() {} +func (*DeleteBookRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *DeleteBookRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Describes what book to move (name) and what shelf we're moving it +// to (other_shelf_name). +type MoveBookRequest struct { + // The name of the book to move. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of the destination shelf. 
+ OtherShelfName string `protobuf:"bytes,2,opt,name=other_shelf_name,json=otherShelfName" json:"other_shelf_name,omitempty"` +} + +func (m *MoveBookRequest) Reset() { *m = MoveBookRequest{} } +func (m *MoveBookRequest) String() string { return proto.CompactTextString(m) } +func (*MoveBookRequest) ProtoMessage() {} +func (*MoveBookRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *MoveBookRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MoveBookRequest) GetOtherShelfName() string { + if m != nil { + return m.OtherShelfName + } + return "" +} + +func init() { + proto.RegisterType((*Book)(nil), "google.example.library.v1.Book") + proto.RegisterType((*Shelf)(nil), "google.example.library.v1.Shelf") + proto.RegisterType((*CreateShelfRequest)(nil), "google.example.library.v1.CreateShelfRequest") + proto.RegisterType((*GetShelfRequest)(nil), "google.example.library.v1.GetShelfRequest") + proto.RegisterType((*ListShelvesRequest)(nil), "google.example.library.v1.ListShelvesRequest") + proto.RegisterType((*ListShelvesResponse)(nil), "google.example.library.v1.ListShelvesResponse") + proto.RegisterType((*DeleteShelfRequest)(nil), "google.example.library.v1.DeleteShelfRequest") + proto.RegisterType((*MergeShelvesRequest)(nil), "google.example.library.v1.MergeShelvesRequest") + proto.RegisterType((*CreateBookRequest)(nil), "google.example.library.v1.CreateBookRequest") + proto.RegisterType((*GetBookRequest)(nil), "google.example.library.v1.GetBookRequest") + proto.RegisterType((*ListBooksRequest)(nil), "google.example.library.v1.ListBooksRequest") + proto.RegisterType((*ListBooksResponse)(nil), "google.example.library.v1.ListBooksResponse") + proto.RegisterType((*UpdateBookRequest)(nil), "google.example.library.v1.UpdateBookRequest") + proto.RegisterType((*DeleteBookRequest)(nil), "google.example.library.v1.DeleteBookRequest") + proto.RegisterType((*MoveBookRequest)(nil), 
"google.example.library.v1.MoveBookRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for LibraryService service + +type LibraryServiceClient interface { + // Creates a shelf, and returns the new Shelf. + CreateShelf(ctx context.Context, in *CreateShelfRequest, opts ...grpc.CallOption) (*Shelf, error) + // Gets a shelf. Returns NOT_FOUND if the shelf does not exist. + GetShelf(ctx context.Context, in *GetShelfRequest, opts ...grpc.CallOption) (*Shelf, error) + // Lists shelves. The order is unspecified but deterministic. Newly created + // shelves will not necessarily be added to the end of this list. + ListShelves(ctx context.Context, in *ListShelvesRequest, opts ...grpc.CallOption) (*ListShelvesResponse, error) + // Deletes a shelf. Returns NOT_FOUND if the shelf does not exist. + DeleteShelf(ctx context.Context, in *DeleteShelfRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Merges two shelves by adding all books from the shelf named + // `other_shelf_name` to shelf `name`, and deletes + // `other_shelf_name`. Returns the updated shelf. + // The book ids of the moved books may not be the same as the original books. + // + // Returns NOT_FOUND if either shelf does not exist. + // This call is a no-op if the specified shelves are the same. + MergeShelves(ctx context.Context, in *MergeShelvesRequest, opts ...grpc.CallOption) (*Shelf, error) + // Creates a book, and returns the new Book. + CreateBook(ctx context.Context, in *CreateBookRequest, opts ...grpc.CallOption) (*Book, error) + // Gets a book. Returns NOT_FOUND if the book does not exist. + GetBook(ctx context.Context, in *GetBookRequest, opts ...grpc.CallOption) (*Book, error) + // Lists books in a shelf. 
The order is unspecified but deterministic. Newly + // created books will not necessarily be added to the end of this list. + // Returns NOT_FOUND if the shelf does not exist. + ListBooks(ctx context.Context, in *ListBooksRequest, opts ...grpc.CallOption) (*ListBooksResponse, error) + // Deletes a book. Returns NOT_FOUND if the book does not exist. + DeleteBook(ctx context.Context, in *DeleteBookRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Updates a book. Returns INVALID_ARGUMENT if the name of the book + // is non-empty and does equal the previous name. + UpdateBook(ctx context.Context, in *UpdateBookRequest, opts ...grpc.CallOption) (*Book, error) + // Moves a book to another shelf, and returns the new book. The book + // id of the new book may not be the same as the original book. + MoveBook(ctx context.Context, in *MoveBookRequest, opts ...grpc.CallOption) (*Book, error) +} + +type libraryServiceClient struct { + cc *grpc.ClientConn +} + +func NewLibraryServiceClient(cc *grpc.ClientConn) LibraryServiceClient { + return &libraryServiceClient{cc} +} + +func (c *libraryServiceClient) CreateShelf(ctx context.Context, in *CreateShelfRequest, opts ...grpc.CallOption) (*Shelf, error) { + out := new(Shelf) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/CreateShelf", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) GetShelf(ctx context.Context, in *GetShelfRequest, opts ...grpc.CallOption) (*Shelf, error) { + out := new(Shelf) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/GetShelf", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) ListShelves(ctx context.Context, in *ListShelvesRequest, opts ...grpc.CallOption) (*ListShelvesResponse, error) { + out := new(ListShelvesResponse) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/ListShelves", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) DeleteShelf(ctx context.Context, in *DeleteShelfRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/DeleteShelf", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) MergeShelves(ctx context.Context, in *MergeShelvesRequest, opts ...grpc.CallOption) (*Shelf, error) { + out := new(Shelf) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/MergeShelves", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) CreateBook(ctx context.Context, in *CreateBookRequest, opts ...grpc.CallOption) (*Book, error) { + out := new(Book) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/CreateBook", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) GetBook(ctx context.Context, in *GetBookRequest, opts ...grpc.CallOption) (*Book, error) { + out := new(Book) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/GetBook", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) ListBooks(ctx context.Context, in *ListBooksRequest, opts ...grpc.CallOption) (*ListBooksResponse, error) { + out := new(ListBooksResponse) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/ListBooks", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) DeleteBook(ctx context.Context, in *DeleteBookRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/DeleteBook", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) UpdateBook(ctx context.Context, in *UpdateBookRequest, opts ...grpc.CallOption) (*Book, error) { + out := new(Book) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/UpdateBook", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *libraryServiceClient) MoveBook(ctx context.Context, in *MoveBookRequest, opts ...grpc.CallOption) (*Book, error) { + out := new(Book) + err := grpc.Invoke(ctx, "/google.example.library.v1.LibraryService/MoveBook", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for LibraryService service + +type LibraryServiceServer interface { + // Creates a shelf, and returns the new Shelf. + CreateShelf(context.Context, *CreateShelfRequest) (*Shelf, error) + // Gets a shelf. Returns NOT_FOUND if the shelf does not exist. + GetShelf(context.Context, *GetShelfRequest) (*Shelf, error) + // Lists shelves. The order is unspecified but deterministic. Newly created + // shelves will not necessarily be added to the end of this list. + ListShelves(context.Context, *ListShelvesRequest) (*ListShelvesResponse, error) + // Deletes a shelf. Returns NOT_FOUND if the shelf does not exist. + DeleteShelf(context.Context, *DeleteShelfRequest) (*google_protobuf1.Empty, error) + // Merges two shelves by adding all books from the shelf named + // `other_shelf_name` to shelf `name`, and deletes + // `other_shelf_name`. Returns the updated shelf. 
+ // The book ids of the moved books may not be the same as the original books. + // + // Returns NOT_FOUND if either shelf does not exist. + // This call is a no-op if the specified shelves are the same. + MergeShelves(context.Context, *MergeShelvesRequest) (*Shelf, error) + // Creates a book, and returns the new Book. + CreateBook(context.Context, *CreateBookRequest) (*Book, error) + // Gets a book. Returns NOT_FOUND if the book does not exist. + GetBook(context.Context, *GetBookRequest) (*Book, error) + // Lists books in a shelf. The order is unspecified but deterministic. Newly + // created books will not necessarily be added to the end of this list. + // Returns NOT_FOUND if the shelf does not exist. + ListBooks(context.Context, *ListBooksRequest) (*ListBooksResponse, error) + // Deletes a book. Returns NOT_FOUND if the book does not exist. + DeleteBook(context.Context, *DeleteBookRequest) (*google_protobuf1.Empty, error) + // Updates a book. Returns INVALID_ARGUMENT if the name of the book + // is non-empty and does equal the previous name. + UpdateBook(context.Context, *UpdateBookRequest) (*Book, error) + // Moves a book to another shelf, and returns the new book. The book + // id of the new book may not be the same as the original book. 
+ MoveBook(context.Context, *MoveBookRequest) (*Book, error) +} + +func RegisterLibraryServiceServer(s *grpc.Server, srv LibraryServiceServer) { + s.RegisterService(&_LibraryService_serviceDesc, srv) +} + +func _LibraryService_CreateShelf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateShelfRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).CreateShelf(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/CreateShelf", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).CreateShelf(ctx, req.(*CreateShelfRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_GetShelf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetShelfRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).GetShelf(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/GetShelf", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).GetShelf(ctx, req.(*GetShelfRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_ListShelves_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListShelvesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).ListShelves(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.example.library.v1.LibraryService/ListShelves", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).ListShelves(ctx, req.(*ListShelvesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_DeleteShelf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteShelfRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).DeleteShelf(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/DeleteShelf", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).DeleteShelf(ctx, req.(*DeleteShelfRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_MergeShelves_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MergeShelvesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).MergeShelves(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/MergeShelves", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).MergeShelves(ctx, req.(*MergeShelvesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_CreateBook_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBookRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).CreateBook(ctx, in) + } + info 
:= &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/CreateBook", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).CreateBook(ctx, req.(*CreateBookRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_GetBook_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBookRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).GetBook(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/GetBook", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).GetBook(ctx, req.(*GetBookRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_ListBooks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBooksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).ListBooks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/ListBooks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).ListBooks(ctx, req.(*ListBooksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_DeleteBook_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteBookRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).DeleteBook(ctx, in) 
+ } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/DeleteBook", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).DeleteBook(ctx, req.(*DeleteBookRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_UpdateBook_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBookRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).UpdateBook(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/UpdateBook", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).UpdateBook(ctx, req.(*UpdateBookRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LibraryService_MoveBook_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveBookRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LibraryServiceServer).MoveBook(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.example.library.v1.LibraryService/MoveBook", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LibraryServiceServer).MoveBook(ctx, req.(*MoveBookRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LibraryService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.example.library.v1.LibraryService", + HandlerType: (*LibraryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateShelf", + Handler: _LibraryService_CreateShelf_Handler, + }, + { + MethodName: "GetShelf", + Handler: 
_LibraryService_GetShelf_Handler, + }, + { + MethodName: "ListShelves", + Handler: _LibraryService_ListShelves_Handler, + }, + { + MethodName: "DeleteShelf", + Handler: _LibraryService_DeleteShelf_Handler, + }, + { + MethodName: "MergeShelves", + Handler: _LibraryService_MergeShelves_Handler, + }, + { + MethodName: "CreateBook", + Handler: _LibraryService_CreateBook_Handler, + }, + { + MethodName: "GetBook", + Handler: _LibraryService_GetBook_Handler, + }, + { + MethodName: "ListBooks", + Handler: _LibraryService_ListBooks_Handler, + }, + { + MethodName: "DeleteBook", + Handler: _LibraryService_DeleteBook_Handler, + }, + { + MethodName: "UpdateBook", + Handler: _LibraryService_UpdateBook_Handler, + }, + { + MethodName: "MoveBook", + Handler: _LibraryService_MoveBook_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/example/library/v1/library.proto", +} + +func init() { proto.RegisterFile("google/example/library/v1/library.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 838 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdb, 0x4e, 0xdb, 0x48, + 0x18, 0x96, 0x21, 0x81, 0xf0, 0x87, 0x53, 0x06, 0x84, 0xb2, 0x26, 0x2c, 0xd9, 0x11, 0x87, 0x6c, + 0x96, 0xb5, 0x05, 0x68, 0xf7, 0x22, 0x55, 0xa5, 0x8a, 0xb6, 0xe2, 0x06, 0xda, 0x28, 0x69, 0x6f, + 0x2a, 0xa4, 0xc8, 0x81, 0xc1, 0xb1, 0x48, 0x3c, 0xc6, 0x36, 0x11, 0x07, 0xf5, 0xa2, 0x95, 0x10, + 0x52, 0x6f, 0xfb, 0x0a, 0x7d, 0xa3, 0xbe, 0x42, 0x1f, 0xa4, 0x9a, 0x83, 0xc1, 0x24, 0x66, 0x9c, + 0x5e, 0xf4, 0xce, 0x33, 0xf3, 0xcd, 0xf7, 0x7f, 0xff, 0x61, 0xbe, 0x04, 0x36, 0x6d, 0x4a, 0xed, + 0x2e, 0x31, 0xc9, 0xa5, 0xd5, 0xf3, 0xba, 0xc4, 0xec, 0x3a, 0x6d, 0xdf, 0xf2, 0xaf, 0xcc, 0xfe, + 0x76, 0xf4, 0x69, 0x78, 0x3e, 0x0d, 0x29, 0xfa, 0x43, 0x00, 0x0d, 0x09, 0x34, 0xa2, 0xd3, 0xfe, + 0xb6, 0x5e, 0x92, 0x1c, 0x96, 0xe7, 0x98, 0x96, 0xeb, 0xd2, 0xd0, 0x0a, 0x1d, 0xea, 0x06, 0xe2, + 0xa2, 0xbe, 0x2c, 0x4f, 0xf9, 0xaa, 0x7d, 0x71, 
0x6a, 0x92, 0x9e, 0x17, 0x4a, 0x56, 0x7c, 0x04, + 0x99, 0x3d, 0x4a, 0xcf, 0x10, 0x82, 0x8c, 0x6b, 0xf5, 0x48, 0x51, 0x2b, 0x6b, 0x95, 0xa9, 0x06, + 0xff, 0x46, 0x4b, 0x30, 0x61, 0x5d, 0x84, 0x1d, 0xea, 0x17, 0xc7, 0xf8, 0xae, 0x5c, 0xa1, 0x45, + 0xc8, 0x86, 0x4e, 0xd8, 0x25, 0xc5, 0x71, 0xbe, 0x2d, 0x16, 0x8c, 0xc1, 0x27, 0xd6, 0x49, 0x31, + 0x53, 0xd6, 0x2a, 0xb9, 0x06, 0xff, 0xc6, 0xdb, 0x90, 0x6d, 0x76, 0x48, 0xf7, 0x34, 0x91, 0x9e, + 0xd1, 0x74, 0x48, 0x8f, 0x48, 0x76, 0xb1, 0xc0, 0x07, 0x80, 0x5e, 0xfa, 0xc4, 0x0a, 0x09, 0xbf, + 0xd8, 0x20, 0xe7, 0x17, 0x24, 0x08, 0xd1, 0xff, 0x90, 0x0d, 0xd8, 0x9a, 0x13, 0xe4, 0x77, 0xca, + 0xc6, 0x93, 0xc5, 0x30, 0xc4, 0x3d, 0x01, 0xc7, 0xeb, 0x30, 0xb7, 0x4f, 0xc2, 0x47, 0x54, 0x09, + 0x52, 0x70, 0x1d, 0xd0, 0x81, 0x13, 0x70, 0x5c, 0x9f, 0x04, 0x11, 0x72, 0x19, 0xa6, 0x3c, 0xcb, + 0x26, 0xad, 0xc0, 0xb9, 0x16, 0xf0, 0x6c, 0x23, 0xc7, 0x36, 0x9a, 0xce, 0x35, 0x41, 0x2b, 0x00, + 0xfc, 0x30, 0xa4, 0x67, 0xc4, 0x95, 0x29, 0x70, 0xf8, 0x3b, 0xb6, 0x81, 0xaf, 0x60, 0xe1, 0x11, + 0x63, 0xe0, 0x51, 0x37, 0x20, 0xa8, 0x06, 0x93, 0x81, 0xd8, 0x2a, 0x6a, 0xe5, 0xf1, 0x91, 0x32, + 0x89, 0x2e, 0xa0, 0x0d, 0x98, 0x73, 0xc9, 0x65, 0xd8, 0x1a, 0x0a, 0x3b, 0xc3, 0xb6, 0xeb, 0xf7, + 0xa1, 0x2b, 0x80, 0x5e, 0x91, 0x2e, 0x19, 0xa8, 0x60, 0x52, 0xda, 0x4d, 0x58, 0x38, 0x24, 0xbe, + 0x4d, 0x06, 0xf2, 0x4e, 0x6a, 0x56, 0x05, 0xe6, 0x69, 0xd8, 0x21, 0x7e, 0x8b, 0xd7, 0xb5, 0xc5, + 0xcf, 0x45, 0xf4, 0x59, 0xbe, 0xcf, 0x63, 0xbd, 0x61, 0xa4, 0x47, 0x50, 0x10, 0x0d, 0x64, 0x73, + 0xa5, 0xa2, 0xdc, 0x85, 0x4c, 0x9b, 0xd2, 0x33, 0x4e, 0x93, 0xdf, 0x59, 0x55, 0x14, 0x82, 0x33, + 0x71, 0x30, 0x5e, 0x83, 0xd9, 0x7d, 0x12, 0xa6, 0x50, 0xe3, 0x36, 0xcc, 0xb3, 0xea, 0x33, 0x98, + 0x32, 0xab, 0x47, 0x1d, 0x1e, 0x53, 0x76, 0x78, 0x7c, 0xb0, 0xc3, 0x3e, 0x14, 0x62, 0x31, 0x64, + 0x7f, 0xff, 0x83, 0x2c, 0x93, 0x19, 0x75, 0x37, 0x35, 0x29, 0x81, 0x1e, 0xb9, 0xb5, 0x47, 0x50, + 0x78, 0xef, 0x9d, 0xfc, 0xae, 0xda, 0x6e, 0x42, 0x41, 0x0c, 0x4e, 0x5a, 0x79, 0xdf, 
0xc2, 0xdc, + 0x21, 0xed, 0xa7, 0x8a, 0x18, 0x79, 0x66, 0x76, 0xbe, 0xe5, 0x61, 0xf6, 0x40, 0x68, 0x6a, 0x12, + 0xbf, 0xef, 0x1c, 0x13, 0x74, 0x0d, 0xf9, 0x98, 0x0f, 0xa0, 0x7f, 0x15, 0x29, 0x0c, 0xfb, 0x85, + 0x9e, 0xfa, 0xac, 0xb0, 0xfe, 0xf9, 0xfb, 0x8f, 0xaf, 0x63, 0x8b, 0x38, 0xcf, 0x9c, 0x56, 0x3e, + 0xb1, 0x9a, 0x70, 0x0d, 0xd4, 0x87, 0x5c, 0xe4, 0x1a, 0xa8, 0xaa, 0x60, 0x1a, 0xb0, 0x96, 0x11, + 0xa2, 0x96, 0x78, 0xd4, 0x25, 0xb4, 0xc8, 0xa2, 0xde, 0xb0, 0x8a, 0x3c, 0x97, 0xb1, 0xcd, 0xea, + 0x47, 0xf4, 0x49, 0x83, 0x7c, 0xcc, 0x35, 0x94, 0x49, 0x0f, 0xfb, 0x95, 0x6e, 0x8c, 0x0a, 0x17, + 0xc3, 0x8a, 0x17, 0xb8, 0x98, 0x19, 0x14, 0x2f, 0x01, 0xf2, 0x21, 0x1f, 0x73, 0x0f, 0xa5, 0x84, + 0x61, 0x97, 0xd1, 0x97, 0x22, 0x78, 0xf4, 0x63, 0x63, 0xbc, 0x66, 0x3f, 0x36, 0x51, 0xde, 0xd5, + 0xe4, 0xbc, 0xef, 0x34, 0x98, 0x8e, 0x1b, 0x11, 0x52, 0x65, 0x92, 0xe0, 0x58, 0x23, 0x14, 0x7e, + 0x9d, 0x0b, 0x58, 0xc5, 0x7a, 0x92, 0x80, 0x5a, 0x8f, 0x71, 0xd6, 0xb4, 0x2a, 0xba, 0xd5, 0x00, + 0x1e, 0xdc, 0x0b, 0x6d, 0xa5, 0x4e, 0x5d, 0xec, 0x0d, 0xe8, 0x69, 0xcf, 0x0c, 0x57, 0xb8, 0x08, + 0x9c, 0x2c, 0xc2, 0xe4, 0x56, 0x50, 0xe3, 0x4f, 0x11, 0xdd, 0xc0, 0xa4, 0xb4, 0x39, 0xf4, 0xb7, + 0x7a, 0x00, 0x7f, 0x49, 0xc0, 0x1a, 0x17, 0xf0, 0x27, 0x2a, 0x25, 0x08, 0x10, 0xf1, 0x59, 0x3b, + 0xbe, 0x68, 0x30, 0x75, 0x6f, 0x6d, 0xe8, 0x9f, 0x94, 0xa9, 0x8a, 0x9b, 0xac, 0xbe, 0x35, 0x1a, + 0x58, 0x0e, 0x20, 0xe6, 0x72, 0x4a, 0x48, 0x51, 0x0f, 0x74, 0x09, 0xf0, 0x60, 0x4a, 0xca, 0x86, + 0x0c, 0x79, 0xd7, 0x93, 0xd3, 0x28, 0xcb, 0x50, 0x55, 0x97, 0xe1, 0x4e, 0x03, 0x78, 0x70, 0x5b, + 0x65, 0xe8, 0x21, 0x53, 0x4e, 0x6f, 0x45, 0x95, 0x6b, 0x58, 0xd3, 0x95, 0x1a, 0xe4, 0x34, 0xdc, + 0x6a, 0x90, 0x8b, 0x0c, 0x57, 0x69, 0x48, 0x03, 0xae, 0x9c, 0xae, 0x62, 0x8b, 0xab, 0xd8, 0xc0, + 0x7f, 0x29, 0x55, 0xf4, 0x68, 0x9f, 0xbd, 0x8e, 0xbd, 0x73, 0x58, 0x39, 0xa6, 0xbd, 0xa7, 0x39, + 0xf7, 0xa6, 0xa5, 0x89, 0xd7, 0x59, 0xbd, 0xeb, 0xda, 0x87, 0x17, 0x12, 0x6a, 0xd3, 0xae, 0xe5, + 0xda, 0x06, 0xf5, 0x6d, 
0xd3, 0x26, 0x2e, 0xef, 0x86, 0x29, 0x8e, 0x2c, 0xcf, 0x09, 0x12, 0xfe, + 0xfb, 0x3e, 0x93, 0x9f, 0xed, 0x09, 0x0e, 0xde, 0xfd, 0x19, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x13, + 0xc5, 0x7e, 0x27, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/common.pb.go b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/common.pb.go new file mode 100644 index 000000000..b5b9207ba --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/common.pb.go @@ -0,0 +1,497 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/firestore/v1beta1/common.proto + +/* +Package firestore is a generated protocol buffer package. + +It is generated from these files: + google/firestore/v1beta1/common.proto + google/firestore/v1beta1/document.proto + google/firestore/v1beta1/firestore.proto + google/firestore/v1beta1/query.proto + google/firestore/v1beta1/write.proto + +It has these top-level messages: + DocumentMask + Precondition + TransactionOptions + Document + Value + ArrayValue + MapValue + GetDocumentRequest + ListDocumentsRequest + ListDocumentsResponse + CreateDocumentRequest + UpdateDocumentRequest + DeleteDocumentRequest + BatchGetDocumentsRequest + BatchGetDocumentsResponse + BeginTransactionRequest + BeginTransactionResponse + CommitRequest + CommitResponse + RollbackRequest + RunQueryRequest + RunQueryResponse + WriteRequest + WriteResponse + ListenRequest + ListenResponse + Target + TargetChange + ListCollectionIdsRequest + ListCollectionIdsResponse + StructuredQuery + Cursor + Write + DocumentTransform + WriteResult + DocumentChange + DocumentDelete + DocumentRemove + ExistenceFilter +*/ +package firestore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not 
otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A set of field paths on a document. +// Used to restrict a get or update operation on a document to a subset of its +// fields. +// This is different from standard field masks, as this is always scoped to a +// [Document][google.firestore.v1beta1.Document], and takes in account the dynamic nature of [Value][google.firestore.v1beta1.Value]. +type DocumentMask struct { + // The list of field paths in the mask. See [Document.fields][google.firestore.v1beta1.Document.fields] for a field + // path syntax reference. + FieldPaths []string `protobuf:"bytes,1,rep,name=field_paths,json=fieldPaths" json:"field_paths,omitempty"` +} + +func (m *DocumentMask) Reset() { *m = DocumentMask{} } +func (m *DocumentMask) String() string { return proto.CompactTextString(m) } +func (*DocumentMask) ProtoMessage() {} +func (*DocumentMask) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *DocumentMask) GetFieldPaths() []string { + if m != nil { + return m.FieldPaths + } + return nil +} + +// A precondition on a document, used for conditional operations. +type Precondition struct { + // The type of precondition. 
+ // + // Types that are valid to be assigned to ConditionType: + // *Precondition_Exists + // *Precondition_UpdateTime + ConditionType isPrecondition_ConditionType `protobuf_oneof:"condition_type"` +} + +func (m *Precondition) Reset() { *m = Precondition{} } +func (m *Precondition) String() string { return proto.CompactTextString(m) } +func (*Precondition) ProtoMessage() {} +func (*Precondition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type isPrecondition_ConditionType interface { + isPrecondition_ConditionType() +} + +type Precondition_Exists struct { + Exists bool `protobuf:"varint,1,opt,name=exists,oneof"` +} +type Precondition_UpdateTime struct { + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=update_time,json=updateTime,oneof"` +} + +func (*Precondition_Exists) isPrecondition_ConditionType() {} +func (*Precondition_UpdateTime) isPrecondition_ConditionType() {} + +func (m *Precondition) GetConditionType() isPrecondition_ConditionType { + if m != nil { + return m.ConditionType + } + return nil +} + +func (m *Precondition) GetExists() bool { + if x, ok := m.GetConditionType().(*Precondition_Exists); ok { + return x.Exists + } + return false +} + +func (m *Precondition) GetUpdateTime() *google_protobuf1.Timestamp { + if x, ok := m.GetConditionType().(*Precondition_UpdateTime); ok { + return x.UpdateTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Precondition) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Precondition_OneofMarshaler, _Precondition_OneofUnmarshaler, _Precondition_OneofSizer, []interface{}{ + (*Precondition_Exists)(nil), + (*Precondition_UpdateTime)(nil), + } +} + +func _Precondition_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Precondition) + // condition_type + switch x := m.ConditionType.(type) { + case *Precondition_Exists: + t := uint64(0) + if x.Exists { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Precondition_UpdateTime: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UpdateTime); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Precondition.ConditionType has unexpected type %T", x) + } + return nil +} + +func _Precondition_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Precondition) + switch tag { + case 1: // condition_type.exists + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ConditionType = &Precondition_Exists{x != 0} + return true, err + case 2: // condition_type.update_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.ConditionType = &Precondition_UpdateTime{msg} + return true, err + default: + return false, nil + } +} + +func _Precondition_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Precondition) + // condition_type + switch x := m.ConditionType.(type) { + case *Precondition_Exists: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *Precondition_UpdateTime: + s := proto.Size(x.UpdateTime) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Options for creating a new transaction. +type TransactionOptions struct { + // The mode of the transaction. + // + // Types that are valid to be assigned to Mode: + // *TransactionOptions_ReadOnly_ + // *TransactionOptions_ReadWrite_ + Mode isTransactionOptions_Mode `protobuf_oneof:"mode"` +} + +func (m *TransactionOptions) Reset() { *m = TransactionOptions{} } +func (m *TransactionOptions) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions) ProtoMessage() {} +func (*TransactionOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isTransactionOptions_Mode interface { + isTransactionOptions_Mode() +} + +type TransactionOptions_ReadOnly_ struct { + ReadOnly *TransactionOptions_ReadOnly `protobuf:"bytes,2,opt,name=read_only,json=readOnly,oneof"` +} +type TransactionOptions_ReadWrite_ struct { + ReadWrite *TransactionOptions_ReadWrite `protobuf:"bytes,3,opt,name=read_write,json=readWrite,oneof"` +} + +func (*TransactionOptions_ReadOnly_) isTransactionOptions_Mode() {} +func (*TransactionOptions_ReadWrite_) isTransactionOptions_Mode() {} + +func (m *TransactionOptions) GetMode() isTransactionOptions_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *TransactionOptions) GetReadOnly() *TransactionOptions_ReadOnly { + if x, ok := m.GetMode().(*TransactionOptions_ReadOnly_); ok { + return x.ReadOnly + } + return nil +} + +func (m *TransactionOptions) GetReadWrite() *TransactionOptions_ReadWrite { + if x, ok := m.GetMode().(*TransactionOptions_ReadWrite_); ok { + return x.ReadWrite + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TransactionOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionOptions_OneofMarshaler, _TransactionOptions_OneofUnmarshaler, _TransactionOptions_OneofSizer, []interface{}{ + (*TransactionOptions_ReadOnly_)(nil), + (*TransactionOptions_ReadWrite_)(nil), + } +} + +func _TransactionOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadOnly_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadOnly); err != nil { + return err + } + case *TransactionOptions_ReadWrite_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadWrite); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionOptions.Mode has unexpected type %T", x) + } + return nil +} + +func _TransactionOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionOptions) + switch tag { + case 2: // mode.read_only + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadOnly) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadOnly_{msg} + return true, err + case 3: // mode.read_write + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadWrite) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadWrite_{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadOnly_: + s := proto.Size(x.ReadOnly) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += 
s + case *TransactionOptions_ReadWrite_: + s := proto.Size(x.ReadWrite) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Options for a transaction that can be used to read and write documents. +type TransactionOptions_ReadWrite struct { + // An optional transaction to retry. + RetryTransaction []byte `protobuf:"bytes,1,opt,name=retry_transaction,json=retryTransaction,proto3" json:"retry_transaction,omitempty"` +} + +func (m *TransactionOptions_ReadWrite) Reset() { *m = TransactionOptions_ReadWrite{} } +func (m *TransactionOptions_ReadWrite) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadWrite) ProtoMessage() {} +func (*TransactionOptions_ReadWrite) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *TransactionOptions_ReadWrite) GetRetryTransaction() []byte { + if m != nil { + return m.RetryTransaction + } + return nil +} + +// Options for a transaction that can only be used to read documents. +type TransactionOptions_ReadOnly struct { + // The consistency mode for this transaction. If not set, defaults to strong + // consistency. 
+ // + // Types that are valid to be assigned to ConsistencySelector: + // *TransactionOptions_ReadOnly_ReadTime + ConsistencySelector isTransactionOptions_ReadOnly_ConsistencySelector `protobuf_oneof:"consistency_selector"` +} + +func (m *TransactionOptions_ReadOnly) Reset() { *m = TransactionOptions_ReadOnly{} } +func (m *TransactionOptions_ReadOnly) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadOnly) ProtoMessage() {} +func (*TransactionOptions_ReadOnly) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +type isTransactionOptions_ReadOnly_ConsistencySelector interface { + isTransactionOptions_ReadOnly_ConsistencySelector() +} + +type TransactionOptions_ReadOnly_ReadTime struct { + ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=read_time,json=readTime,oneof"` +} + +func (*TransactionOptions_ReadOnly_ReadTime) isTransactionOptions_ReadOnly_ConsistencySelector() {} + +func (m *TransactionOptions_ReadOnly) GetConsistencySelector() isTransactionOptions_ReadOnly_ConsistencySelector { + if m != nil { + return m.ConsistencySelector + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetReadTime() *google_protobuf1.Timestamp { + if x, ok := m.GetConsistencySelector().(*TransactionOptions_ReadOnly_ReadTime); ok { + return x.ReadTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TransactionOptions_ReadOnly) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionOptions_ReadOnly_OneofMarshaler, _TransactionOptions_ReadOnly_OneofUnmarshaler, _TransactionOptions_ReadOnly_OneofSizer, []interface{}{ + (*TransactionOptions_ReadOnly_ReadTime)(nil), + } +} + +func _TransactionOptions_ReadOnly_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionOptions_ReadOnly) + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *TransactionOptions_ReadOnly_ReadTime: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadTime); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionOptions_ReadOnly.ConsistencySelector has unexpected type %T", x) + } + return nil +} + +func _TransactionOptions_ReadOnly_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionOptions_ReadOnly) + switch tag { + case 2: // consistency_selector.read_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.ConsistencySelector = &TransactionOptions_ReadOnly_ReadTime{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionOptions_ReadOnly_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionOptions_ReadOnly) + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *TransactionOptions_ReadOnly_ReadTime: + s := proto.Size(x.ReadTime) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*DocumentMask)(nil), 
"google.firestore.v1beta1.DocumentMask") + proto.RegisterType((*Precondition)(nil), "google.firestore.v1beta1.Precondition") + proto.RegisterType((*TransactionOptions)(nil), "google.firestore.v1beta1.TransactionOptions") + proto.RegisterType((*TransactionOptions_ReadWrite)(nil), "google.firestore.v1beta1.TransactionOptions.ReadWrite") + proto.RegisterType((*TransactionOptions_ReadOnly)(nil), "google.firestore.v1beta1.TransactionOptions.ReadOnly") +} + +func init() { proto.RegisterFile("google/firestore/v1beta1/common.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 453 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x5f, 0x8b, 0xd3, 0x40, + 0x10, 0x6f, 0x7b, 0x47, 0x69, 0xa7, 0x45, 0xce, 0x20, 0x12, 0xc2, 0xe1, 0x1d, 0x05, 0xa1, 0x20, + 0x6c, 0xa8, 0xa2, 0x28, 0xe2, 0x83, 0xa9, 0xdc, 0xf5, 0x45, 0xae, 0xc4, 0xe2, 0x81, 0x20, 0x61, + 0x9b, 0x4c, 0xe3, 0x62, 0xb2, 0x13, 0x76, 0xb7, 0x6a, 0xbe, 0x89, 0xcf, 0x3e, 0xfa, 0xb9, 0xfc, + 0x20, 0x92, 0xcd, 0x36, 0x0a, 0xc7, 0x81, 0xbe, 0xed, 0xcc, 0xfc, 0xe6, 0xf7, 0x67, 0x58, 0x78, + 0x98, 0x13, 0xe5, 0x05, 0x86, 0x3b, 0xa1, 0x50, 0x1b, 0x52, 0x18, 0x7e, 0x59, 0x6c, 0xd1, 0xf0, + 0x45, 0x98, 0x52, 0x59, 0x92, 0x64, 0x95, 0x22, 0x43, 0x9e, 0xdf, 0xc2, 0x58, 0x07, 0x63, 0x0e, + 0x16, 0x9c, 0x3a, 0x02, 0x5e, 0x89, 0x90, 0x4b, 0x49, 0x86, 0x1b, 0x41, 0x52, 0xb7, 0x7b, 0xc1, + 0x99, 0x9b, 0xda, 0x6a, 0xbb, 0xdf, 0x85, 0x46, 0x94, 0xa8, 0x0d, 0x2f, 0xab, 0x16, 0x30, 0x0b, + 0x61, 0xfa, 0x86, 0xd2, 0x7d, 0x89, 0xd2, 0xbc, 0xe5, 0xfa, 0xb3, 0x77, 0x06, 0x93, 0x9d, 0xc0, + 0x22, 0x4b, 0x2a, 0x6e, 0x3e, 0x69, 0xbf, 0x7f, 0x7e, 0x34, 0x1f, 0xc7, 0x60, 0x5b, 0xeb, 0xa6, + 0x33, 0xab, 0x61, 0xba, 0x56, 0x98, 0x92, 0xcc, 0x44, 0x23, 0xe4, 0xf9, 0x30, 0xc4, 0x6f, 0x42, + 0x9b, 0x06, 0xdb, 0x9f, 0x8f, 0x56, 0xbd, 0xd8, 0xd5, 0xde, 0x2b, 0x98, 0xec, 0xab, 0x8c, 0x1b, + 0x4c, 0x1a, 0x51, 0x7f, 0x70, 0xde, 0x9f, 0x4f, 0x1e, 0x07, 0xcc, 0x25, 0x39, 0x38, 0x62, 
0x9b, + 0x83, 0xa3, 0x55, 0x2f, 0x86, 0x76, 0xa1, 0x69, 0x45, 0x27, 0x70, 0xa7, 0x53, 0x49, 0x4c, 0x5d, + 0xe1, 0xec, 0xd7, 0x00, 0xbc, 0x8d, 0xe2, 0x52, 0xf3, 0xb4, 0x69, 0x5e, 0x55, 0x36, 0xa9, 0xb7, + 0x81, 0xb1, 0x42, 0x9e, 0x25, 0x24, 0x8b, 0xda, 0xa9, 0x3c, 0x65, 0xb7, 0xdd, 0x8b, 0xdd, 0x24, + 0x60, 0x31, 0xf2, 0xec, 0x4a, 0x16, 0xf5, 0xaa, 0x17, 0x8f, 0x94, 0x7b, 0x7b, 0xd7, 0x00, 0x96, + 0xf5, 0xab, 0x12, 0x06, 0xfd, 0x23, 0x4b, 0xfb, 0xec, 0xbf, 0x69, 0xaf, 0x9b, 0xed, 0x55, 0x2f, + 0xb6, 0x0e, 0x6d, 0x11, 0x3c, 0x87, 0x71, 0x37, 0xf1, 0x1e, 0xc1, 0x5d, 0x85, 0x46, 0xd5, 0x89, + 0xf9, 0xb3, 0x6f, 0x0f, 0x39, 0x8d, 0x4f, 0xec, 0xe0, 0x2f, 0xde, 0xe0, 0x23, 0x8c, 0x0e, 0x56, + 0xbd, 0x17, 0x2e, 0xf4, 0x3f, 0x9f, 0xd6, 0x26, 0xb3, 0x87, 0xbd, 0x0f, 0xf7, 0x52, 0x92, 0x5a, + 0x68, 0x83, 0x32, 0xad, 0x13, 0x8d, 0x05, 0xa6, 0x86, 0x54, 0x34, 0x84, 0xe3, 0x92, 0x32, 0x8c, + 0xbe, 0xf7, 0xe1, 0x34, 0xa5, 0xf2, 0xd6, 0xac, 0xd1, 0x64, 0x69, 0xbf, 0xe6, 0xba, 0x91, 0x59, + 0xf7, 0x3f, 0xbc, 0x76, 0xc0, 0x9c, 0x0a, 0x2e, 0x73, 0x46, 0x2a, 0x0f, 0x73, 0x94, 0xd6, 0x44, + 0xd8, 0x8e, 0x78, 0x25, 0xf4, 0xcd, 0x1f, 0xfe, 0xb2, 0xeb, 0xfc, 0x18, 0x1c, 0x5f, 0x2e, 0x2f, + 0xde, 0xfd, 0x1c, 0x3c, 0xb8, 0x6c, 0xa9, 0x96, 0x05, 0xed, 0x33, 0x76, 0xd1, 0x29, 0xbf, 0x5f, + 0x44, 0xcd, 0xc6, 0x76, 0x68, 0x59, 0x9f, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x27, 0x10, 0x0b, + 0x70, 0x36, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/document.pb.go b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/document.pb.go new file mode 100644 index 000000000..069fd6c0b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/document.pb.go @@ -0,0 +1,566 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/firestore/v1beta1/document.proto + +package firestore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" +import google_type "google.golang.org/genproto/googleapis/type/latlng" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A Firestore document. +// +// Must not exceed 1 MiB - 4 bytes. +type Document struct { + // The resource name of the document, for example + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The document's fields. + // + // The map keys represent field names. + // + // A simple field name contains only characters `a` to `z`, `A` to `Z`, + // `0` to `9`, or `_`, and must not start with `0` to `9` or `_`. For example, + // `foo_bar_17`. + // + // Field names matching the regular expression `__.*__` are reserved. Reserved + // field names are forbidden except in certain documented contexts. The map + // keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be + // empty. + // + // Field paths may be used in other contexts to refer to structured fields + // defined here. For `map_value`, the field path is represented by the simple + // or quoted field names of the containing fields, delimited by `.`. For + // example, the structured field + // `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be + // represented by the field path `foo.x&y`. + // + // Within a field path, a quoted field name starts and ends with `` ` `` and + // may contain any character. Some characters, including `` ` ``, must be + // escaped using a `\`. 
For example, `` `x&y` `` represents `x&y` and + // `` `bak\`tik` `` represents `` bak`tik ``. + Fields map[string]*Value `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Output only. The time at which the document was created. + // + // This value increases monotonically when a document is deleted then + // recreated. It can also be compared to values from other documents and + // the `read_time` of a query. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Output only. The time at which the document was last changed. + // + // This value is initally set to the `create_time` then increases + // monotonically with each change to the document. It can also be + // compared to values from other documents and the `read_time` of a query. + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Document) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Document) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Document) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Document) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +// A message that can hold any of the supported value types. +type Value struct { + // Must have a value set. 
+ // + // Types that are valid to be assigned to ValueType: + // *Value_NullValue + // *Value_BooleanValue + // *Value_IntegerValue + // *Value_DoubleValue + // *Value_TimestampValue + // *Value_StringValue + // *Value_BytesValue + // *Value_ReferenceValue + // *Value_GeoPointValue + // *Value_ArrayValue + // *Value_MapValue + ValueType isValue_ValueType `protobuf_oneof:"value_type"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type isValue_ValueType interface { + isValue_ValueType() +} + +type Value_NullValue struct { + NullValue google_protobuf2.NullValue `protobuf:"varint,11,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_BooleanValue struct { + BooleanValue bool `protobuf:"varint,1,opt,name=boolean_value,json=booleanValue,oneof"` +} +type Value_IntegerValue struct { + IntegerValue int64 `protobuf:"varint,2,opt,name=integer_value,json=integerValue,oneof"` +} +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,oneof"` +} +type Value_TimestampValue struct { + TimestampValue *google_protobuf1.Timestamp `protobuf:"bytes,10,opt,name=timestamp_value,json=timestampValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,17,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BytesValue struct { + BytesValue []byte `protobuf:"bytes,18,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} +type Value_ReferenceValue struct { + ReferenceValue string `protobuf:"bytes,5,opt,name=reference_value,json=referenceValue,oneof"` +} +type Value_GeoPointValue struct { + GeoPointValue *google_type.LatLng `protobuf:"bytes,8,opt,name=geo_point_value,json=geoPointValue,oneof"` +} +type Value_ArrayValue struct { + ArrayValue *ArrayValue 
`protobuf:"bytes,9,opt,name=array_value,json=arrayValue,oneof"` +} +type Value_MapValue struct { + MapValue *MapValue `protobuf:"bytes,6,opt,name=map_value,json=mapValue,oneof"` +} + +func (*Value_NullValue) isValue_ValueType() {} +func (*Value_BooleanValue) isValue_ValueType() {} +func (*Value_IntegerValue) isValue_ValueType() {} +func (*Value_DoubleValue) isValue_ValueType() {} +func (*Value_TimestampValue) isValue_ValueType() {} +func (*Value_StringValue) isValue_ValueType() {} +func (*Value_BytesValue) isValue_ValueType() {} +func (*Value_ReferenceValue) isValue_ValueType() {} +func (*Value_GeoPointValue) isValue_ValueType() {} +func (*Value_ArrayValue) isValue_ValueType() {} +func (*Value_MapValue) isValue_ValueType() {} + +func (m *Value) GetValueType() isValue_ValueType { + if m != nil { + return m.ValueType + } + return nil +} + +func (m *Value) GetNullValue() google_protobuf2.NullValue { + if x, ok := m.GetValueType().(*Value_NullValue); ok { + return x.NullValue + } + return google_protobuf2.NullValue_NULL_VALUE +} + +func (m *Value) GetBooleanValue() bool { + if x, ok := m.GetValueType().(*Value_BooleanValue); ok { + return x.BooleanValue + } + return false +} + +func (m *Value) GetIntegerValue() int64 { + if x, ok := m.GetValueType().(*Value_IntegerValue); ok { + return x.IntegerValue + } + return 0 +} + +func (m *Value) GetDoubleValue() float64 { + if x, ok := m.GetValueType().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *Value) GetTimestampValue() *google_protobuf1.Timestamp { + if x, ok := m.GetValueType().(*Value_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetValueType().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBytesValue() []byte { + if x, ok := m.GetValueType().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (m *Value) GetReferenceValue() string { + if 
x, ok := m.GetValueType().(*Value_ReferenceValue); ok { + return x.ReferenceValue + } + return "" +} + +func (m *Value) GetGeoPointValue() *google_type.LatLng { + if x, ok := m.GetValueType().(*Value_GeoPointValue); ok { + return x.GeoPointValue + } + return nil +} + +func (m *Value) GetArrayValue() *ArrayValue { + if x, ok := m.GetValueType().(*Value_ArrayValue); ok { + return x.ArrayValue + } + return nil +} + +func (m *Value) GetMapValue() *MapValue { + if x, ok := m.GetValueType().(*Value_MapValue); ok { + return x.MapValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_BooleanValue)(nil), + (*Value_IntegerValue)(nil), + (*Value_DoubleValue)(nil), + (*Value_TimestampValue)(nil), + (*Value_StringValue)(nil), + (*Value_BytesValue)(nil), + (*Value_ReferenceValue)(nil), + (*Value_GeoPointValue)(nil), + (*Value_ArrayValue)(nil), + (*Value_MapValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // value_type + switch x := m.ValueType.(type) { + case *Value_NullValue: + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_BooleanValue: + t := uint64(0) + if x.BooleanValue { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_IntegerValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntegerValue)) + case *Value_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *Value_TimestampValue: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimestampValue); err != nil { 
+ return err + } + case *Value_StringValue: + b.EncodeVarint(17<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BytesValue: + b.EncodeVarint(18<<3 | proto.WireBytes) + b.EncodeRawBytes(x.BytesValue) + case *Value_ReferenceValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ReferenceValue) + case *Value_GeoPointValue: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GeoPointValue); err != nil { + return err + } + case *Value_ArrayValue: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ArrayValue); err != nil { + return err + } + case *Value_MapValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MapValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.ValueType has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 11: // value_type.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_NullValue{google_protobuf2.NullValue(x)} + return true, err + case 1: // value_type.boolean_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_BooleanValue{x != 0} + return true, err + case 2: // value_type.integer_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.ValueType = &Value_IntegerValue{int64(x)} + return true, err + case 3: // value_type.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.ValueType = &Value_DoubleValue{math.Float64frombits(x)} + return true, err + case 10: // value_type.timestamp_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + 
msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.ValueType = &Value_TimestampValue{msg} + return true, err + case 17: // value_type.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ValueType = &Value_StringValue{x} + return true, err + case 18: // value_type.bytes_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ValueType = &Value_BytesValue{x} + return true, err + case 5: // value_type.reference_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ValueType = &Value_ReferenceValue{x} + return true, err + case 8: // value_type.geo_point_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_type.LatLng) + err := b.DecodeMessage(msg) + m.ValueType = &Value_GeoPointValue{msg} + return true, err + case 9: // value_type.array_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ArrayValue) + err := b.DecodeMessage(msg) + m.ValueType = &Value_ArrayValue{msg} + return true, err + case 6: // value_type.map_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MapValue) + err := b.DecodeMessage(msg) + m.ValueType = &Value_MapValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // value_type + switch x := m.ValueType.(type) { + case *Value_NullValue: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_BooleanValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *Value_IntegerValue: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.IntegerValue)) + case *Value_DoubleValue: + n += 
proto.SizeVarint(3<<3 | proto.WireFixed64) + n += 8 + case *Value_TimestampValue: + s := proto.Size(x.TimestampValue) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_StringValue: + n += proto.SizeVarint(17<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BytesValue: + n += proto.SizeVarint(18<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.BytesValue))) + n += len(x.BytesValue) + case *Value_ReferenceValue: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferenceValue))) + n += len(x.ReferenceValue) + case *Value_GeoPointValue: + s := proto.Size(x.GeoPointValue) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ArrayValue: + s := proto.Size(x.ArrayValue) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_MapValue: + s := proto.Size(x.MapValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// An array value. +type ArrayValue struct { + // Values in the array. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ArrayValue) Reset() { *m = ArrayValue{} } +func (m *ArrayValue) String() string { return proto.CompactTextString(m) } +func (*ArrayValue) ProtoMessage() {} +func (*ArrayValue) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ArrayValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// A map value. +type MapValue struct { + // The map's fields. + // + // The map keys represent field names. Field names matching the regular + // expression `__.*__` are reserved. 
Reserved field names are forbidden except + // in certain documented contexts. The map keys, represented as UTF-8, must + // not exceed 1,500 bytes and cannot be empty. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *MapValue) Reset() { *m = MapValue{} } +func (m *MapValue) String() string { return proto.CompactTextString(m) } +func (*MapValue) ProtoMessage() {} +func (*MapValue) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *MapValue) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +func init() { + proto.RegisterType((*Document)(nil), "google.firestore.v1beta1.Document") + proto.RegisterType((*Value)(nil), "google.firestore.v1beta1.Value") + proto.RegisterType((*ArrayValue)(nil), "google.firestore.v1beta1.ArrayValue") + proto.RegisterType((*MapValue)(nil), "google.firestore.v1beta1.MapValue") +} + +func init() { proto.RegisterFile("google/firestore/v1beta1/document.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 643 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0x9b, 0xb6, 0xab, 0xda, 0x37, 0xdd, 0x06, 0xe1, 0x52, 0x55, 0x13, 0x2b, 0x05, 0x44, + 0xb9, 0x24, 0xda, 0x10, 0x02, 0x31, 0x71, 0x58, 0xc7, 0xba, 0x1e, 0x06, 0xaa, 0x02, 0xda, 0x61, + 0x97, 0xca, 0x69, 0x5d, 0x2b, 0xc2, 0xb1, 0x23, 0xc7, 0x99, 0xd4, 0xaf, 0xc2, 0x91, 0x23, 0x07, + 0x3e, 0x01, 0x9f, 0x80, 0x4f, 0x85, 0xfc, 0x2f, 0x9b, 0x60, 0x55, 0x4f, 0xdc, 0x92, 0xf7, 0xfd, + 0x3d, 0xcf, 0xfb, 0xd8, 0xb1, 0x03, 0x2f, 0x08, 0xe7, 0x84, 0xe2, 0x68, 0x95, 0x0a, 0x5c, 0x48, + 0x2e, 0x70, 0x74, 0x73, 0x94, 0x60, 0x89, 0x8e, 0xa2, 0x25, 0x5f, 0x94, 0x19, 0x66, 0x32, 0xcc, + 0x05, 0x97, 0x3c, 0xe8, 0x19, 0x30, 0xac, 0xc0, 0xd0, 0x82, 0xfd, 0x03, 0x6b, 0x81, 0xf2, 0x34, + 0x42, 0x8c, 0x71, 0x89, 
0x64, 0xca, 0x59, 0x61, 0x74, 0x55, 0x57, 0xbf, 0x25, 0xe5, 0x2a, 0x2a, + 0xa4, 0x28, 0x17, 0xd6, 0xb5, 0x7f, 0xf8, 0x77, 0x57, 0xa6, 0x19, 0x2e, 0x24, 0xca, 0x72, 0x0b, + 0xd8, 0xb1, 0x91, 0x5c, 0xe7, 0x38, 0xa2, 0x48, 0x52, 0x46, 0x4c, 0x67, 0xf8, 0xab, 0x0e, 0xed, + 0x0f, 0x36, 0x63, 0x10, 0x40, 0x93, 0xa1, 0x0c, 0xf7, 0xbc, 0x81, 0x37, 0xea, 0xc4, 0xfa, 0x39, + 0x98, 0x40, 0x6b, 0x95, 0x62, 0xba, 0x2c, 0x7a, 0xf5, 0x41, 0x63, 0xe4, 0x1f, 0x87, 0xe1, 0xa6, + 0x25, 0x84, 0xce, 0x27, 0x9c, 0x68, 0xc1, 0x39, 0x93, 0x62, 0x1d, 0x5b, 0x75, 0x70, 0x02, 0xfe, + 0x42, 0x60, 0x24, 0xf1, 0x5c, 0x85, 0xeb, 0x35, 0x06, 0xde, 0xc8, 0x3f, 0xee, 0x3b, 0x33, 0x97, + 0x3c, 0xfc, 0xe2, 0x92, 0xc7, 0x60, 0x70, 0x55, 0x50, 0xe2, 0x32, 0x5f, 0x56, 0xe2, 0xe6, 0x76, + 0xb1, 0xc1, 0x55, 0xa1, 0x7f, 0x0d, 0xfe, 0x9d, 0x40, 0xc1, 0x03, 0x68, 0x7c, 0xc5, 0x6b, 0xbb, + 0x46, 0xf5, 0x18, 0xbc, 0x86, 0x9d, 0x1b, 0x44, 0x4b, 0xdc, 0xab, 0x6b, 0xdf, 0xc3, 0xcd, 0x2b, + 0xbc, 0x52, 0x58, 0x6c, 0xe8, 0x77, 0xf5, 0xb7, 0xde, 0xf0, 0x77, 0x13, 0x76, 0x74, 0x31, 0x38, + 0x01, 0x60, 0x25, 0xa5, 0x73, 0xe3, 0xe4, 0x0f, 0xbc, 0xd1, 0xde, 0x3d, 0x09, 0x3f, 0x95, 0x94, + 0x6a, 0x7e, 0x5a, 0x8b, 0x3b, 0xcc, 0xbd, 0x04, 0xcf, 0x61, 0x37, 0xe1, 0x9c, 0x62, 0xc4, 0xac, + 0x5e, 0xa5, 0x6b, 0x4f, 0x6b, 0x71, 0xd7, 0x96, 0x2b, 0x2c, 0x65, 0x12, 0x13, 0x2c, 0xe6, 0xb7, + 0x81, 0x1b, 0x0a, 0xb3, 0x65, 0x83, 0x3d, 0x85, 0xee, 0x92, 0x97, 0x09, 0xc5, 0x96, 0x52, 0x7b, + 0xed, 0x4d, 0x6b, 0xb1, 0x6f, 0xaa, 0x06, 0x3a, 0x87, 0xfd, 0xea, 0x94, 0x58, 0x0e, 0xb6, 0x6d, + 0xeb, 0xb4, 0x16, 0xef, 0x55, 0xa2, 0x6a, 0x56, 0x21, 0x45, 0xca, 0x88, 0xf5, 0x78, 0xa8, 0xb6, + 0x55, 0xcd, 0x32, 0x55, 0x03, 0x3d, 0x01, 0x3f, 0x59, 0x4b, 0x5c, 0x58, 0x26, 0x18, 0x78, 0xa3, + 0xee, 0xb4, 0x16, 0x83, 0x2e, 0x1a, 0xe4, 0x25, 0xec, 0x0b, 0xbc, 0xc2, 0x02, 0xb3, 0x85, 0x8b, + 0xbd, 0x63, 0xad, 0xf6, 0xaa, 0x86, 0x41, 0xdf, 0xc3, 0x3e, 0xc1, 0x7c, 0x9e, 0xf3, 0x94, 0x49, + 0x8b, 0xb6, 0x75, 0xf2, 0x47, 0x2e, 0xb9, 0x3a, 0xe6, 0xe1, 
0x25, 0x92, 0x97, 0x8c, 0x4c, 0x6b, + 0xf1, 0x2e, 0xc1, 0x7c, 0xa6, 0x60, 0x23, 0xbf, 0x00, 0x1f, 0x09, 0x81, 0xd6, 0x56, 0xda, 0xd1, + 0xd2, 0x67, 0x9b, 0xbf, 0xf9, 0xa9, 0x82, 0xdd, 0x37, 0x03, 0x54, 0xbd, 0x05, 0xa7, 0xd0, 0xc9, + 0x90, 0xdb, 0xbb, 0x96, 0xb6, 0x19, 0x6e, 0xb6, 0xf9, 0x88, 0x72, 0x67, 0xd2, 0xce, 0xec, 0xf3, + 0xb8, 0x0b, 0xa0, 0xe5, 0x73, 0x95, 0x78, 0x78, 0x0e, 0x70, 0x3b, 0x2c, 0x78, 0x03, 0x2d, 0xdd, + 0x2b, 0x7a, 0x9e, 0xbe, 0x78, 0x5b, 0x8f, 0xa5, 0xc5, 0x87, 0x3f, 0x3d, 0x68, 0xbb, 0x69, 0x77, + 0xae, 0xaf, 0xb7, 0xed, 0xfa, 0x3a, 0xcd, 0x7d, 0xd7, 0xf7, 0x7f, 0x5e, 0xa2, 0xf1, 0x37, 0x0f, + 0x0e, 0x16, 0x3c, 0xdb, 0xa8, 0x18, 0xef, 0xba, 0x3f, 0xcb, 0x4c, 0x1d, 0xc9, 0x99, 0x77, 0x7d, + 0x6a, 0x51, 0xc2, 0x29, 0x62, 0x24, 0xe4, 0x82, 0x44, 0x04, 0x33, 0x7d, 0x60, 0x23, 0xd3, 0x42, + 0x79, 0x5a, 0xfc, 0xfb, 0x3b, 0x3e, 0xa9, 0x2a, 0xdf, 0xeb, 0xcd, 0x8b, 0xb3, 0xc9, 0xe7, 0x1f, + 0xf5, 0xc7, 0x17, 0xc6, 0xea, 0x8c, 0xf2, 0x72, 0x19, 0x4e, 0xaa, 0xd9, 0x57, 0x47, 0x63, 0xa5, + 0x48, 0x5a, 0xda, 0xf5, 0xd5, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x79, 0x7d, 0xad, 0xe3, + 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/firestore.pb.go b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/firestore.pb.go new file mode 100644 index 000000000..c7ada4fab --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/firestore.pb.go @@ -0,0 +1,3124 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/firestore/v1beta1/firestore.proto + +package firestore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The type of change. +type TargetChange_TargetChangeType int32 + +const ( + // No change has occurred. Used only to send an updated `resume_token`. + TargetChange_NO_CHANGE TargetChange_TargetChangeType = 0 + // The targets have been added. + TargetChange_ADD TargetChange_TargetChangeType = 1 + // The targets have been removed. + TargetChange_REMOVE TargetChange_TargetChangeType = 2 + // The targets reflect all changes committed before the targets were added + // to the stream. + // + // This will be sent after or with a `read_time` that is greater than or + // equal to the time at which the targets were added. + // + // Listeners can wait for this change if read-after-write semantics + // are desired. + TargetChange_CURRENT TargetChange_TargetChangeType = 3 + // The targets have been reset, and a new initial state for the targets + // will be returned in subsequent changes. + // + // After the initial state is complete, `CURRENT` will be returned even + // if the target was previously indicated to be `CURRENT`. 
+ TargetChange_RESET TargetChange_TargetChangeType = 4 +) + +var TargetChange_TargetChangeType_name = map[int32]string{ + 0: "NO_CHANGE", + 1: "ADD", + 2: "REMOVE", + 3: "CURRENT", + 4: "RESET", +} +var TargetChange_TargetChangeType_value = map[string]int32{ + "NO_CHANGE": 0, + "ADD": 1, + "REMOVE": 2, + "CURRENT": 3, + "RESET": 4, +} + +func (x TargetChange_TargetChangeType) String() string { + return proto.EnumName(TargetChange_TargetChangeType_name, int32(x)) +} +func (TargetChange_TargetChangeType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{20, 0} +} + +// The request for [Firestore.GetDocument][google.firestore.v1beta1.Firestore.GetDocument]. +type GetDocumentRequest struct { + // The resource name of the Document to get. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The fields to return. If not set, returns all fields. + // + // If the document has a field that is not present in this mask, that field + // will not be returned in the response. + Mask *DocumentMask `protobuf:"bytes,2,opt,name=mask" json:"mask,omitempty"` + // The consistency mode for this transaction. + // If not set, defaults to strong consistency. 
+ // + // Types that are valid to be assigned to ConsistencySelector: + // *GetDocumentRequest_Transaction + // *GetDocumentRequest_ReadTime + ConsistencySelector isGetDocumentRequest_ConsistencySelector `protobuf_oneof:"consistency_selector"` +} + +func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } +func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*GetDocumentRequest) ProtoMessage() {} +func (*GetDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +type isGetDocumentRequest_ConsistencySelector interface { + isGetDocumentRequest_ConsistencySelector() +} + +type GetDocumentRequest_Transaction struct { + Transaction []byte `protobuf:"bytes,3,opt,name=transaction,proto3,oneof"` +} +type GetDocumentRequest_ReadTime struct { + ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=read_time,json=readTime,oneof"` +} + +func (*GetDocumentRequest_Transaction) isGetDocumentRequest_ConsistencySelector() {} +func (*GetDocumentRequest_ReadTime) isGetDocumentRequest_ConsistencySelector() {} + +func (m *GetDocumentRequest) GetConsistencySelector() isGetDocumentRequest_ConsistencySelector { + if m != nil { + return m.ConsistencySelector + } + return nil +} + +func (m *GetDocumentRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetDocumentRequest) GetMask() *DocumentMask { + if m != nil { + return m.Mask + } + return nil +} + +func (m *GetDocumentRequest) GetTransaction() []byte { + if x, ok := m.GetConsistencySelector().(*GetDocumentRequest_Transaction); ok { + return x.Transaction + } + return nil +} + +func (m *GetDocumentRequest) GetReadTime() *google_protobuf1.Timestamp { + if x, ok := m.GetConsistencySelector().(*GetDocumentRequest_ReadTime); ok { + return x.ReadTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GetDocumentRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GetDocumentRequest_OneofMarshaler, _GetDocumentRequest_OneofUnmarshaler, _GetDocumentRequest_OneofSizer, []interface{}{ + (*GetDocumentRequest_Transaction)(nil), + (*GetDocumentRequest_ReadTime)(nil), + } +} + +func _GetDocumentRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GetDocumentRequest) + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *GetDocumentRequest_Transaction: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Transaction) + case *GetDocumentRequest_ReadTime: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadTime); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GetDocumentRequest.ConsistencySelector has unexpected type %T", x) + } + return nil +} + +func _GetDocumentRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GetDocumentRequest) + switch tag { + case 3: // consistency_selector.transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ConsistencySelector = &GetDocumentRequest_Transaction{x} + return true, err + case 5: // consistency_selector.read_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.ConsistencySelector = &GetDocumentRequest_ReadTime{msg} + return true, err + default: + return false, nil + } +} + +func _GetDocumentRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GetDocumentRequest) + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *GetDocumentRequest_Transaction: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.Transaction))) + n += len(x.Transaction) + case *GetDocumentRequest_ReadTime: + s := proto.Size(x.ReadTime) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The request for [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments]. +type ListDocumentsRequest struct { + // The parent resource name. In the format: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // For example: + // `projects/my-project/databases/my-database/documents` or + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The collection ID, relative to `parent`, to list. For example: `chatrooms` + // or `messages`. + CollectionId string `protobuf:"bytes,2,opt,name=collection_id,json=collectionId" json:"collection_id,omitempty"` + // The maximum number of documents to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The `next_page_token` value returned from a previous List request, if any. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The order to sort results by. For example: `priority desc, name`. + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy" json:"order_by,omitempty"` + // The fields to return. If not set, returns all fields. + // + // If a document has a field that is not present in this mask, that field + // will not be returned in the response. + Mask *DocumentMask `protobuf:"bytes,7,opt,name=mask" json:"mask,omitempty"` + // The consistency mode for this transaction. + // If not set, defaults to strong consistency. 
+ // + // Types that are valid to be assigned to ConsistencySelector: + // *ListDocumentsRequest_Transaction + // *ListDocumentsRequest_ReadTime + ConsistencySelector isListDocumentsRequest_ConsistencySelector `protobuf_oneof:"consistency_selector"` + // If the list should show missing documents. A missing document is a + // document that does not exist but has sub-documents. These documents will + // be returned with a key but will not have fields, [Document.create_time][google.firestore.v1beta1.Document.create_time], + // or [Document.update_time][google.firestore.v1beta1.Document.update_time] set. + // + // Requests with `show_missing` may not specify `where` or + // `order_by`. + ShowMissing bool `protobuf:"varint,12,opt,name=show_missing,json=showMissing" json:"show_missing,omitempty"` +} + +func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} } +func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) } +func (*ListDocumentsRequest) ProtoMessage() {} +func (*ListDocumentsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +type isListDocumentsRequest_ConsistencySelector interface { + isListDocumentsRequest_ConsistencySelector() +} + +type ListDocumentsRequest_Transaction struct { + Transaction []byte `protobuf:"bytes,8,opt,name=transaction,proto3,oneof"` +} +type ListDocumentsRequest_ReadTime struct { + ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,10,opt,name=read_time,json=readTime,oneof"` +} + +func (*ListDocumentsRequest_Transaction) isListDocumentsRequest_ConsistencySelector() {} +func (*ListDocumentsRequest_ReadTime) isListDocumentsRequest_ConsistencySelector() {} + +func (m *ListDocumentsRequest) GetConsistencySelector() isListDocumentsRequest_ConsistencySelector { + if m != nil { + return m.ConsistencySelector + } + return nil +} + +func (m *ListDocumentsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListDocumentsRequest) 
GetCollectionId() string { + if m != nil { + return m.CollectionId + } + return "" +} + +func (m *ListDocumentsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDocumentsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListDocumentsRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListDocumentsRequest) GetMask() *DocumentMask { + if m != nil { + return m.Mask + } + return nil +} + +func (m *ListDocumentsRequest) GetTransaction() []byte { + if x, ok := m.GetConsistencySelector().(*ListDocumentsRequest_Transaction); ok { + return x.Transaction + } + return nil +} + +func (m *ListDocumentsRequest) GetReadTime() *google_protobuf1.Timestamp { + if x, ok := m.GetConsistencySelector().(*ListDocumentsRequest_ReadTime); ok { + return x.ReadTime + } + return nil +} + +func (m *ListDocumentsRequest) GetShowMissing() bool { + if m != nil { + return m.ShowMissing + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ListDocumentsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ListDocumentsRequest_OneofMarshaler, _ListDocumentsRequest_OneofUnmarshaler, _ListDocumentsRequest_OneofSizer, []interface{}{ + (*ListDocumentsRequest_Transaction)(nil), + (*ListDocumentsRequest_ReadTime)(nil), + } +} + +func _ListDocumentsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ListDocumentsRequest) + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *ListDocumentsRequest_Transaction: + b.EncodeVarint(8<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Transaction) + case *ListDocumentsRequest_ReadTime: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadTime); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ListDocumentsRequest.ConsistencySelector has unexpected type %T", x) + } + return nil +} + +func _ListDocumentsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ListDocumentsRequest) + switch tag { + case 8: // consistency_selector.transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ConsistencySelector = &ListDocumentsRequest_Transaction{x} + return true, err + case 10: // consistency_selector.read_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.ConsistencySelector = &ListDocumentsRequest_ReadTime{msg} + return true, err + default: + return false, nil + } +} + +func _ListDocumentsRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ListDocumentsRequest) + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *ListDocumentsRequest_Transaction: + n += 
proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Transaction))) + n += len(x.Transaction) + case *ListDocumentsRequest_ReadTime: + s := proto.Size(x.ReadTime) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments]. +type ListDocumentsResponse struct { + // The Documents found. + Documents []*Document `protobuf:"bytes,1,rep,name=documents" json:"documents,omitempty"` + // The next page token. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} } +func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) } +func (*ListDocumentsResponse) ProtoMessage() {} +func (*ListDocumentsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *ListDocumentsResponse) GetDocuments() []*Document { + if m != nil { + return m.Documents + } + return nil +} + +func (m *ListDocumentsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request for [Firestore.CreateDocument][google.firestore.v1beta1.Firestore.CreateDocument]. +type CreateDocumentRequest struct { + // The parent resource. For example: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The collection ID, relative to `parent`, to list. For example: `chatrooms`. 
+ CollectionId string `protobuf:"bytes,2,opt,name=collection_id,json=collectionId" json:"collection_id,omitempty"` + // The client-assigned document ID to use for this document. + // + // Optional. If not specified, an ID will be assigned by the service. + DocumentId string `protobuf:"bytes,3,opt,name=document_id,json=documentId" json:"document_id,omitempty"` + // The document to create. `name` must not be set. + Document *Document `protobuf:"bytes,4,opt,name=document" json:"document,omitempty"` + // The fields to return. If not set, returns all fields. + // + // If the document has a field that is not present in this mask, that field + // will not be returned in the response. + Mask *DocumentMask `protobuf:"bytes,5,opt,name=mask" json:"mask,omitempty"` +} + +func (m *CreateDocumentRequest) Reset() { *m = CreateDocumentRequest{} } +func (m *CreateDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDocumentRequest) ProtoMessage() {} +func (*CreateDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *CreateDocumentRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateDocumentRequest) GetCollectionId() string { + if m != nil { + return m.CollectionId + } + return "" +} + +func (m *CreateDocumentRequest) GetDocumentId() string { + if m != nil { + return m.DocumentId + } + return "" +} + +func (m *CreateDocumentRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *CreateDocumentRequest) GetMask() *DocumentMask { + if m != nil { + return m.Mask + } + return nil +} + +// The request for [Firestore.UpdateDocument][google.firestore.v1beta1.Firestore.UpdateDocument]. +type UpdateDocumentRequest struct { + // The updated document. + // Creates the document if it does not already exist. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The fields to update. 
+ // None of the field paths in the mask may contain a reserved name. + // + // If the document exists on the server and has fields not referenced in the + // mask, they are left unchanged. + // Fields referenced in the mask, but not present in the input document, are + // deleted from the document on the server. + UpdateMask *DocumentMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` + // The fields to return. If not set, returns all fields. + // + // If the document has a field that is not present in this mask, that field + // will not be returned in the response. + Mask *DocumentMask `protobuf:"bytes,3,opt,name=mask" json:"mask,omitempty"` + // An optional precondition on the document. + // The request will fail if this is set and not met by the target document. + CurrentDocument *Precondition `protobuf:"bytes,4,opt,name=current_document,json=currentDocument" json:"current_document,omitempty"` +} + +func (m *UpdateDocumentRequest) Reset() { *m = UpdateDocumentRequest{} } +func (m *UpdateDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDocumentRequest) ProtoMessage() {} +func (*UpdateDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *UpdateDocumentRequest) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *UpdateDocumentRequest) GetUpdateMask() *DocumentMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateDocumentRequest) GetMask() *DocumentMask { + if m != nil { + return m.Mask + } + return nil +} + +func (m *UpdateDocumentRequest) GetCurrentDocument() *Precondition { + if m != nil { + return m.CurrentDocument + } + return nil +} + +// The request for [Firestore.DeleteDocument][google.firestore.v1beta1.Firestore.DeleteDocument]. +type DeleteDocumentRequest struct { + // The resource name of the Document to delete. 
In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // An optional precondition on the document. + // The request will fail if this is set and not met by the target document. + CurrentDocument *Precondition `protobuf:"bytes,2,opt,name=current_document,json=currentDocument" json:"current_document,omitempty"` +} + +func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } +func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentRequest) ProtoMessage() {} +func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *DeleteDocumentRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteDocumentRequest) GetCurrentDocument() *Precondition { + if m != nil { + return m.CurrentDocument + } + return nil +} + +// The request for [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments]. +type BatchGetDocumentsRequest struct { + // The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // The names of the documents to retrieve. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // The request will fail if any of the document is not a child resource of the + // given `database`. Duplicate names will be elided. + Documents []string `protobuf:"bytes,2,rep,name=documents" json:"documents,omitempty"` + // The fields to return. If not set, returns all fields. + // + // If a document has a field that is not present in this mask, that field will + // not be returned in the response. + Mask *DocumentMask `protobuf:"bytes,3,opt,name=mask" json:"mask,omitempty"` + // The consistency mode for this transaction. 
+ // If not set, defaults to strong consistency. + // + // Types that are valid to be assigned to ConsistencySelector: + // *BatchGetDocumentsRequest_Transaction + // *BatchGetDocumentsRequest_NewTransaction + // *BatchGetDocumentsRequest_ReadTime + ConsistencySelector isBatchGetDocumentsRequest_ConsistencySelector `protobuf_oneof:"consistency_selector"` +} + +func (m *BatchGetDocumentsRequest) Reset() { *m = BatchGetDocumentsRequest{} } +func (m *BatchGetDocumentsRequest) String() string { return proto.CompactTextString(m) } +func (*BatchGetDocumentsRequest) ProtoMessage() {} +func (*BatchGetDocumentsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +type isBatchGetDocumentsRequest_ConsistencySelector interface { + isBatchGetDocumentsRequest_ConsistencySelector() +} + +type BatchGetDocumentsRequest_Transaction struct { + Transaction []byte `protobuf:"bytes,4,opt,name=transaction,proto3,oneof"` +} +type BatchGetDocumentsRequest_NewTransaction struct { + NewTransaction *TransactionOptions `protobuf:"bytes,5,opt,name=new_transaction,json=newTransaction,oneof"` +} +type BatchGetDocumentsRequest_ReadTime struct { + ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,7,opt,name=read_time,json=readTime,oneof"` +} + +func (*BatchGetDocumentsRequest_Transaction) isBatchGetDocumentsRequest_ConsistencySelector() {} +func (*BatchGetDocumentsRequest_NewTransaction) isBatchGetDocumentsRequest_ConsistencySelector() {} +func (*BatchGetDocumentsRequest_ReadTime) isBatchGetDocumentsRequest_ConsistencySelector() {} + +func (m *BatchGetDocumentsRequest) GetConsistencySelector() isBatchGetDocumentsRequest_ConsistencySelector { + if m != nil { + return m.ConsistencySelector + } + return nil +} + +func (m *BatchGetDocumentsRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *BatchGetDocumentsRequest) GetDocuments() []string { + if m != nil { + return m.Documents + } + return nil +} + +func (m 
*BatchGetDocumentsRequest) GetMask() *DocumentMask { + if m != nil { + return m.Mask + } + return nil +} + +func (m *BatchGetDocumentsRequest) GetTransaction() []byte { + if x, ok := m.GetConsistencySelector().(*BatchGetDocumentsRequest_Transaction); ok { + return x.Transaction + } + return nil +} + +func (m *BatchGetDocumentsRequest) GetNewTransaction() *TransactionOptions { + if x, ok := m.GetConsistencySelector().(*BatchGetDocumentsRequest_NewTransaction); ok { + return x.NewTransaction + } + return nil +} + +func (m *BatchGetDocumentsRequest) GetReadTime() *google_protobuf1.Timestamp { + if x, ok := m.GetConsistencySelector().(*BatchGetDocumentsRequest_ReadTime); ok { + return x.ReadTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*BatchGetDocumentsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BatchGetDocumentsRequest_OneofMarshaler, _BatchGetDocumentsRequest_OneofUnmarshaler, _BatchGetDocumentsRequest_OneofSizer, []interface{}{ + (*BatchGetDocumentsRequest_Transaction)(nil), + (*BatchGetDocumentsRequest_NewTransaction)(nil), + (*BatchGetDocumentsRequest_ReadTime)(nil), + } +} + +func _BatchGetDocumentsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*BatchGetDocumentsRequest) + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *BatchGetDocumentsRequest_Transaction: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Transaction) + case *BatchGetDocumentsRequest_NewTransaction: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NewTransaction); err != nil { + return err + } + case *BatchGetDocumentsRequest_ReadTime: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadTime); err != nil { + return err + } + case nil: + default: + return 
fmt.Errorf("BatchGetDocumentsRequest.ConsistencySelector has unexpected type %T", x) + } + return nil +} + +func _BatchGetDocumentsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*BatchGetDocumentsRequest) + switch tag { + case 4: // consistency_selector.transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ConsistencySelector = &BatchGetDocumentsRequest_Transaction{x} + return true, err + case 5: // consistency_selector.new_transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions) + err := b.DecodeMessage(msg) + m.ConsistencySelector = &BatchGetDocumentsRequest_NewTransaction{msg} + return true, err + case 7: // consistency_selector.read_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.ConsistencySelector = &BatchGetDocumentsRequest_ReadTime{msg} + return true, err + default: + return false, nil + } +} + +func _BatchGetDocumentsRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BatchGetDocumentsRequest) + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *BatchGetDocumentsRequest_Transaction: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Transaction))) + n += len(x.Transaction) + case *BatchGetDocumentsRequest_NewTransaction: + s := proto.Size(x.NewTransaction) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchGetDocumentsRequest_ReadTime: + s := proto.Size(x.ReadTime) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The streamed response for 
[Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments]. +type BatchGetDocumentsResponse struct { + // A single result. + // This can be empty if the server is just returning a transaction. + // + // Types that are valid to be assigned to Result: + // *BatchGetDocumentsResponse_Found + // *BatchGetDocumentsResponse_Missing + Result isBatchGetDocumentsResponse_Result `protobuf_oneof:"result"` + // The transaction that was started as part of this request. + // Will only be set in the first response, and only if + // [BatchGetDocumentsRequest.new_transaction][google.firestore.v1beta1.BatchGetDocumentsRequest.new_transaction] was set in the request. + Transaction []byte `protobuf:"bytes,3,opt,name=transaction,proto3" json:"transaction,omitempty"` + // The time at which the document was read. + // This may be monotically increasing, in this case the previous documents in + // the result stream are guaranteed not to have changed between their + // read_time and this one. 
+ ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=read_time,json=readTime" json:"read_time,omitempty"` +} + +func (m *BatchGetDocumentsResponse) Reset() { *m = BatchGetDocumentsResponse{} } +func (m *BatchGetDocumentsResponse) String() string { return proto.CompactTextString(m) } +func (*BatchGetDocumentsResponse) ProtoMessage() {} +func (*BatchGetDocumentsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +type isBatchGetDocumentsResponse_Result interface { + isBatchGetDocumentsResponse_Result() +} + +type BatchGetDocumentsResponse_Found struct { + Found *Document `protobuf:"bytes,1,opt,name=found,oneof"` +} +type BatchGetDocumentsResponse_Missing struct { + Missing string `protobuf:"bytes,2,opt,name=missing,oneof"` +} + +func (*BatchGetDocumentsResponse_Found) isBatchGetDocumentsResponse_Result() {} +func (*BatchGetDocumentsResponse_Missing) isBatchGetDocumentsResponse_Result() {} + +func (m *BatchGetDocumentsResponse) GetResult() isBatchGetDocumentsResponse_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *BatchGetDocumentsResponse) GetFound() *Document { + if x, ok := m.GetResult().(*BatchGetDocumentsResponse_Found); ok { + return x.Found + } + return nil +} + +func (m *BatchGetDocumentsResponse) GetMissing() string { + if x, ok := m.GetResult().(*BatchGetDocumentsResponse_Missing); ok { + return x.Missing + } + return "" +} + +func (m *BatchGetDocumentsResponse) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *BatchGetDocumentsResponse) GetReadTime() *google_protobuf1.Timestamp { + if m != nil { + return m.ReadTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*BatchGetDocumentsResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BatchGetDocumentsResponse_OneofMarshaler, _BatchGetDocumentsResponse_OneofUnmarshaler, _BatchGetDocumentsResponse_OneofSizer, []interface{}{ + (*BatchGetDocumentsResponse_Found)(nil), + (*BatchGetDocumentsResponse_Missing)(nil), + } +} + +func _BatchGetDocumentsResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*BatchGetDocumentsResponse) + // result + switch x := m.Result.(type) { + case *BatchGetDocumentsResponse_Found: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Found); err != nil { + return err + } + case *BatchGetDocumentsResponse_Missing: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Missing) + case nil: + default: + return fmt.Errorf("BatchGetDocumentsResponse.Result has unexpected type %T", x) + } + return nil +} + +func _BatchGetDocumentsResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*BatchGetDocumentsResponse) + switch tag { + case 1: // result.found + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Document) + err := b.DecodeMessage(msg) + m.Result = &BatchGetDocumentsResponse_Found{msg} + return true, err + case 2: // result.missing + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &BatchGetDocumentsResponse_Missing{x} + return true, err + default: + return false, nil + } +} + +func _BatchGetDocumentsResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BatchGetDocumentsResponse) + // result + switch x := m.Result.(type) { + case *BatchGetDocumentsResponse_Found: + s := proto.Size(x.Found) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) 
+ n += s + case *BatchGetDocumentsResponse_Missing: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Missing))) + n += len(x.Missing) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The request for [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction]. +type BeginTransactionRequest struct { + // The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // The options for the transaction. + // Defaults to a read-write transaction. + Options *TransactionOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} +func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +func (m *BeginTransactionRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *BeginTransactionRequest) GetOptions() *TransactionOptions { + if m != nil { + return m.Options + } + return nil +} + +// The response for [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction]. +type BeginTransactionResponse struct { + // The transaction that was started. 
+ Transaction []byte `protobuf:"bytes,1,opt,name=transaction,proto3" json:"transaction,omitempty"` +} + +func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } +func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionResponse) ProtoMessage() {} +func (*BeginTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +func (m *BeginTransactionResponse) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit]. +type CommitRequest struct { + // The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // The writes to apply. + // + // Always executed atomically and in order. + Writes []*Write `protobuf:"bytes,2,rep,name=writes" json:"writes,omitempty"` + // If set, applies all writes in this transaction, and commits it. + Transaction []byte `protobuf:"bytes,3,opt,name=transaction,proto3" json:"transaction,omitempty"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{10} } + +func (m *CommitRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *CommitRequest) GetWrites() []*Write { + if m != nil { + return m.Writes + } + return nil +} + +func (m *CommitRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The response for [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit]. +type CommitResponse struct { + // The result of applying the writes. + // + // This i-th write result corresponds to the i-th write in the + // request. 
+ WriteResults []*WriteResult `protobuf:"bytes,1,rep,name=write_results,json=writeResults" json:"write_results,omitempty"` + // The time at which the commit occurred. + CommitTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=commit_time,json=commitTime" json:"commit_time,omitempty"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } + +func (m *CommitResponse) GetWriteResults() []*WriteResult { + if m != nil { + return m.WriteResults + } + return nil +} + +func (m *CommitResponse) GetCommitTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CommitTime + } + return nil +} + +// The request for [Firestore.Rollback][google.firestore.v1beta1.Firestore.Rollback]. +type RollbackRequest struct { + // The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // The transaction to roll back. + Transaction []byte `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} +func (*RollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{12} } + +func (m *RollbackRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *RollbackRequest) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +// The request for [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery]. +type RunQueryRequest struct { + // The parent resource name. 
In the format: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // For example: + // `projects/my-project/databases/my-database/documents` or + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The query to run. + // + // Types that are valid to be assigned to QueryType: + // *RunQueryRequest_StructuredQuery + QueryType isRunQueryRequest_QueryType `protobuf_oneof:"query_type"` + // The consistency mode for this transaction. + // If not set, defaults to strong consistency. + // + // Types that are valid to be assigned to ConsistencySelector: + // *RunQueryRequest_Transaction + // *RunQueryRequest_NewTransaction + // *RunQueryRequest_ReadTime + ConsistencySelector isRunQueryRequest_ConsistencySelector `protobuf_oneof:"consistency_selector"` +} + +func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} } +func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) } +func (*RunQueryRequest) ProtoMessage() {} +func (*RunQueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{13} } + +type isRunQueryRequest_QueryType interface { + isRunQueryRequest_QueryType() +} +type isRunQueryRequest_ConsistencySelector interface { + isRunQueryRequest_ConsistencySelector() +} + +type RunQueryRequest_StructuredQuery struct { + StructuredQuery *StructuredQuery `protobuf:"bytes,2,opt,name=structured_query,json=structuredQuery,oneof"` +} +type RunQueryRequest_Transaction struct { + Transaction []byte `protobuf:"bytes,5,opt,name=transaction,proto3,oneof"` +} +type RunQueryRequest_NewTransaction struct { + NewTransaction *TransactionOptions `protobuf:"bytes,6,opt,name=new_transaction,json=newTransaction,oneof"` +} +type RunQueryRequest_ReadTime struct { + ReadTime *google_protobuf1.Timestamp 
`protobuf:"bytes,7,opt,name=read_time,json=readTime,oneof"` +} + +func (*RunQueryRequest_StructuredQuery) isRunQueryRequest_QueryType() {} +func (*RunQueryRequest_Transaction) isRunQueryRequest_ConsistencySelector() {} +func (*RunQueryRequest_NewTransaction) isRunQueryRequest_ConsistencySelector() {} +func (*RunQueryRequest_ReadTime) isRunQueryRequest_ConsistencySelector() {} + +func (m *RunQueryRequest) GetQueryType() isRunQueryRequest_QueryType { + if m != nil { + return m.QueryType + } + return nil +} +func (m *RunQueryRequest) GetConsistencySelector() isRunQueryRequest_ConsistencySelector { + if m != nil { + return m.ConsistencySelector + } + return nil +} + +func (m *RunQueryRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *RunQueryRequest) GetStructuredQuery() *StructuredQuery { + if x, ok := m.GetQueryType().(*RunQueryRequest_StructuredQuery); ok { + return x.StructuredQuery + } + return nil +} + +func (m *RunQueryRequest) GetTransaction() []byte { + if x, ok := m.GetConsistencySelector().(*RunQueryRequest_Transaction); ok { + return x.Transaction + } + return nil +} + +func (m *RunQueryRequest) GetNewTransaction() *TransactionOptions { + if x, ok := m.GetConsistencySelector().(*RunQueryRequest_NewTransaction); ok { + return x.NewTransaction + } + return nil +} + +func (m *RunQueryRequest) GetReadTime() *google_protobuf1.Timestamp { + if x, ok := m.GetConsistencySelector().(*RunQueryRequest_ReadTime); ok { + return x.ReadTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RunQueryRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RunQueryRequest_OneofMarshaler, _RunQueryRequest_OneofUnmarshaler, _RunQueryRequest_OneofSizer, []interface{}{ + (*RunQueryRequest_StructuredQuery)(nil), + (*RunQueryRequest_Transaction)(nil), + (*RunQueryRequest_NewTransaction)(nil), + (*RunQueryRequest_ReadTime)(nil), + } +} + +func _RunQueryRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RunQueryRequest) + // query_type + switch x := m.QueryType.(type) { + case *RunQueryRequest_StructuredQuery: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructuredQuery); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RunQueryRequest.QueryType has unexpected type %T", x) + } + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *RunQueryRequest_Transaction: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Transaction) + case *RunQueryRequest_NewTransaction: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NewTransaction); err != nil { + return err + } + case *RunQueryRequest_ReadTime: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadTime); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RunQueryRequest.ConsistencySelector has unexpected type %T", x) + } + return nil +} + +func _RunQueryRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RunQueryRequest) + switch tag { + case 2: // query_type.structured_query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StructuredQuery) + err := b.DecodeMessage(msg) + m.QueryType = &RunQueryRequest_StructuredQuery{msg} + return true, err + case 5: // consistency_selector.transaction + if 
wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ConsistencySelector = &RunQueryRequest_Transaction{x} + return true, err + case 6: // consistency_selector.new_transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions) + err := b.DecodeMessage(msg) + m.ConsistencySelector = &RunQueryRequest_NewTransaction{msg} + return true, err + case 7: // consistency_selector.read_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.ConsistencySelector = &RunQueryRequest_ReadTime{msg} + return true, err + default: + return false, nil + } +} + +func _RunQueryRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RunQueryRequest) + // query_type + switch x := m.QueryType.(type) { + case *RunQueryRequest_StructuredQuery: + s := proto.Size(x.StructuredQuery) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // consistency_selector + switch x := m.ConsistencySelector.(type) { + case *RunQueryRequest_Transaction: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Transaction))) + n += len(x.Transaction) + case *RunQueryRequest_NewTransaction: + s := proto.Size(x.NewTransaction) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RunQueryRequest_ReadTime: + s := proto.Size(x.ReadTime) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery]. 
+type RunQueryResponse struct { + // The transaction that was started as part of this request. + // Can only be set in the first response, and only if + // [RunQueryRequest.new_transaction][google.firestore.v1beta1.RunQueryRequest.new_transaction] was set in the request. + // If set, no other fields will be set in this response. + Transaction []byte `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"` + // A query result. + // Not set when reporting partial progress. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The time at which the document was read. This may be monotonically + // increasing; in this case, the previous documents in the result stream are + // guaranteed not to have changed between their `read_time` and this one. + // + // If the query returns no results, a response with `read_time` and no + // `document` will be sent, and this represents the time at which the query + // was run. + ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=read_time,json=readTime" json:"read_time,omitempty"` + // The number of results that have been skipped due to an offset between + // the last response and the current response. 
+ SkippedResults int32 `protobuf:"varint,4,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"` +} + +func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} } +func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) } +func (*RunQueryResponse) ProtoMessage() {} +func (*RunQueryResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{14} } + +func (m *RunQueryResponse) GetTransaction() []byte { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *RunQueryResponse) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *RunQueryResponse) GetReadTime() *google_protobuf1.Timestamp { + if m != nil { + return m.ReadTime + } + return nil +} + +func (m *RunQueryResponse) GetSkippedResults() int32 { + if m != nil { + return m.SkippedResults + } + return 0 +} + +// The request for [Firestore.Write][google.firestore.v1beta1.Firestore.Write]. +// +// The first request creates a stream, or resumes an existing one from a token. +// +// When creating a new stream, the server replies with a response containing +// only an ID and a token, to use in the next request. +// +// When resuming a stream, the server first streams any responses later than the +// given token, then a response containing only an up-to-date token, to use in +// the next request. +type WriteRequest struct { + // The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + // This is only required in the first message. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // The ID of the write stream to resume. + // This may only be set in the first message. When left empty, a new write + // stream will be created. + StreamId string `protobuf:"bytes,2,opt,name=stream_id,json=streamId" json:"stream_id,omitempty"` + // The writes to apply. + // + // Always executed atomically and in order. 
+ // This must be empty on the first request. + // This may be empty on the last request. + // This must not be empty on all other requests. + Writes []*Write `protobuf:"bytes,3,rep,name=writes" json:"writes,omitempty"` + // A stream token that was previously sent by the server. + // + // The client should set this field to the token from the most recent + // [WriteResponse][google.firestore.v1beta1.WriteResponse] it has received. This acknowledges that the client has + // received responses up to this token. After sending this token, earlier + // tokens may not be used anymore. + // + // The server may close the stream if there are too many unacknowledged + // responses. + // + // Leave this field unset when creating a new stream. To resume a stream at + // a specific point, set this field and the `stream_id` field. + // + // Leave this field unset when creating a new stream. + StreamToken []byte `protobuf:"bytes,4,opt,name=stream_token,json=streamToken,proto3" json:"stream_token,omitempty"` + // Labels associated with this write request. 
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{15} } + +func (m *WriteRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *WriteRequest) GetStreamId() string { + if m != nil { + return m.StreamId + } + return "" +} + +func (m *WriteRequest) GetWrites() []*Write { + if m != nil { + return m.Writes + } + return nil +} + +func (m *WriteRequest) GetStreamToken() []byte { + if m != nil { + return m.StreamToken + } + return nil +} + +func (m *WriteRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// The response for [Firestore.Write][google.firestore.v1beta1.Firestore.Write]. +type WriteResponse struct { + // The ID of the stream. + // Only set on the first message, when a new stream was created. + StreamId string `protobuf:"bytes,1,opt,name=stream_id,json=streamId" json:"stream_id,omitempty"` + // A token that represents the position of this response in the stream. + // This can be used by a client to resume the stream at this point. + // + // This field is always set. + StreamToken []byte `protobuf:"bytes,2,opt,name=stream_token,json=streamToken,proto3" json:"stream_token,omitempty"` + // The result of applying the writes. + // + // This i-th write result corresponds to the i-th write in the + // request. + WriteResults []*WriteResult `protobuf:"bytes,3,rep,name=write_results,json=writeResults" json:"write_results,omitempty"` + // The time at which the commit occurred. 
+ CommitTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=commit_time,json=commitTime" json:"commit_time,omitempty"` +} + +func (m *WriteResponse) Reset() { *m = WriteResponse{} } +func (m *WriteResponse) String() string { return proto.CompactTextString(m) } +func (*WriteResponse) ProtoMessage() {} +func (*WriteResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{16} } + +func (m *WriteResponse) GetStreamId() string { + if m != nil { + return m.StreamId + } + return "" +} + +func (m *WriteResponse) GetStreamToken() []byte { + if m != nil { + return m.StreamToken + } + return nil +} + +func (m *WriteResponse) GetWriteResults() []*WriteResult { + if m != nil { + return m.WriteResults + } + return nil +} + +func (m *WriteResponse) GetCommitTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CommitTime + } + return nil +} + +// A request for [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen] +type ListenRequest struct { + // The database name. In the format: + // `projects/{project_id}/databases/{database_id}`. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // The supported target changes. + // + // Types that are valid to be assigned to TargetChange: + // *ListenRequest_AddTarget + // *ListenRequest_RemoveTarget + TargetChange isListenRequest_TargetChange `protobuf_oneof:"target_change"` + // Labels associated with this target change. 
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ListenRequest) Reset() { *m = ListenRequest{} } +func (m *ListenRequest) String() string { return proto.CompactTextString(m) } +func (*ListenRequest) ProtoMessage() {} +func (*ListenRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{17} } + +type isListenRequest_TargetChange interface { + isListenRequest_TargetChange() +} + +type ListenRequest_AddTarget struct { + AddTarget *Target `protobuf:"bytes,2,opt,name=add_target,json=addTarget,oneof"` +} +type ListenRequest_RemoveTarget struct { + RemoveTarget int32 `protobuf:"varint,3,opt,name=remove_target,json=removeTarget,oneof"` +} + +func (*ListenRequest_AddTarget) isListenRequest_TargetChange() {} +func (*ListenRequest_RemoveTarget) isListenRequest_TargetChange() {} + +func (m *ListenRequest) GetTargetChange() isListenRequest_TargetChange { + if m != nil { + return m.TargetChange + } + return nil +} + +func (m *ListenRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *ListenRequest) GetAddTarget() *Target { + if x, ok := m.GetTargetChange().(*ListenRequest_AddTarget); ok { + return x.AddTarget + } + return nil +} + +func (m *ListenRequest) GetRemoveTarget() int32 { + if x, ok := m.GetTargetChange().(*ListenRequest_RemoveTarget); ok { + return x.RemoveTarget + } + return 0 +} + +func (m *ListenRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ListenRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ListenRequest_OneofMarshaler, _ListenRequest_OneofUnmarshaler, _ListenRequest_OneofSizer, []interface{}{ + (*ListenRequest_AddTarget)(nil), + (*ListenRequest_RemoveTarget)(nil), + } +} + +func _ListenRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ListenRequest) + // target_change + switch x := m.TargetChange.(type) { + case *ListenRequest_AddTarget: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AddTarget); err != nil { + return err + } + case *ListenRequest_RemoveTarget: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.RemoveTarget)) + case nil: + default: + return fmt.Errorf("ListenRequest.TargetChange has unexpected type %T", x) + } + return nil +} + +func _ListenRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ListenRequest) + switch tag { + case 2: // target_change.add_target + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Target) + err := b.DecodeMessage(msg) + m.TargetChange = &ListenRequest_AddTarget{msg} + return true, err + case 3: // target_change.remove_target + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetChange = &ListenRequest_RemoveTarget{int32(x)} + return true, err + default: + return false, nil + } +} + +func _ListenRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ListenRequest) + // target_change + switch x := m.TargetChange.(type) { + case *ListenRequest_AddTarget: + s := proto.Size(x.AddTarget) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ListenRequest_RemoveTarget: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n 
+= proto.SizeVarint(uint64(x.RemoveTarget)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen]. +type ListenResponse struct { + // The supported responses. + // + // Types that are valid to be assigned to ResponseType: + // *ListenResponse_TargetChange + // *ListenResponse_DocumentChange + // *ListenResponse_DocumentDelete + // *ListenResponse_DocumentRemove + // *ListenResponse_Filter + ResponseType isListenResponse_ResponseType `protobuf_oneof:"response_type"` +} + +func (m *ListenResponse) Reset() { *m = ListenResponse{} } +func (m *ListenResponse) String() string { return proto.CompactTextString(m) } +func (*ListenResponse) ProtoMessage() {} +func (*ListenResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{18} } + +type isListenResponse_ResponseType interface { + isListenResponse_ResponseType() +} + +type ListenResponse_TargetChange struct { + TargetChange *TargetChange `protobuf:"bytes,2,opt,name=target_change,json=targetChange,oneof"` +} +type ListenResponse_DocumentChange struct { + DocumentChange *DocumentChange `protobuf:"bytes,3,opt,name=document_change,json=documentChange,oneof"` +} +type ListenResponse_DocumentDelete struct { + DocumentDelete *DocumentDelete `protobuf:"bytes,4,opt,name=document_delete,json=documentDelete,oneof"` +} +type ListenResponse_DocumentRemove struct { + DocumentRemove *DocumentRemove `protobuf:"bytes,6,opt,name=document_remove,json=documentRemove,oneof"` +} +type ListenResponse_Filter struct { + Filter *ExistenceFilter `protobuf:"bytes,5,opt,name=filter,oneof"` +} + +func (*ListenResponse_TargetChange) isListenResponse_ResponseType() {} +func (*ListenResponse_DocumentChange) isListenResponse_ResponseType() {} +func (*ListenResponse_DocumentDelete) isListenResponse_ResponseType() {} +func (*ListenResponse_DocumentRemove) isListenResponse_ResponseType() {} +func 
(*ListenResponse_Filter) isListenResponse_ResponseType() {} + +func (m *ListenResponse) GetResponseType() isListenResponse_ResponseType { + if m != nil { + return m.ResponseType + } + return nil +} + +func (m *ListenResponse) GetTargetChange() *TargetChange { + if x, ok := m.GetResponseType().(*ListenResponse_TargetChange); ok { + return x.TargetChange + } + return nil +} + +func (m *ListenResponse) GetDocumentChange() *DocumentChange { + if x, ok := m.GetResponseType().(*ListenResponse_DocumentChange); ok { + return x.DocumentChange + } + return nil +} + +func (m *ListenResponse) GetDocumentDelete() *DocumentDelete { + if x, ok := m.GetResponseType().(*ListenResponse_DocumentDelete); ok { + return x.DocumentDelete + } + return nil +} + +func (m *ListenResponse) GetDocumentRemove() *DocumentRemove { + if x, ok := m.GetResponseType().(*ListenResponse_DocumentRemove); ok { + return x.DocumentRemove + } + return nil +} + +func (m *ListenResponse) GetFilter() *ExistenceFilter { + if x, ok := m.GetResponseType().(*ListenResponse_Filter); ok { + return x.Filter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ListenResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ListenResponse_OneofMarshaler, _ListenResponse_OneofUnmarshaler, _ListenResponse_OneofSizer, []interface{}{ + (*ListenResponse_TargetChange)(nil), + (*ListenResponse_DocumentChange)(nil), + (*ListenResponse_DocumentDelete)(nil), + (*ListenResponse_DocumentRemove)(nil), + (*ListenResponse_Filter)(nil), + } +} + +func _ListenResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ListenResponse) + // response_type + switch x := m.ResponseType.(type) { + case *ListenResponse_TargetChange: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TargetChange); err != nil { + return err + } + case *ListenResponse_DocumentChange: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DocumentChange); err != nil { + return err + } + case *ListenResponse_DocumentDelete: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DocumentDelete); err != nil { + return err + } + case *ListenResponse_DocumentRemove: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DocumentRemove); err != nil { + return err + } + case *ListenResponse_Filter: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Filter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ListenResponse.ResponseType has unexpected type %T", x) + } + return nil +} + +func _ListenResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ListenResponse) + switch tag { + case 2: // response_type.target_change + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TargetChange) + err := b.DecodeMessage(msg) + m.ResponseType = &ListenResponse_TargetChange{msg} + return true, err + case 3: // 
response_type.document_change + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DocumentChange) + err := b.DecodeMessage(msg) + m.ResponseType = &ListenResponse_DocumentChange{msg} + return true, err + case 4: // response_type.document_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DocumentDelete) + err := b.DecodeMessage(msg) + m.ResponseType = &ListenResponse_DocumentDelete{msg} + return true, err + case 6: // response_type.document_remove + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DocumentRemove) + err := b.DecodeMessage(msg) + m.ResponseType = &ListenResponse_DocumentRemove{msg} + return true, err + case 5: // response_type.filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExistenceFilter) + err := b.DecodeMessage(msg) + m.ResponseType = &ListenResponse_Filter{msg} + return true, err + default: + return false, nil + } +} + +func _ListenResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ListenResponse) + // response_type + switch x := m.ResponseType.(type) { + case *ListenResponse_TargetChange: + s := proto.Size(x.TargetChange) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ListenResponse_DocumentChange: + s := proto.Size(x.DocumentChange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ListenResponse_DocumentDelete: + s := proto.Size(x.DocumentDelete) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ListenResponse_DocumentRemove: + s := proto.Size(x.DocumentRemove) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ListenResponse_Filter: + s := proto.Size(x.Filter) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + 
case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A specification of a set of documents to listen to. +type Target struct { + // The type of target to listen to. + // + // Types that are valid to be assigned to TargetType: + // *Target_Query + // *Target_Documents + TargetType isTarget_TargetType `protobuf_oneof:"target_type"` + // When to start listening. + // + // If not specified, all matching Documents are returned before any + // subsequent changes. + // + // Types that are valid to be assigned to ResumeType: + // *Target_ResumeToken + // *Target_ReadTime + ResumeType isTarget_ResumeType `protobuf_oneof:"resume_type"` + // A client provided target ID. + // + // If not set, the server will assign an ID for the target. + // + // Used for resuming a target without changing IDs. The IDs can either be + // client-assigned or be server-assigned in a previous stream. All targets + // with client provided IDs must be added before adding a target that needs + // a server-assigned id. + TargetId int32 `protobuf:"varint,5,opt,name=target_id,json=targetId" json:"target_id,omitempty"` + // If the target should be removed once it is current and consistent. 
+ Once bool `protobuf:"varint,6,opt,name=once" json:"once,omitempty"` +} + +func (m *Target) Reset() { *m = Target{} } +func (m *Target) String() string { return proto.CompactTextString(m) } +func (*Target) ProtoMessage() {} +func (*Target) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{19} } + +type isTarget_TargetType interface { + isTarget_TargetType() +} +type isTarget_ResumeType interface { + isTarget_ResumeType() +} + +type Target_Query struct { + Query *Target_QueryTarget `protobuf:"bytes,2,opt,name=query,oneof"` +} +type Target_Documents struct { + Documents *Target_DocumentsTarget `protobuf:"bytes,3,opt,name=documents,oneof"` +} +type Target_ResumeToken struct { + ResumeToken []byte `protobuf:"bytes,4,opt,name=resume_token,json=resumeToken,proto3,oneof"` +} +type Target_ReadTime struct { + ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,11,opt,name=read_time,json=readTime,oneof"` +} + +func (*Target_Query) isTarget_TargetType() {} +func (*Target_Documents) isTarget_TargetType() {} +func (*Target_ResumeToken) isTarget_ResumeType() {} +func (*Target_ReadTime) isTarget_ResumeType() {} + +func (m *Target) GetTargetType() isTarget_TargetType { + if m != nil { + return m.TargetType + } + return nil +} +func (m *Target) GetResumeType() isTarget_ResumeType { + if m != nil { + return m.ResumeType + } + return nil +} + +func (m *Target) GetQuery() *Target_QueryTarget { + if x, ok := m.GetTargetType().(*Target_Query); ok { + return x.Query + } + return nil +} + +func (m *Target) GetDocuments() *Target_DocumentsTarget { + if x, ok := m.GetTargetType().(*Target_Documents); ok { + return x.Documents + } + return nil +} + +func (m *Target) GetResumeToken() []byte { + if x, ok := m.GetResumeType().(*Target_ResumeToken); ok { + return x.ResumeToken + } + return nil +} + +func (m *Target) GetReadTime() *google_protobuf1.Timestamp { + if x, ok := m.GetResumeType().(*Target_ReadTime); ok { + return x.ReadTime + } + return nil +} + +func (m *Target) 
GetTargetId() int32 { + if m != nil { + return m.TargetId + } + return 0 +} + +func (m *Target) GetOnce() bool { + if m != nil { + return m.Once + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Target) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Target_OneofMarshaler, _Target_OneofUnmarshaler, _Target_OneofSizer, []interface{}{ + (*Target_Query)(nil), + (*Target_Documents)(nil), + (*Target_ResumeToken)(nil), + (*Target_ReadTime)(nil), + } +} + +func _Target_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Target) + // target_type + switch x := m.TargetType.(type) { + case *Target_Query: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } + case *Target_Documents: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Documents); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Target.TargetType has unexpected type %T", x) + } + // resume_type + switch x := m.ResumeType.(type) { + case *Target_ResumeToken: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ResumeToken) + case *Target_ReadTime: + b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadTime); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Target.ResumeType has unexpected type %T", x) + } + return nil +} + +func _Target_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Target) + switch tag { + case 2: // target_type.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Target_QueryTarget) + err := b.DecodeMessage(msg) + m.TargetType = &Target_Query{msg} + return true, err + case 3: // target_type.documents + if wire != proto.WireBytes { + 
return true, proto.ErrInternalBadWireType + } + msg := new(Target_DocumentsTarget) + err := b.DecodeMessage(msg) + m.TargetType = &Target_Documents{msg} + return true, err + case 4: // resume_type.resume_token + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.ResumeType = &Target_ResumeToken{x} + return true, err + case 11: // resume_type.read_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.ResumeType = &Target_ReadTime{msg} + return true, err + default: + return false, nil + } +} + +func _Target_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Target) + // target_type + switch x := m.TargetType.(type) { + case *Target_Query: + s := proto.Size(x.Query) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Target_Documents: + s := proto.Size(x.Documents) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // resume_type + switch x := m.ResumeType.(type) { + case *Target_ResumeToken: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ResumeToken))) + n += len(x.ResumeToken) + case *Target_ReadTime: + s := proto.Size(x.ReadTime) + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A target specified by a set of documents names. +type Target_DocumentsTarget struct { + // The names of the documents to retrieve. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // The request will fail if any of the document is not a child resource of + // the given `database`. Duplicate names will be elided. 
+ Documents []string `protobuf:"bytes,2,rep,name=documents" json:"documents,omitempty"` +} + +func (m *Target_DocumentsTarget) Reset() { *m = Target_DocumentsTarget{} } +func (m *Target_DocumentsTarget) String() string { return proto.CompactTextString(m) } +func (*Target_DocumentsTarget) ProtoMessage() {} +func (*Target_DocumentsTarget) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{19, 0} } + +func (m *Target_DocumentsTarget) GetDocuments() []string { + if m != nil { + return m.Documents + } + return nil +} + +// A target specified by a query. +type Target_QueryTarget struct { + // The parent resource name. In the format: + // `projects/{project_id}/databases/{database_id}/documents` or + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // For example: + // `projects/my-project/databases/my-database/documents` or + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The query to run. 
+ // + // Types that are valid to be assigned to QueryType: + // *Target_QueryTarget_StructuredQuery + QueryType isTarget_QueryTarget_QueryType `protobuf_oneof:"query_type"` +} + +func (m *Target_QueryTarget) Reset() { *m = Target_QueryTarget{} } +func (m *Target_QueryTarget) String() string { return proto.CompactTextString(m) } +func (*Target_QueryTarget) ProtoMessage() {} +func (*Target_QueryTarget) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{19, 1} } + +type isTarget_QueryTarget_QueryType interface { + isTarget_QueryTarget_QueryType() +} + +type Target_QueryTarget_StructuredQuery struct { + StructuredQuery *StructuredQuery `protobuf:"bytes,2,opt,name=structured_query,json=structuredQuery,oneof"` +} + +func (*Target_QueryTarget_StructuredQuery) isTarget_QueryTarget_QueryType() {} + +func (m *Target_QueryTarget) GetQueryType() isTarget_QueryTarget_QueryType { + if m != nil { + return m.QueryType + } + return nil +} + +func (m *Target_QueryTarget) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *Target_QueryTarget) GetStructuredQuery() *StructuredQuery { + if x, ok := m.GetQueryType().(*Target_QueryTarget_StructuredQuery); ok { + return x.StructuredQuery + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Target_QueryTarget) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Target_QueryTarget_OneofMarshaler, _Target_QueryTarget_OneofUnmarshaler, _Target_QueryTarget_OneofSizer, []interface{}{ + (*Target_QueryTarget_StructuredQuery)(nil), + } +} + +func _Target_QueryTarget_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Target_QueryTarget) + // query_type + switch x := m.QueryType.(type) { + case *Target_QueryTarget_StructuredQuery: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructuredQuery); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Target_QueryTarget.QueryType has unexpected type %T", x) + } + return nil +} + +func _Target_QueryTarget_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Target_QueryTarget) + switch tag { + case 2: // query_type.structured_query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StructuredQuery) + err := b.DecodeMessage(msg) + m.QueryType = &Target_QueryTarget_StructuredQuery{msg} + return true, err + default: + return false, nil + } +} + +func _Target_QueryTarget_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Target_QueryTarget) + // query_type + switch x := m.QueryType.(type) { + case *Target_QueryTarget_StructuredQuery: + s := proto.Size(x.StructuredQuery) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Targets being watched have changed. +type TargetChange struct { + // The type of change that occurred. 
+ TargetChangeType TargetChange_TargetChangeType `protobuf:"varint,1,opt,name=target_change_type,json=targetChangeType,enum=google.firestore.v1beta1.TargetChange_TargetChangeType" json:"target_change_type,omitempty"` + // The target IDs of targets that have changed. + // + // If empty, the change applies to all targets. + // + // For `target_change_type=ADD`, the order of the target IDs matches the order + // of the requests to add the targets. This allows clients to unambiguously + // associate server-assigned target IDs with added targets. + // + // For other states, the order of the target IDs is not defined. + TargetIds []int32 `protobuf:"varint,2,rep,packed,name=target_ids,json=targetIds" json:"target_ids,omitempty"` + // The error that resulted in this change, if applicable. + Cause *google_rpc.Status `protobuf:"bytes,3,opt,name=cause" json:"cause,omitempty"` + // A token that can be used to resume the stream for the given `target_ids`, + // or all targets if `target_ids` is empty. + // + // Not set on every target change. + ResumeToken []byte `protobuf:"bytes,4,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` + // The consistent `read_time` for the given `target_ids` (omitted when the + // target_ids are not at a consistent snapshot). + // + // The stream is guaranteed to send a `read_time` with `target_ids` empty + // whenever the entire stream reaches a new consistent snapshot. ADD, + // CURRENT, and RESET messages are guaranteed to (eventually) result in a + // new consistent snapshot (while NO_CHANGE and REMOVE messages are not). + // + // For a given stream, `read_time` is guaranteed to be monotonically + // increasing. 
+ ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=read_time,json=readTime" json:"read_time,omitempty"` +} + +func (m *TargetChange) Reset() { *m = TargetChange{} } +func (m *TargetChange) String() string { return proto.CompactTextString(m) } +func (*TargetChange) ProtoMessage() {} +func (*TargetChange) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{20} } + +func (m *TargetChange) GetTargetChangeType() TargetChange_TargetChangeType { + if m != nil { + return m.TargetChangeType + } + return TargetChange_NO_CHANGE +} + +func (m *TargetChange) GetTargetIds() []int32 { + if m != nil { + return m.TargetIds + } + return nil +} + +func (m *TargetChange) GetCause() *google_rpc.Status { + if m != nil { + return m.Cause + } + return nil +} + +func (m *TargetChange) GetResumeToken() []byte { + if m != nil { + return m.ResumeToken + } + return nil +} + +func (m *TargetChange) GetReadTime() *google_protobuf1.Timestamp { + if m != nil { + return m.ReadTime + } + return nil +} + +// The request for [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds]. +type ListCollectionIdsRequest struct { + // The parent document. In the format: + // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. + // For example: + // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The maximum number of results to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // A page token. Must be a value from + // [ListCollectionIdsResponse][google.firestore.v1beta1.ListCollectionIdsResponse]. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListCollectionIdsRequest) Reset() { *m = ListCollectionIdsRequest{} } +func (m *ListCollectionIdsRequest) String() string { return proto.CompactTextString(m) } +func (*ListCollectionIdsRequest) ProtoMessage() {} +func (*ListCollectionIdsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{21} } + +func (m *ListCollectionIdsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListCollectionIdsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListCollectionIdsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response from [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds]. +type ListCollectionIdsResponse struct { + // The collection ids. + CollectionIds []string `protobuf:"bytes,1,rep,name=collection_ids,json=collectionIds" json:"collection_ids,omitempty"` + // A page token that may be used to continue the list. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListCollectionIdsResponse) Reset() { *m = ListCollectionIdsResponse{} } +func (m *ListCollectionIdsResponse) String() string { return proto.CompactTextString(m) } +func (*ListCollectionIdsResponse) ProtoMessage() {} +func (*ListCollectionIdsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{22} } + +func (m *ListCollectionIdsResponse) GetCollectionIds() []string { + if m != nil { + return m.CollectionIds + } + return nil +} + +func (m *ListCollectionIdsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*GetDocumentRequest)(nil), "google.firestore.v1beta1.GetDocumentRequest") + proto.RegisterType((*ListDocumentsRequest)(nil), "google.firestore.v1beta1.ListDocumentsRequest") + proto.RegisterType((*ListDocumentsResponse)(nil), "google.firestore.v1beta1.ListDocumentsResponse") + proto.RegisterType((*CreateDocumentRequest)(nil), "google.firestore.v1beta1.CreateDocumentRequest") + proto.RegisterType((*UpdateDocumentRequest)(nil), "google.firestore.v1beta1.UpdateDocumentRequest") + proto.RegisterType((*DeleteDocumentRequest)(nil), "google.firestore.v1beta1.DeleteDocumentRequest") + proto.RegisterType((*BatchGetDocumentsRequest)(nil), "google.firestore.v1beta1.BatchGetDocumentsRequest") + proto.RegisterType((*BatchGetDocumentsResponse)(nil), "google.firestore.v1beta1.BatchGetDocumentsResponse") + proto.RegisterType((*BeginTransactionRequest)(nil), "google.firestore.v1beta1.BeginTransactionRequest") + proto.RegisterType((*BeginTransactionResponse)(nil), "google.firestore.v1beta1.BeginTransactionResponse") + proto.RegisterType((*CommitRequest)(nil), "google.firestore.v1beta1.CommitRequest") + proto.RegisterType((*CommitResponse)(nil), "google.firestore.v1beta1.CommitResponse") + proto.RegisterType((*RollbackRequest)(nil), 
"google.firestore.v1beta1.RollbackRequest") + proto.RegisterType((*RunQueryRequest)(nil), "google.firestore.v1beta1.RunQueryRequest") + proto.RegisterType((*RunQueryResponse)(nil), "google.firestore.v1beta1.RunQueryResponse") + proto.RegisterType((*WriteRequest)(nil), "google.firestore.v1beta1.WriteRequest") + proto.RegisterType((*WriteResponse)(nil), "google.firestore.v1beta1.WriteResponse") + proto.RegisterType((*ListenRequest)(nil), "google.firestore.v1beta1.ListenRequest") + proto.RegisterType((*ListenResponse)(nil), "google.firestore.v1beta1.ListenResponse") + proto.RegisterType((*Target)(nil), "google.firestore.v1beta1.Target") + proto.RegisterType((*Target_DocumentsTarget)(nil), "google.firestore.v1beta1.Target.DocumentsTarget") + proto.RegisterType((*Target_QueryTarget)(nil), "google.firestore.v1beta1.Target.QueryTarget") + proto.RegisterType((*TargetChange)(nil), "google.firestore.v1beta1.TargetChange") + proto.RegisterType((*ListCollectionIdsRequest)(nil), "google.firestore.v1beta1.ListCollectionIdsRequest") + proto.RegisterType((*ListCollectionIdsResponse)(nil), "google.firestore.v1beta1.ListCollectionIdsResponse") + proto.RegisterEnum("google.firestore.v1beta1.TargetChange_TargetChangeType", TargetChange_TargetChangeType_name, TargetChange_TargetChangeType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Firestore service + +type FirestoreClient interface { + // Gets a single document. + GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*Document, error) + // Lists documents. + ListDocuments(ctx context.Context, in *ListDocumentsRequest, opts ...grpc.CallOption) (*ListDocumentsResponse, error) + // Creates a new document. 
+ CreateDocument(ctx context.Context, in *CreateDocumentRequest, opts ...grpc.CallOption) (*Document, error) + // Updates or inserts a document. + UpdateDocument(ctx context.Context, in *UpdateDocumentRequest, opts ...grpc.CallOption) (*Document, error) + // Deletes a document. + DeleteDocument(ctx context.Context, in *DeleteDocumentRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Gets multiple documents. + // + // Documents returned by this method are not guaranteed to be returned in the + // same order that they were requested. + BatchGetDocuments(ctx context.Context, in *BatchGetDocumentsRequest, opts ...grpc.CallOption) (Firestore_BatchGetDocumentsClient, error) + // Starts a new transaction. + BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*BeginTransactionResponse, error) + // Commits a transaction, while optionally updating documents. + Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) + // Rolls back a transaction. + Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Runs a query. + RunQuery(ctx context.Context, in *RunQueryRequest, opts ...grpc.CallOption) (Firestore_RunQueryClient, error) + // Streams batches of document updates and deletes, in order. + Write(ctx context.Context, opts ...grpc.CallOption) (Firestore_WriteClient, error) + // Listens to changes. + Listen(ctx context.Context, opts ...grpc.CallOption) (Firestore_ListenClient, error) + // Lists all the collection IDs underneath a document. 
+ ListCollectionIds(ctx context.Context, in *ListCollectionIdsRequest, opts ...grpc.CallOption) (*ListCollectionIdsResponse, error) +} + +type firestoreClient struct { + cc *grpc.ClientConn +} + +func NewFirestoreClient(cc *grpc.ClientConn) FirestoreClient { + return &firestoreClient{cc} +} + +func (c *firestoreClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*Document, error) { + out := new(Document) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/GetDocument", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *firestoreClient) ListDocuments(ctx context.Context, in *ListDocumentsRequest, opts ...grpc.CallOption) (*ListDocumentsResponse, error) { + out := new(ListDocumentsResponse) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/ListDocuments", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *firestoreClient) CreateDocument(ctx context.Context, in *CreateDocumentRequest, opts ...grpc.CallOption) (*Document, error) { + out := new(Document) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/CreateDocument", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *firestoreClient) UpdateDocument(ctx context.Context, in *UpdateDocumentRequest, opts ...grpc.CallOption) (*Document, error) { + out := new(Document) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/UpdateDocument", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *firestoreClient) DeleteDocument(ctx context.Context, in *DeleteDocumentRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/DeleteDocument", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *firestoreClient) BatchGetDocuments(ctx context.Context, in *BatchGetDocumentsRequest, opts ...grpc.CallOption) (Firestore_BatchGetDocumentsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Firestore_serviceDesc.Streams[0], c.cc, "/google.firestore.v1beta1.Firestore/BatchGetDocuments", opts...) + if err != nil { + return nil, err + } + x := &firestoreBatchGetDocumentsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Firestore_BatchGetDocumentsClient interface { + Recv() (*BatchGetDocumentsResponse, error) + grpc.ClientStream +} + +type firestoreBatchGetDocumentsClient struct { + grpc.ClientStream +} + +func (x *firestoreBatchGetDocumentsClient) Recv() (*BatchGetDocumentsResponse, error) { + m := new(BatchGetDocumentsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *firestoreClient) BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*BeginTransactionResponse, error) { + out := new(BeginTransactionResponse) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/BeginTransaction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *firestoreClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { + out := new(CommitResponse) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/Commit", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *firestoreClient) Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/Rollback", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *firestoreClient) RunQuery(ctx context.Context, in *RunQueryRequest, opts ...grpc.CallOption) (Firestore_RunQueryClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Firestore_serviceDesc.Streams[1], c.cc, "/google.firestore.v1beta1.Firestore/RunQuery", opts...) + if err != nil { + return nil, err + } + x := &firestoreRunQueryClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Firestore_RunQueryClient interface { + Recv() (*RunQueryResponse, error) + grpc.ClientStream +} + +type firestoreRunQueryClient struct { + grpc.ClientStream +} + +func (x *firestoreRunQueryClient) Recv() (*RunQueryResponse, error) { + m := new(RunQueryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *firestoreClient) Write(ctx context.Context, opts ...grpc.CallOption) (Firestore_WriteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Firestore_serviceDesc.Streams[2], c.cc, "/google.firestore.v1beta1.Firestore/Write", opts...) 
+ if err != nil { + return nil, err + } + x := &firestoreWriteClient{stream} + return x, nil +} + +type Firestore_WriteClient interface { + Send(*WriteRequest) error + Recv() (*WriteResponse, error) + grpc.ClientStream +} + +type firestoreWriteClient struct { + grpc.ClientStream +} + +func (x *firestoreWriteClient) Send(m *WriteRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *firestoreWriteClient) Recv() (*WriteResponse, error) { + m := new(WriteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *firestoreClient) Listen(ctx context.Context, opts ...grpc.CallOption) (Firestore_ListenClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Firestore_serviceDesc.Streams[3], c.cc, "/google.firestore.v1beta1.Firestore/Listen", opts...) + if err != nil { + return nil, err + } + x := &firestoreListenClient{stream} + return x, nil +} + +type Firestore_ListenClient interface { + Send(*ListenRequest) error + Recv() (*ListenResponse, error) + grpc.ClientStream +} + +type firestoreListenClient struct { + grpc.ClientStream +} + +func (x *firestoreListenClient) Send(m *ListenRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *firestoreListenClient) Recv() (*ListenResponse, error) { + m := new(ListenResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *firestoreClient) ListCollectionIds(ctx context.Context, in *ListCollectionIdsRequest, opts ...grpc.CallOption) (*ListCollectionIdsResponse, error) { + out := new(ListCollectionIdsResponse) + err := grpc.Invoke(ctx, "/google.firestore.v1beta1.Firestore/ListCollectionIds", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Firestore service + +type FirestoreServer interface { + // Gets a single document. + GetDocument(context.Context, *GetDocumentRequest) (*Document, error) + // Lists documents. 
+ ListDocuments(context.Context, *ListDocumentsRequest) (*ListDocumentsResponse, error) + // Creates a new document. + CreateDocument(context.Context, *CreateDocumentRequest) (*Document, error) + // Updates or inserts a document. + UpdateDocument(context.Context, *UpdateDocumentRequest) (*Document, error) + // Deletes a document. + DeleteDocument(context.Context, *DeleteDocumentRequest) (*google_protobuf4.Empty, error) + // Gets multiple documents. + // + // Documents returned by this method are not guaranteed to be returned in the + // same order that they were requested. + BatchGetDocuments(*BatchGetDocumentsRequest, Firestore_BatchGetDocumentsServer) error + // Starts a new transaction. + BeginTransaction(context.Context, *BeginTransactionRequest) (*BeginTransactionResponse, error) + // Commits a transaction, while optionally updating documents. + Commit(context.Context, *CommitRequest) (*CommitResponse, error) + // Rolls back a transaction. + Rollback(context.Context, *RollbackRequest) (*google_protobuf4.Empty, error) + // Runs a query. + RunQuery(*RunQueryRequest, Firestore_RunQueryServer) error + // Streams batches of document updates and deletes, in order. + Write(Firestore_WriteServer) error + // Listens to changes. + Listen(Firestore_ListenServer) error + // Lists all the collection IDs underneath a document. 
+ ListCollectionIds(context.Context, *ListCollectionIdsRequest) (*ListCollectionIdsResponse, error) +} + +func RegisterFirestoreServer(s *grpc.Server, srv FirestoreServer) { + s.RegisterService(&_Firestore_serviceDesc, srv) +} + +func _Firestore_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDocumentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).GetDocument(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.firestore.v1beta1.Firestore/GetDocument", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).GetDocument(ctx, req.(*GetDocumentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Firestore_ListDocuments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDocumentsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).ListDocuments(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.firestore.v1beta1.Firestore/ListDocuments", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).ListDocuments(ctx, req.(*ListDocumentsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Firestore_CreateDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDocumentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).CreateDocument(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.firestore.v1beta1.Firestore/CreateDocument", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).CreateDocument(ctx, req.(*CreateDocumentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Firestore_UpdateDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDocumentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).UpdateDocument(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.firestore.v1beta1.Firestore/UpdateDocument", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).UpdateDocument(ctx, req.(*UpdateDocumentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Firestore_DeleteDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDocumentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).DeleteDocument(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.firestore.v1beta1.Firestore/DeleteDocument", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).DeleteDocument(ctx, req.(*DeleteDocumentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Firestore_BatchGetDocuments_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BatchGetDocumentsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(FirestoreServer).BatchGetDocuments(m, &firestoreBatchGetDocumentsServer{stream}) +} + +type Firestore_BatchGetDocumentsServer interface { + 
Send(*BatchGetDocumentsResponse) error + grpc.ServerStream +} + +type firestoreBatchGetDocumentsServer struct { + grpc.ServerStream +} + +func (x *firestoreBatchGetDocumentsServer) Send(m *BatchGetDocumentsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Firestore_BeginTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BeginTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).BeginTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.firestore.v1beta1.Firestore/BeginTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).BeginTransaction(ctx, req.(*BeginTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Firestore_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.firestore.v1beta1.Firestore/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).Commit(ctx, req.(*CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Firestore_Rollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).Rollback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.firestore.v1beta1.Firestore/Rollback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).Rollback(ctx, req.(*RollbackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Firestore_RunQuery_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(RunQueryRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(FirestoreServer).RunQuery(m, &firestoreRunQueryServer{stream}) +} + +type Firestore_RunQueryServer interface { + Send(*RunQueryResponse) error + grpc.ServerStream +} + +type firestoreRunQueryServer struct { + grpc.ServerStream +} + +func (x *firestoreRunQueryServer) Send(m *RunQueryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Firestore_Write_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FirestoreServer).Write(&firestoreWriteServer{stream}) +} + +type Firestore_WriteServer interface { + Send(*WriteResponse) error + Recv() (*WriteRequest, error) + grpc.ServerStream +} + +type firestoreWriteServer struct { + grpc.ServerStream +} + +func (x *firestoreWriteServer) Send(m *WriteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *firestoreWriteServer) Recv() (*WriteRequest, error) { + m := new(WriteRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Firestore_Listen_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FirestoreServer).Listen(&firestoreListenServer{stream}) +} + +type Firestore_ListenServer interface { + Send(*ListenResponse) error + Recv() (*ListenRequest, error) + grpc.ServerStream +} + +type firestoreListenServer struct { + grpc.ServerStream +} + +func (x *firestoreListenServer) Send(m *ListenResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *firestoreListenServer) Recv() (*ListenRequest, error) { + m := new(ListenRequest) + if err := x.ServerStream.RecvMsg(m); 
err != nil { + return nil, err + } + return m, nil +} + +func _Firestore_ListCollectionIds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCollectionIdsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FirestoreServer).ListCollectionIds(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.firestore.v1beta1.Firestore/ListCollectionIds", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FirestoreServer).ListCollectionIds(ctx, req.(*ListCollectionIdsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Firestore_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.firestore.v1beta1.Firestore", + HandlerType: (*FirestoreServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetDocument", + Handler: _Firestore_GetDocument_Handler, + }, + { + MethodName: "ListDocuments", + Handler: _Firestore_ListDocuments_Handler, + }, + { + MethodName: "CreateDocument", + Handler: _Firestore_CreateDocument_Handler, + }, + { + MethodName: "UpdateDocument", + Handler: _Firestore_UpdateDocument_Handler, + }, + { + MethodName: "DeleteDocument", + Handler: _Firestore_DeleteDocument_Handler, + }, + { + MethodName: "BeginTransaction", + Handler: _Firestore_BeginTransaction_Handler, + }, + { + MethodName: "Commit", + Handler: _Firestore_Commit_Handler, + }, + { + MethodName: "Rollback", + Handler: _Firestore_Rollback_Handler, + }, + { + MethodName: "ListCollectionIds", + Handler: _Firestore_ListCollectionIds_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "BatchGetDocuments", + Handler: _Firestore_BatchGetDocuments_Handler, + ServerStreams: true, + }, + { + StreamName: "RunQuery", + Handler: _Firestore_RunQuery_Handler, + ServerStreams: true, + }, + { + StreamName: "Write", + Handler: _Firestore_Write_Handler, + 
ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Listen", + Handler: _Firestore_Listen_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/firestore/v1beta1/firestore.proto", +} + +func init() { proto.RegisterFile("google/firestore/v1beta1/firestore.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 2166 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcd, 0x8f, 0x1b, 0x49, + 0x15, 0x77, 0xf9, 0x6b, 0xec, 0xe7, 0x8f, 0xf1, 0x96, 0x92, 0xac, 0xe3, 0x64, 0xc9, 0xd0, 0x4b, + 0x12, 0x63, 0xad, 0xec, 0x64, 0x22, 0x14, 0xd6, 0x21, 0xcb, 0x66, 0x66, 0x3c, 0xe3, 0x09, 0x49, + 0x66, 0xd2, 0x33, 0x9b, 0x48, 0x5c, 0xac, 0x9e, 0xee, 0x8a, 0xd3, 0x3b, 0x76, 0xb7, 0xb7, 0xbb, + 0x9c, 0xd9, 0xd9, 0xd5, 0x20, 0xe0, 0xc0, 0x05, 0x89, 0x0b, 0x07, 0x40, 0x48, 0x1c, 0xd8, 0x03, + 0xd2, 0x22, 0xe0, 0x82, 0xc4, 0x0d, 0x89, 0x1b, 0x12, 0xe2, 0x84, 0x84, 0x90, 0x38, 0xc0, 0x81, + 0x13, 0x37, 0xfe, 0x03, 0xd4, 0x55, 0xd5, 0xed, 0xee, 0xf6, 0x57, 0xdb, 0x13, 0xed, 0xad, 0xeb, + 0xf9, 0xd5, 0xab, 0xf7, 0xf1, 0x7b, 0xaf, 0x5e, 0x3d, 0x43, 0xb5, 0x6b, 0x9a, 0xdd, 0x1e, 0x69, + 0xbc, 0xd0, 0x2d, 0x62, 0x53, 0xd3, 0x22, 0x8d, 0x57, 0xb7, 0x8f, 0x08, 0x55, 0x6e, 0x8f, 0x28, + 0xf5, 0x81, 0x65, 0x52, 0x13, 0x97, 0x39, 0x67, 0x7d, 0x44, 0x17, 0x9c, 0x95, 0xab, 0x42, 0x86, + 0x32, 0xd0, 0x1b, 0x8a, 0x61, 0x98, 0x54, 0xa1, 0xba, 0x69, 0xd8, 0x7c, 0x5f, 0xe5, 0xfa, 0xd4, + 0x13, 0x54, 0xb3, 0xdf, 0x37, 0x0d, 0xc1, 0x76, 0x73, 0x2a, 0x9b, 0x66, 0xaa, 0xc3, 0x3e, 0x31, + 0xa8, 0x60, 0xfc, 0xca, 0x54, 0xc6, 0x8f, 0x86, 0xc4, 0x3a, 0x9d, 0xcb, 0x75, 0x62, 0xe9, 0x54, + 0xd8, 0x54, 0xb9, 0x22, 0xb8, 0xd8, 0xea, 0x68, 0xf8, 0xa2, 0x41, 0xfa, 0x03, 0xea, 0x8a, 0xb8, + 0x16, 0xfe, 0x91, 0xea, 0x7d, 0x62, 0x53, 0xa5, 0x3f, 0x10, 0x0c, 0x6f, 0x0a, 0x06, 0x6b, 0xa0, + 0x36, 0x6c, 0xaa, 0xd0, 0xa1, 0x30, 0x59, 0xfa, 0x27, 0x02, 0xbc, 0x43, 0xe8, 0x96, 0x50, 0x5c, + 0x26, 0x1f, 0x0d, 
0x89, 0x4d, 0x31, 0x86, 0xa4, 0xa1, 0xf4, 0x49, 0x19, 0xad, 0xa1, 0x6a, 0x56, + 0x66, 0xdf, 0xb8, 0x09, 0xc9, 0xbe, 0x62, 0x1f, 0x97, 0xe3, 0x6b, 0xa8, 0x9a, 0x5b, 0xbf, 0x51, + 0x9f, 0xe6, 0xe4, 0xba, 0x2b, 0xec, 0xb1, 0x62, 0x1f, 0xcb, 0x6c, 0x0f, 0x96, 0x20, 0x47, 0x2d, + 0xc5, 0xb0, 0x15, 0xd5, 0xf1, 0x77, 0x39, 0xb1, 0x86, 0xaa, 0xf9, 0x76, 0x4c, 0xf6, 0x13, 0xf1, + 0xbb, 0x90, 0xb5, 0x88, 0xa2, 0x75, 0x1c, 0xdd, 0xcb, 0x29, 0x76, 0x48, 0xc5, 0x3d, 0xc4, 0x35, + 0xac, 0x7e, 0xe8, 0x1a, 0xd6, 0x8e, 0xc9, 0x19, 0x87, 0xdd, 0x21, 0x6c, 0x5c, 0x82, 0x0b, 0xaa, + 0x69, 0xd8, 0xba, 0x4d, 0x89, 0xa1, 0x9e, 0x76, 0x6c, 0xd2, 0x23, 0x2a, 0x35, 0x2d, 0xe9, 0xbb, + 0x09, 0xb8, 0xf0, 0x48, 0xb7, 0x3d, 0xf3, 0x6c, 0xd7, 0xbe, 0x4b, 0x90, 0x1e, 0x28, 0x16, 0x31, + 0xa8, 0xb0, 0x50, 0xac, 0xf0, 0xdb, 0x50, 0x50, 0xcd, 0x9e, 0xb3, 0x5b, 0x37, 0x8d, 0x8e, 0xae, + 0x31, 0x63, 0xb3, 0x72, 0x7e, 0x44, 0xdc, 0xd5, 0xf0, 0x15, 0xc8, 0x0e, 0x94, 0x2e, 0xe9, 0xd8, + 0xfa, 0x27, 0x84, 0x99, 0x92, 0x92, 0x33, 0x0e, 0xe1, 0x40, 0xff, 0x84, 0xe0, 0xb7, 0x00, 0xd8, + 0x8f, 0xd4, 0x3c, 0x26, 0x46, 0x39, 0xc9, 0xb6, 0x33, 0xf6, 0x43, 0x87, 0x80, 0x2f, 0x43, 0xc6, + 0xb4, 0x34, 0x62, 0x75, 0x8e, 0x4e, 0xcb, 0x69, 0xf6, 0xe3, 0x0a, 0x5b, 0x6f, 0x9c, 0x7a, 0xfe, + 0x5d, 0x39, 0xbf, 0x7f, 0x33, 0x73, 0xfd, 0x0b, 0x8b, 0xf8, 0x17, 0x7f, 0x19, 0xf2, 0xf6, 0x4b, + 0xf3, 0xa4, 0xd3, 0xd7, 0x6d, 0x5b, 0x37, 0xba, 0xe5, 0xfc, 0x1a, 0xaa, 0x66, 0xe4, 0x9c, 0x43, + 0x7b, 0xcc, 0x49, 0x53, 0x43, 0xf0, 0x3d, 0x04, 0x17, 0x43, 0x21, 0xb0, 0x07, 0xa6, 0x61, 0x13, + 0xfc, 0x3e, 0x64, 0xdd, 0x7c, 0xb1, 0xcb, 0x68, 0x2d, 0x51, 0xcd, 0xad, 0x4b, 0xf3, 0x8d, 0x96, + 0x47, 0x9b, 0xf0, 0x0d, 0x58, 0x35, 0xc8, 0xc7, 0xb4, 0xe3, 0x73, 0x38, 0x8f, 0x57, 0xc1, 0x21, + 0xef, 0xbb, 0x4e, 0x97, 0xfe, 0x87, 0xe0, 0xe2, 0xa6, 0x45, 0x14, 0x4a, 0xc2, 0x38, 0x3f, 0x17, + 0x0e, 0xae, 0x41, 0xce, 0xd5, 0xc5, 0x61, 0x49, 0x30, 0x16, 0x70, 0x49, 0xbb, 0x1a, 0x7e, 0x0f, + 0x32, 0xee, 0x8a, 0x21, 0x21, 0x9a, 0x81, 0xde, 0x1e, 
0x0f, 0x11, 0xa9, 0xc5, 0x11, 0x21, 0xfd, + 0x2e, 0x0e, 0x17, 0x3f, 0x18, 0x68, 0x13, 0x6c, 0xf6, 0x6b, 0x85, 0x96, 0xd0, 0x6a, 0x07, 0x72, + 0x43, 0x26, 0xb8, 0xb3, 0x44, 0x39, 0x00, 0xbe, 0xd5, 0xf9, 0xf6, 0xcc, 0x4b, 0x2c, 0x01, 0xf8, + 0xa7, 0x50, 0x52, 0x87, 0x96, 0x13, 0xab, 0x4e, 0xc8, 0xc5, 0x33, 0xe4, 0xec, 0x5b, 0x44, 0x35, + 0x0d, 0x4d, 0x77, 0xe2, 0x27, 0xaf, 0x8a, 0xfd, 0xae, 0x70, 0xe9, 0x3b, 0x70, 0x71, 0x8b, 0xf4, + 0xc8, 0xb8, 0xc3, 0x26, 0x15, 0xc3, 0x49, 0xe7, 0xc7, 0xcf, 0x77, 0xfe, 0xbf, 0xe3, 0x50, 0xde, + 0x50, 0xa8, 0xfa, 0xd2, 0x57, 0x8f, 0xbd, 0x82, 0x55, 0x81, 0x8c, 0xa6, 0x50, 0xe5, 0x48, 0xb1, + 0x5d, 0x3d, 0xbc, 0x35, 0xbe, 0xea, 0x4f, 0xa4, 0xf8, 0x5a, 0xc2, 0xa9, 0x38, 0xa3, 0x24, 0x39, + 0x8f, 0x97, 0x43, 0x65, 0x25, 0x39, 0xa9, 0xac, 0x3c, 0x77, 0x92, 0xf0, 0xa4, 0xe3, 0xe7, 0xe3, + 0x78, 0x7d, 0x67, 0xfa, 0x51, 0x87, 0x23, 0xe6, 0xbd, 0x01, 0xbb, 0x81, 0xdb, 0x31, 0xb9, 0x68, + 0x90, 0x93, 0xc3, 0x69, 0xf5, 0x6a, 0xe5, 0xb5, 0xdc, 0x07, 0xff, 0x40, 0x70, 0x79, 0x82, 0x8b, + 0x45, 0x41, 0x6a, 0x42, 0xea, 0x85, 0x39, 0x34, 0xb4, 0xe8, 0x59, 0xd1, 0x8e, 0xc9, 0x7c, 0x0b, + 0xae, 0xc0, 0x8a, 0x5b, 0x1c, 0x59, 0xa9, 0x68, 0xc7, 0x64, 0x97, 0x80, 0xd7, 0x26, 0x5c, 0x7e, + 0x41, 0x1f, 0xde, 0xf5, 0x9b, 0x9a, 0x9c, 0x67, 0xaa, 0xcf, 0xd0, 0x0c, 0xa4, 0x2d, 0x62, 0x0f, + 0x7b, 0x54, 0x3a, 0x83, 0x37, 0x37, 0x48, 0x57, 0x37, 0x7c, 0x1e, 0x8c, 0x82, 0x9d, 0x6d, 0x58, + 0x31, 0x79, 0x04, 0x04, 0x7c, 0x17, 0x8a, 0x9a, 0xec, 0x6e, 0x96, 0xbe, 0x01, 0xe5, 0xf1, 0xe3, + 0x85, 0x5f, 0x43, 0xf6, 0xa3, 0x31, 0xfb, 0xa5, 0x1f, 0x20, 0x28, 0x6c, 0x9a, 0xfd, 0xbe, 0x4e, + 0xa3, 0xe8, 0x7c, 0x17, 0xd2, 0xac, 0x33, 0xe2, 0x60, 0xcf, 0xad, 0x5f, 0x9b, 0xae, 0xf2, 0x73, + 0x87, 0x4f, 0x16, 0xec, 0xf3, 0x03, 0x21, 0xfd, 0x0c, 0x41, 0xd1, 0x55, 0x44, 0x68, 0xff, 0x10, + 0x0a, 0x6c, 0x7b, 0x87, 0x3b, 0xda, 0xbd, 0xaa, 0xae, 0xcf, 0x3b, 0x94, 0x71, 0xcb, 0xf9, 0x93, + 0xd1, 0xc2, 0xc6, 0xf7, 0x20, 0xa7, 0x32, 0xe9, 0x3c, 0xd2, 0xf1, 0xb9, 0x91, 0x06, 0xce, 
0xee, + 0x10, 0xa4, 0x3d, 0x58, 0x95, 0xcd, 0x5e, 0xef, 0x48, 0x51, 0x8f, 0xa3, 0x78, 0x29, 0x64, 0x6c, + 0x7c, 0xdc, 0xd8, 0x7f, 0xc5, 0x61, 0x55, 0x1e, 0x1a, 0x4f, 0x9d, 0x5e, 0x74, 0xde, 0x85, 0xf8, + 0x0c, 0x4a, 0x36, 0xb5, 0x86, 0x2a, 0x1d, 0x5a, 0x44, 0xeb, 0xb0, 0xf6, 0x55, 0xa8, 0xff, 0xd5, + 0xe9, 0x8e, 0x38, 0xf0, 0x76, 0xb0, 0x33, 0xda, 0x31, 0x79, 0xd5, 0x0e, 0x92, 0xc2, 0x15, 0x26, + 0xc5, 0x2a, 0x0c, 0x9a, 0x5b, 0x61, 0xd2, 0x4b, 0x54, 0x18, 0x74, 0xde, 0x0a, 0x83, 0x7c, 0x89, + 0x97, 0x07, 0x60, 0x4e, 0xe8, 0xd0, 0xd3, 0xc1, 0xf4, 0x7a, 0xf3, 0x77, 0x04, 0xa5, 0x91, 0x87, + 0x27, 0xa7, 0xc3, 0x78, 0x60, 0xce, 0x7d, 0x43, 0x07, 0xca, 0x49, 0x22, 0x7a, 0x39, 0xc1, 0x37, + 0x61, 0xd5, 0x3e, 0xd6, 0x07, 0x03, 0xa2, 0x79, 0x68, 0x4f, 0xb2, 0xfe, 0xb6, 0x28, 0xc8, 0x02, + 0xc8, 0xd2, 0xe7, 0x71, 0xc8, 0x0b, 0x98, 0xcf, 0x47, 0xe2, 0x15, 0xc8, 0xda, 0xd4, 0x22, 0x4a, + 0x7f, 0xd4, 0x48, 0x65, 0x38, 0x61, 0x57, 0xf3, 0x25, 0x73, 0x62, 0xb1, 0x64, 0x76, 0x7a, 0x52, + 0x2e, 0x75, 0xd4, 0x6a, 0xe7, 0xe5, 0x1c, 0xa7, 0xf1, 0x66, 0xfb, 0x21, 0xa4, 0x7b, 0xca, 0x11, + 0xe9, 0xd9, 0xe5, 0x14, 0x93, 0xbd, 0x3e, 0x37, 0x67, 0x99, 0x31, 0xf5, 0x47, 0x6c, 0x53, 0xcb, + 0xa0, 0xd6, 0xa9, 0x2c, 0x24, 0x54, 0xde, 0x85, 0x9c, 0x8f, 0x8c, 0x4b, 0x90, 0x38, 0x26, 0xa7, + 0xc2, 0x54, 0xe7, 0x13, 0x5f, 0x80, 0xd4, 0x2b, 0xa5, 0x37, 0x24, 0xc2, 0x42, 0xbe, 0x68, 0xc6, + 0xbf, 0x8e, 0x9c, 0x5b, 0xa7, 0xe0, 0xd6, 0x04, 0x0e, 0x81, 0x80, 0x47, 0x50, 0xc8, 0x23, 0x61, + 0xc3, 0xe2, 0x93, 0x0c, 0x0b, 0xd5, 0xa4, 0xc4, 0x6b, 0xab, 0x49, 0xc9, 0x85, 0x6a, 0xd2, 0x6f, + 0xe3, 0x50, 0x78, 0xc4, 0x60, 0x1f, 0x05, 0x08, 0x0f, 0x00, 0x14, 0x4d, 0xeb, 0x50, 0xc5, 0xea, + 0x12, 0xb7, 0x5d, 0x5a, 0x9b, 0x91, 0xc3, 0x8c, 0xaf, 0x1d, 0x93, 0xb3, 0x8a, 0xa6, 0xf1, 0x05, + 0xbe, 0x0e, 0x05, 0x8b, 0xf4, 0xcd, 0x57, 0xc4, 0x95, 0xc2, 0xde, 0x5f, 0xed, 0x98, 0x9c, 0xe7, + 0x64, 0xc1, 0xf6, 0x2d, 0x2f, 0xf2, 0x49, 0xe6, 0x99, 0x3b, 0xd3, 0x4f, 0x09, 0xa8, 0xff, 0x9a, + 0x43, 0xbf, 0xb1, 0x0a, 0x05, 
0xae, 0x67, 0x47, 0x7d, 0xa9, 0x18, 0x5d, 0x22, 0xfd, 0x3e, 0x01, + 0x45, 0xf7, 0x44, 0x01, 0x86, 0xc7, 0x21, 0x9e, 0xf9, 0x7d, 0x24, 0x37, 0x72, 0x93, 0x71, 0x3b, + 0xa6, 0x53, 0xdf, 0x1a, 0x1f, 0xc0, 0xaa, 0xf7, 0x2a, 0x11, 0x02, 0x79, 0x09, 0xa8, 0xce, 0xaf, + 0x21, 0x9e, 0xc8, 0xa2, 0x16, 0xa0, 0x04, 0x84, 0x6a, 0xac, 0x49, 0x16, 0x40, 0x89, 0x20, 0x94, + 0x37, 0xd5, 0x7e, 0xa1, 0x9c, 0x12, 0x10, 0xca, 0xa3, 0x27, 0xea, 0x7a, 0x04, 0xa1, 0x32, 0xe3, + 0xf7, 0x0b, 0xe5, 0x14, 0xbc, 0x09, 0xe9, 0x17, 0x7a, 0x8f, 0x12, 0x4b, 0x74, 0xa1, 0x33, 0xae, + 0xa7, 0xd6, 0xc7, 0xbc, 0x60, 0x93, 0x6d, 0xb6, 0xa1, 0x1d, 0x93, 0xc5, 0x56, 0x27, 0x6c, 0x96, + 0x08, 0x0f, 0x2b, 0xf0, 0xd2, 0x8f, 0x92, 0x90, 0x16, 0xd0, 0xda, 0x82, 0x94, 0xff, 0xfa, 0x7b, + 0x67, 0x5e, 0x98, 0xea, 0xac, 0xfa, 0x7b, 0x58, 0xe6, 0x9b, 0xf1, 0xbe, 0xbf, 0x67, 0xe7, 0xf1, + 0xb9, 0x35, 0x57, 0x92, 0xd7, 0xb2, 0x8e, 0x32, 0x63, 0xd4, 0xe7, 0xbf, 0x0d, 0x79, 0xa7, 0x1a, + 0xf4, 0xfd, 0xa3, 0x07, 0x76, 0x95, 0x72, 0x2a, 0x2f, 0x1c, 0x81, 0x1b, 0x2f, 0xb7, 0xc8, 0x8d, + 0xe7, 0xd4, 0x2c, 0x01, 0x53, 0x5d, 0x63, 0xbe, 0x4d, 0xc9, 0x19, 0x4e, 0xd8, 0xd5, 0x9c, 0x27, + 0x92, 0x69, 0xa8, 0x3c, 0x7e, 0x19, 0x99, 0x7d, 0x57, 0x1a, 0xb0, 0x1a, 0x52, 0x78, 0xf6, 0x4b, + 0xa5, 0xf2, 0x43, 0x04, 0x39, 0x9f, 0xb3, 0xbe, 0xe8, 0x5e, 0x24, 0x74, 0xa7, 0x17, 0x20, 0x27, + 0xec, 0x75, 0x97, 0xae, 0x7b, 0x1d, 0x40, 0xfc, 0x37, 0x0e, 0x79, 0x7f, 0x1a, 0x62, 0x02, 0x38, + 0x90, 0xc5, 0x8c, 0x8d, 0x29, 0x5e, 0x5c, 0xbf, 0x1b, 0x2d, 0x95, 0x03, 0x8b, 0xc3, 0xd3, 0x01, + 0x91, 0x4b, 0x34, 0x44, 0xc1, 0x6f, 0x01, 0x78, 0x51, 0xe0, 0x2e, 0x4c, 0xc9, 0x59, 0x37, 0x0c, + 0x36, 0xae, 0x42, 0x4a, 0x55, 0x86, 0xb6, 0x9b, 0xf2, 0xd8, 0x3d, 0xd8, 0x1a, 0xa8, 0xf5, 0x03, + 0x36, 0xf7, 0x93, 0x39, 0x83, 0x73, 0xcb, 0x8c, 0xc3, 0x25, 0x08, 0x96, 0x40, 0x1b, 0x91, 0x8e, + 0xde, 0x46, 0x48, 0x4f, 0xa0, 0x14, 0x36, 0x05, 0x17, 0x20, 0xfb, 0x64, 0xaf, 0xb3, 0xd9, 0x7e, + 0xf0, 0x64, 0xa7, 0x55, 0x8a, 0xe1, 0x15, 0x48, 0x3c, 0xd8, 0xda, 
0x2a, 0x21, 0x0c, 0x90, 0x96, + 0x5b, 0x8f, 0xf7, 0x9e, 0xb5, 0x4a, 0x71, 0x9c, 0x83, 0x95, 0xcd, 0x0f, 0x64, 0xb9, 0xf5, 0xe4, + 0xb0, 0x94, 0xc0, 0x59, 0x48, 0xc9, 0xad, 0x83, 0xd6, 0x61, 0x29, 0x29, 0x19, 0x50, 0x76, 0x6a, + 0xe6, 0xa6, 0x6f, 0xf8, 0x32, 0x77, 0x92, 0x17, 0x18, 0xd2, 0xc5, 0x67, 0x0e, 0xe9, 0x12, 0xa1, + 0x21, 0x9d, 0xf4, 0x21, 0x5c, 0x9e, 0x70, 0x9e, 0x28, 0xd7, 0xd7, 0xa1, 0x18, 0x18, 0x0d, 0xf1, + 0x07, 0x41, 0x56, 0x2e, 0xf8, 0x67, 0x43, 0x91, 0x67, 0x53, 0xeb, 0xbf, 0xc1, 0x90, 0xdd, 0x76, + 0x61, 0x81, 0x7f, 0x81, 0x20, 0xe7, 0x7b, 0x9b, 0xe2, 0x19, 0xd5, 0x65, 0x7c, 0x6a, 0x5b, 0x89, + 0xd0, 0x25, 0x4a, 0xf7, 0xbf, 0xff, 0xb7, 0xff, 0xfc, 0x38, 0x7e, 0x17, 0x7f, 0xcd, 0x9b, 0x32, + 0x7f, 0x6a, 0x28, 0x7d, 0x72, 0x7f, 0x60, 0x99, 0x1f, 0x12, 0x95, 0xda, 0x8d, 0x5a, 0xc3, 0xbd, + 0xaf, 0xd9, 0xb7, 0x9b, 0x9d, 0x8d, 0x5a, 0xa3, 0x56, 0x3b, 0xc3, 0x7f, 0x42, 0xfc, 0xc2, 0xf7, + 0x32, 0x1b, 0xd7, 0x67, 0x5f, 0xad, 0xe1, 0x49, 0x46, 0xa5, 0x11, 0x99, 0x9f, 0x3b, 0x5c, 0xda, + 0x63, 0x1a, 0xef, 0xe2, 0x9d, 0x91, 0xc6, 0x3c, 0xc6, 0x11, 0x75, 0x6e, 0x7c, 0x1a, 0x88, 0xd7, + 0x19, 0xfe, 0xa3, 0xf3, 0xc8, 0x0b, 0x8c, 0x03, 0xf1, 0x0c, 0xa5, 0x26, 0x0e, 0x0e, 0x23, 0xb9, + 0xfa, 0x39, 0x53, 0xfc, 0xa9, 0xd4, 0x5a, 0x42, 0xf1, 0x71, 0xb5, 0x9b, 0xa3, 0xfe, 0xfe, 0x0f, + 0x08, 0x8a, 0xc1, 0xd9, 0xde, 0x2c, 0x03, 0x26, 0x4e, 0x01, 0x23, 0x19, 0xb0, 0xcf, 0x0c, 0x78, + 0xb8, 0xfe, 0xde, 0xc8, 0x00, 0xef, 0x1f, 0x8e, 0x05, 0x40, 0xe3, 0xd3, 0xfc, 0xa7, 0x08, 0x8a, + 0xc1, 0x21, 0xdb, 0x2c, 0xcd, 0x27, 0x8e, 0xe3, 0x2a, 0x97, 0xc6, 0x6a, 0x50, 0xab, 0x3f, 0xa0, + 0xa7, 0x2e, 0xb2, 0x6b, 0x4b, 0x22, 0xfb, 0xcf, 0x08, 0xde, 0x18, 0x9b, 0x0d, 0xe1, 0x19, 0x4f, + 0x86, 0x69, 0xb3, 0xba, 0xca, 0x9d, 0x85, 0xf6, 0x08, 0x94, 0xb7, 0x99, 0xf6, 0x1b, 0xd2, 0x7d, + 0x9f, 0xaf, 0x85, 0xb6, 0x53, 0x2c, 0x38, 0x1b, 0x99, 0xd0, 0x3c, 0x12, 0x72, 0x9b, 0xa8, 0x76, + 0x0b, 0xe1, 0xbf, 0x20, 0x28, 0x85, 0xa7, 0x31, 0xf8, 0xf6, 0x0c, 0xad, 0x26, 0x0f, 0x8e, 0x2a, + 0xeb, 
0x8b, 0x6c, 0x11, 0x76, 0x08, 0xcc, 0xf8, 0x41, 0xbf, 0x88, 0x1d, 0x21, 0xb1, 0x4d, 0x54, + 0xc3, 0x9f, 0x21, 0x48, 0xf3, 0x99, 0x0c, 0xbe, 0x39, 0x23, 0x4d, 0xfd, 0xe3, 0xa3, 0x4a, 0x75, + 0x3e, 0xa3, 0xd0, 0x77, 0x9b, 0xe9, 0xfb, 0xbe, 0x74, 0x6f, 0x29, 0x7d, 0xf9, 0x53, 0xc8, 0xd1, + 0xf2, 0x27, 0x08, 0x32, 0xee, 0x78, 0x06, 0xcf, 0xe8, 0x43, 0x42, 0x23, 0x9c, 0xa9, 0x68, 0x3e, + 0x1f, 0x1e, 0x2c, 0x71, 0x8a, 0xa3, 0xd9, 0xaf, 0x1d, 0xcd, 0xc4, 0x10, 0x62, 0xa6, 0x66, 0xc1, + 0x51, 0x50, 0xa5, 0x16, 0x85, 0x75, 0xba, 0x17, 0x23, 0x96, 0xba, 0xb3, 0xa6, 0x25, 0x84, 0x71, + 0xec, 0x7e, 0x86, 0x20, 0xc5, 0x1e, 0xab, 0xf8, 0x46, 0xb4, 0xd7, 0x7a, 0xe5, 0xe6, 0xfc, 0x57, + 0x2f, 0x57, 0xb2, 0xc5, 0x94, 0xfc, 0xa6, 0xd4, 0x5c, 0xca, 0xa5, 0xec, 0xd1, 0xdc, 0x44, 0xb5, + 0x2a, 0xba, 0x85, 0xf0, 0xaf, 0x10, 0xa4, 0xf9, 0x33, 0x6e, 0x16, 0x26, 0x03, 0x4f, 0xcb, 0x59, + 0x98, 0x0c, 0xbe, 0x08, 0xcf, 0x89, 0xc9, 0x1e, 0x13, 0xe6, 0x6a, 0xfa, 0x57, 0x04, 0x6f, 0x8c, + 0x35, 0x33, 0xb3, 0xca, 0xda, 0xb4, 0x4e, 0xab, 0x72, 0x67, 0xa1, 0x3d, 0xc1, 0xcb, 0x5b, 0xda, + 0x5a, 0x02, 0x18, 0xbd, 0xb0, 0xd4, 0x26, 0xaa, 0x6d, 0xfc, 0x1c, 0xc1, 0x55, 0xd5, 0xec, 0x4f, + 0xd5, 0x65, 0xa3, 0xe8, 0x75, 0x53, 0xfb, 0x4e, 0x46, 0xed, 0xa3, 0x6f, 0x3f, 0x10, 0xbc, 0x5d, + 0xb3, 0xa7, 0x18, 0xdd, 0xba, 0x69, 0x75, 0x1b, 0x5d, 0x62, 0xb0, 0x7c, 0x6b, 0xf0, 0x9f, 0x94, + 0x81, 0x6e, 0x8f, 0xff, 0xff, 0x7e, 0xcf, 0xa3, 0xfc, 0x32, 0x9e, 0xdc, 0xd9, 0xdc, 0x3e, 0xf8, + 0x3c, 0xfe, 0xa5, 0x1d, 0x2e, 0x6a, 0xb3, 0x67, 0x0e, 0xb5, 0xba, 0x77, 0x52, 0xfd, 0xd9, 0xed, + 0x0d, 0x67, 0xc7, 0x51, 0x9a, 0x49, 0xbd, 0xf3, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x4b, + 0x4d, 0x2f, 0xac, 0x20, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/query.pb.go b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/query.pb.go new file mode 100644 index 000000000..d39da6d8f --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/query.pb.go @@ -0,0 +1,782 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/firestore/v1beta1/query.proto + +package firestore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A sort direction. +type StructuredQuery_Direction int32 + +const ( + // Unspecified. + StructuredQuery_DIRECTION_UNSPECIFIED StructuredQuery_Direction = 0 + // Ascending. + StructuredQuery_ASCENDING StructuredQuery_Direction = 1 + // Descending. + StructuredQuery_DESCENDING StructuredQuery_Direction = 2 +) + +var StructuredQuery_Direction_name = map[int32]string{ + 0: "DIRECTION_UNSPECIFIED", + 1: "ASCENDING", + 2: "DESCENDING", +} +var StructuredQuery_Direction_value = map[string]int32{ + "DIRECTION_UNSPECIFIED": 0, + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x StructuredQuery_Direction) String() string { + return proto.EnumName(StructuredQuery_Direction_name, int32(x)) +} +func (StructuredQuery_Direction) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } + +// A composite filter operator. +type StructuredQuery_CompositeFilter_Operator int32 + +const ( + // Unspecified. This value must not be used. + StructuredQuery_CompositeFilter_OPERATOR_UNSPECIFIED StructuredQuery_CompositeFilter_Operator = 0 + // The results are required to satisfy each of the combined filters. 
+ StructuredQuery_CompositeFilter_AND StructuredQuery_CompositeFilter_Operator = 1 +) + +var StructuredQuery_CompositeFilter_Operator_name = map[int32]string{ + 0: "OPERATOR_UNSPECIFIED", + 1: "AND", +} +var StructuredQuery_CompositeFilter_Operator_value = map[string]int32{ + "OPERATOR_UNSPECIFIED": 0, + "AND": 1, +} + +func (x StructuredQuery_CompositeFilter_Operator) String() string { + return proto.EnumName(StructuredQuery_CompositeFilter_Operator_name, int32(x)) +} +func (StructuredQuery_CompositeFilter_Operator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor3, []int{0, 2, 0} +} + +// A field filter operator. +type StructuredQuery_FieldFilter_Operator int32 + +const ( + // Unspecified. This value must not be used. + StructuredQuery_FieldFilter_OPERATOR_UNSPECIFIED StructuredQuery_FieldFilter_Operator = 0 + // Less than. Requires that the field come first in `order_by`. + StructuredQuery_FieldFilter_LESS_THAN StructuredQuery_FieldFilter_Operator = 1 + // Less than or equal. Requires that the field come first in `order_by`. + StructuredQuery_FieldFilter_LESS_THAN_OR_EQUAL StructuredQuery_FieldFilter_Operator = 2 + // Greater than. Requires that the field come first in `order_by`. + StructuredQuery_FieldFilter_GREATER_THAN StructuredQuery_FieldFilter_Operator = 3 + // Greater than or equal. Requires that the field come first in + // `order_by`. + StructuredQuery_FieldFilter_GREATER_THAN_OR_EQUAL StructuredQuery_FieldFilter_Operator = 4 + // Equal. 
+ StructuredQuery_FieldFilter_EQUAL StructuredQuery_FieldFilter_Operator = 5 +) + +var StructuredQuery_FieldFilter_Operator_name = map[int32]string{ + 0: "OPERATOR_UNSPECIFIED", + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", +} +var StructuredQuery_FieldFilter_Operator_value = map[string]int32{ + "OPERATOR_UNSPECIFIED": 0, + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, +} + +func (x StructuredQuery_FieldFilter_Operator) String() string { + return proto.EnumName(StructuredQuery_FieldFilter_Operator_name, int32(x)) +} +func (StructuredQuery_FieldFilter_Operator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor3, []int{0, 3, 0} +} + +// A unary operator. +type StructuredQuery_UnaryFilter_Operator int32 + +const ( + // Unspecified. This value must not be used. + StructuredQuery_UnaryFilter_OPERATOR_UNSPECIFIED StructuredQuery_UnaryFilter_Operator = 0 + // Test if a field is equal to NaN. + StructuredQuery_UnaryFilter_IS_NAN StructuredQuery_UnaryFilter_Operator = 2 + // Test if an exprestion evaluates to Null. + StructuredQuery_UnaryFilter_IS_NULL StructuredQuery_UnaryFilter_Operator = 3 +) + +var StructuredQuery_UnaryFilter_Operator_name = map[int32]string{ + 0: "OPERATOR_UNSPECIFIED", + 2: "IS_NAN", + 3: "IS_NULL", +} +var StructuredQuery_UnaryFilter_Operator_value = map[string]int32{ + "OPERATOR_UNSPECIFIED": 0, + "IS_NAN": 2, + "IS_NULL": 3, +} + +func (x StructuredQuery_UnaryFilter_Operator) String() string { + return proto.EnumName(StructuredQuery_UnaryFilter_Operator_name, int32(x)) +} +func (StructuredQuery_UnaryFilter_Operator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor3, []int{0, 4, 0} +} + +// A Firestore query. +type StructuredQuery struct { + // The projection to return. + Select *StructuredQuery_Projection `protobuf:"bytes,1,opt,name=select" json:"select,omitempty"` + // The collections to query. 
+ From []*StructuredQuery_CollectionSelector `protobuf:"bytes,2,rep,name=from" json:"from,omitempty"` + // The filter to apply. + Where *StructuredQuery_Filter `protobuf:"bytes,3,opt,name=where" json:"where,omitempty"` + // The order to apply to the query results. + // + // Firestore guarantees a stable ordering through the following rules: + // + // * Any field required to appear in `order_by`, that is not already + // specified in `order_by`, is appended to the order in field name order + // by default. + // * If an order on `__name__` is not specified, it is appended by default. + // + // Fields are appended with the same sort direction as the last order + // specified, or 'ASCENDING' if no order was specified. For example: + // + // * `SELECT * FROM Foo ORDER BY A` becomes + // `SELECT * FROM Foo ORDER BY A, __name__` + // * `SELECT * FROM Foo ORDER BY A DESC` becomes + // `SELECT * FROM Foo ORDER BY A DESC, __name__ DESC` + // * `SELECT * FROM Foo WHERE A > 1` becomes + // `SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__` + OrderBy []*StructuredQuery_Order `protobuf:"bytes,4,rep,name=order_by,json=orderBy" json:"order_by,omitempty"` + // A starting point for the query results. + StartAt *Cursor `protobuf:"bytes,7,opt,name=start_at,json=startAt" json:"start_at,omitempty"` + // A end point for the query results. + EndAt *Cursor `protobuf:"bytes,8,opt,name=end_at,json=endAt" json:"end_at,omitempty"` + // The number of results to skip. + // + // Applies before limit, but after all other constraints. Must be >= 0 if + // specified. + Offset int32 `protobuf:"varint,6,opt,name=offset" json:"offset,omitempty"` + // The maximum number of results to return. + // + // Applies after all other constraints. + // Must be >= 0 if specified. 
+ Limit *google_protobuf3.Int32Value `protobuf:"bytes,5,opt,name=limit" json:"limit,omitempty"` +} + +func (m *StructuredQuery) Reset() { *m = StructuredQuery{} } +func (m *StructuredQuery) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery) ProtoMessage() {} +func (*StructuredQuery) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *StructuredQuery) GetSelect() *StructuredQuery_Projection { + if m != nil { + return m.Select + } + return nil +} + +func (m *StructuredQuery) GetFrom() []*StructuredQuery_CollectionSelector { + if m != nil { + return m.From + } + return nil +} + +func (m *StructuredQuery) GetWhere() *StructuredQuery_Filter { + if m != nil { + return m.Where + } + return nil +} + +func (m *StructuredQuery) GetOrderBy() []*StructuredQuery_Order { + if m != nil { + return m.OrderBy + } + return nil +} + +func (m *StructuredQuery) GetStartAt() *Cursor { + if m != nil { + return m.StartAt + } + return nil +} + +func (m *StructuredQuery) GetEndAt() *Cursor { + if m != nil { + return m.EndAt + } + return nil +} + +func (m *StructuredQuery) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *StructuredQuery) GetLimit() *google_protobuf3.Int32Value { + if m != nil { + return m.Limit + } + return nil +} + +// A selection of a collection, such as `messages as m1`. +type StructuredQuery_CollectionSelector struct { + // The collection ID. + // When set, selects only collections with this ID. + CollectionId string `protobuf:"bytes,2,opt,name=collection_id,json=collectionId" json:"collection_id,omitempty"` + // When false, selects only collections that are immediate children of + // the `parent` specified in the containing `RunQueryRequest`. + // When true, selects all descendant collections. 
+ AllDescendants bool `protobuf:"varint,3,opt,name=all_descendants,json=allDescendants" json:"all_descendants,omitempty"` +} + +func (m *StructuredQuery_CollectionSelector) Reset() { *m = StructuredQuery_CollectionSelector{} } +func (m *StructuredQuery_CollectionSelector) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery_CollectionSelector) ProtoMessage() {} +func (*StructuredQuery_CollectionSelector) Descriptor() ([]byte, []int) { + return fileDescriptor3, []int{0, 0} +} + +func (m *StructuredQuery_CollectionSelector) GetCollectionId() string { + if m != nil { + return m.CollectionId + } + return "" +} + +func (m *StructuredQuery_CollectionSelector) GetAllDescendants() bool { + if m != nil { + return m.AllDescendants + } + return false +} + +// A filter. +type StructuredQuery_Filter struct { + // The type of filter. + // + // Types that are valid to be assigned to FilterType: + // *StructuredQuery_Filter_CompositeFilter + // *StructuredQuery_Filter_FieldFilter + // *StructuredQuery_Filter_UnaryFilter + FilterType isStructuredQuery_Filter_FilterType `protobuf_oneof:"filter_type"` +} + +func (m *StructuredQuery_Filter) Reset() { *m = StructuredQuery_Filter{} } +func (m *StructuredQuery_Filter) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery_Filter) ProtoMessage() {} +func (*StructuredQuery_Filter) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 1} } + +type isStructuredQuery_Filter_FilterType interface { + isStructuredQuery_Filter_FilterType() +} + +type StructuredQuery_Filter_CompositeFilter struct { + CompositeFilter *StructuredQuery_CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter,json=compositeFilter,oneof"` +} +type StructuredQuery_Filter_FieldFilter struct { + FieldFilter *StructuredQuery_FieldFilter `protobuf:"bytes,2,opt,name=field_filter,json=fieldFilter,oneof"` +} +type StructuredQuery_Filter_UnaryFilter struct { + UnaryFilter *StructuredQuery_UnaryFilter 
`protobuf:"bytes,3,opt,name=unary_filter,json=unaryFilter,oneof"` +} + +func (*StructuredQuery_Filter_CompositeFilter) isStructuredQuery_Filter_FilterType() {} +func (*StructuredQuery_Filter_FieldFilter) isStructuredQuery_Filter_FilterType() {} +func (*StructuredQuery_Filter_UnaryFilter) isStructuredQuery_Filter_FilterType() {} + +func (m *StructuredQuery_Filter) GetFilterType() isStructuredQuery_Filter_FilterType { + if m != nil { + return m.FilterType + } + return nil +} + +func (m *StructuredQuery_Filter) GetCompositeFilter() *StructuredQuery_CompositeFilter { + if x, ok := m.GetFilterType().(*StructuredQuery_Filter_CompositeFilter); ok { + return x.CompositeFilter + } + return nil +} + +func (m *StructuredQuery_Filter) GetFieldFilter() *StructuredQuery_FieldFilter { + if x, ok := m.GetFilterType().(*StructuredQuery_Filter_FieldFilter); ok { + return x.FieldFilter + } + return nil +} + +func (m *StructuredQuery_Filter) GetUnaryFilter() *StructuredQuery_UnaryFilter { + if x, ok := m.GetFilterType().(*StructuredQuery_Filter_UnaryFilter); ok { + return x.UnaryFilter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*StructuredQuery_Filter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StructuredQuery_Filter_OneofMarshaler, _StructuredQuery_Filter_OneofUnmarshaler, _StructuredQuery_Filter_OneofSizer, []interface{}{ + (*StructuredQuery_Filter_CompositeFilter)(nil), + (*StructuredQuery_Filter_FieldFilter)(nil), + (*StructuredQuery_Filter_UnaryFilter)(nil), + } +} + +func _StructuredQuery_Filter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StructuredQuery_Filter) + // filter_type + switch x := m.FilterType.(type) { + case *StructuredQuery_Filter_CompositeFilter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CompositeFilter); err != nil { + return err + } + case *StructuredQuery_Filter_FieldFilter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FieldFilter); err != nil { + return err + } + case *StructuredQuery_Filter_UnaryFilter: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UnaryFilter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("StructuredQuery_Filter.FilterType has unexpected type %T", x) + } + return nil +} + +func _StructuredQuery_Filter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StructuredQuery_Filter) + switch tag { + case 1: // filter_type.composite_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StructuredQuery_CompositeFilter) + err := b.DecodeMessage(msg) + m.FilterType = &StructuredQuery_Filter_CompositeFilter{msg} + return true, err + case 2: // filter_type.field_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StructuredQuery_FieldFilter) + err := b.DecodeMessage(msg) + m.FilterType = &StructuredQuery_Filter_FieldFilter{msg} + 
return true, err + case 3: // filter_type.unary_filter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StructuredQuery_UnaryFilter) + err := b.DecodeMessage(msg) + m.FilterType = &StructuredQuery_Filter_UnaryFilter{msg} + return true, err + default: + return false, nil + } +} + +func _StructuredQuery_Filter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StructuredQuery_Filter) + // filter_type + switch x := m.FilterType.(type) { + case *StructuredQuery_Filter_CompositeFilter: + s := proto.Size(x.CompositeFilter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StructuredQuery_Filter_FieldFilter: + s := proto.Size(x.FieldFilter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StructuredQuery_Filter_UnaryFilter: + s := proto.Size(x.UnaryFilter) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A filter that merges multiple other filters using the given operator. +type StructuredQuery_CompositeFilter struct { + // The operator for combining multiple filters. + Op StructuredQuery_CompositeFilter_Operator `protobuf:"varint,1,opt,name=op,enum=google.firestore.v1beta1.StructuredQuery_CompositeFilter_Operator" json:"op,omitempty"` + // The list of filters to combine. + // Must contain at least one filter. 
+ Filters []*StructuredQuery_Filter `protobuf:"bytes,2,rep,name=filters" json:"filters,omitempty"` +} + +func (m *StructuredQuery_CompositeFilter) Reset() { *m = StructuredQuery_CompositeFilter{} } +func (m *StructuredQuery_CompositeFilter) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery_CompositeFilter) ProtoMessage() {} +func (*StructuredQuery_CompositeFilter) Descriptor() ([]byte, []int) { + return fileDescriptor3, []int{0, 2} +} + +func (m *StructuredQuery_CompositeFilter) GetOp() StructuredQuery_CompositeFilter_Operator { + if m != nil { + return m.Op + } + return StructuredQuery_CompositeFilter_OPERATOR_UNSPECIFIED +} + +func (m *StructuredQuery_CompositeFilter) GetFilters() []*StructuredQuery_Filter { + if m != nil { + return m.Filters + } + return nil +} + +// A filter on a specific field. +type StructuredQuery_FieldFilter struct { + // The field to filter by. + Field *StructuredQuery_FieldReference `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` + // The operator to filter by. + Op StructuredQuery_FieldFilter_Operator `protobuf:"varint,2,opt,name=op,enum=google.firestore.v1beta1.StructuredQuery_FieldFilter_Operator" json:"op,omitempty"` + // The value to compare to. 
+ Value *Value `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *StructuredQuery_FieldFilter) Reset() { *m = StructuredQuery_FieldFilter{} } +func (m *StructuredQuery_FieldFilter) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery_FieldFilter) ProtoMessage() {} +func (*StructuredQuery_FieldFilter) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 3} } + +func (m *StructuredQuery_FieldFilter) GetField() *StructuredQuery_FieldReference { + if m != nil { + return m.Field + } + return nil +} + +func (m *StructuredQuery_FieldFilter) GetOp() StructuredQuery_FieldFilter_Operator { + if m != nil { + return m.Op + } + return StructuredQuery_FieldFilter_OPERATOR_UNSPECIFIED +} + +func (m *StructuredQuery_FieldFilter) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// A filter with a single operand. +type StructuredQuery_UnaryFilter struct { + // The unary operator to apply. + Op StructuredQuery_UnaryFilter_Operator `protobuf:"varint,1,opt,name=op,enum=google.firestore.v1beta1.StructuredQuery_UnaryFilter_Operator" json:"op,omitempty"` + // The argument to the filter. 
+ // + // Types that are valid to be assigned to OperandType: + // *StructuredQuery_UnaryFilter_Field + OperandType isStructuredQuery_UnaryFilter_OperandType `protobuf_oneof:"operand_type"` +} + +func (m *StructuredQuery_UnaryFilter) Reset() { *m = StructuredQuery_UnaryFilter{} } +func (m *StructuredQuery_UnaryFilter) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery_UnaryFilter) ProtoMessage() {} +func (*StructuredQuery_UnaryFilter) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 4} } + +type isStructuredQuery_UnaryFilter_OperandType interface { + isStructuredQuery_UnaryFilter_OperandType() +} + +type StructuredQuery_UnaryFilter_Field struct { + Field *StructuredQuery_FieldReference `protobuf:"bytes,2,opt,name=field,oneof"` +} + +func (*StructuredQuery_UnaryFilter_Field) isStructuredQuery_UnaryFilter_OperandType() {} + +func (m *StructuredQuery_UnaryFilter) GetOperandType() isStructuredQuery_UnaryFilter_OperandType { + if m != nil { + return m.OperandType + } + return nil +} + +func (m *StructuredQuery_UnaryFilter) GetOp() StructuredQuery_UnaryFilter_Operator { + if m != nil { + return m.Op + } + return StructuredQuery_UnaryFilter_OPERATOR_UNSPECIFIED +} + +func (m *StructuredQuery_UnaryFilter) GetField() *StructuredQuery_FieldReference { + if x, ok := m.GetOperandType().(*StructuredQuery_UnaryFilter_Field); ok { + return x.Field + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*StructuredQuery_UnaryFilter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StructuredQuery_UnaryFilter_OneofMarshaler, _StructuredQuery_UnaryFilter_OneofUnmarshaler, _StructuredQuery_UnaryFilter_OneofSizer, []interface{}{ + (*StructuredQuery_UnaryFilter_Field)(nil), + } +} + +func _StructuredQuery_UnaryFilter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StructuredQuery_UnaryFilter) + // operand_type + switch x := m.OperandType.(type) { + case *StructuredQuery_UnaryFilter_Field: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Field); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("StructuredQuery_UnaryFilter.OperandType has unexpected type %T", x) + } + return nil +} + +func _StructuredQuery_UnaryFilter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StructuredQuery_UnaryFilter) + switch tag { + case 2: // operand_type.field + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StructuredQuery_FieldReference) + err := b.DecodeMessage(msg) + m.OperandType = &StructuredQuery_UnaryFilter_Field{msg} + return true, err + default: + return false, nil + } +} + +func _StructuredQuery_UnaryFilter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StructuredQuery_UnaryFilter) + // operand_type + switch x := m.OperandType.(type) { + case *StructuredQuery_UnaryFilter_Field: + s := proto.Size(x.Field) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// An order on a field. +type StructuredQuery_Order struct { + // The field to order by. 
+ Field *StructuredQuery_FieldReference `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` + // The direction to order by. Defaults to `ASCENDING`. + Direction StructuredQuery_Direction `protobuf:"varint,2,opt,name=direction,enum=google.firestore.v1beta1.StructuredQuery_Direction" json:"direction,omitempty"` +} + +func (m *StructuredQuery_Order) Reset() { *m = StructuredQuery_Order{} } +func (m *StructuredQuery_Order) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery_Order) ProtoMessage() {} +func (*StructuredQuery_Order) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 5} } + +func (m *StructuredQuery_Order) GetField() *StructuredQuery_FieldReference { + if m != nil { + return m.Field + } + return nil +} + +func (m *StructuredQuery_Order) GetDirection() StructuredQuery_Direction { + if m != nil { + return m.Direction + } + return StructuredQuery_DIRECTION_UNSPECIFIED +} + +// A reference to a field, such as `max(messages.time) as max_time`. +type StructuredQuery_FieldReference struct { + FieldPath string `protobuf:"bytes,2,opt,name=field_path,json=fieldPath" json:"field_path,omitempty"` +} + +func (m *StructuredQuery_FieldReference) Reset() { *m = StructuredQuery_FieldReference{} } +func (m *StructuredQuery_FieldReference) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery_FieldReference) ProtoMessage() {} +func (*StructuredQuery_FieldReference) Descriptor() ([]byte, []int) { + return fileDescriptor3, []int{0, 6} +} + +func (m *StructuredQuery_FieldReference) GetFieldPath() string { + if m != nil { + return m.FieldPath + } + return "" +} + +// The projection of document's fields to return. +type StructuredQuery_Projection struct { + // The fields to return. + // + // If empty, all fields are returned. To only return the name + // of the document, use `['__name__']`. 
+ Fields []*StructuredQuery_FieldReference `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` +} + +func (m *StructuredQuery_Projection) Reset() { *m = StructuredQuery_Projection{} } +func (m *StructuredQuery_Projection) String() string { return proto.CompactTextString(m) } +func (*StructuredQuery_Projection) ProtoMessage() {} +func (*StructuredQuery_Projection) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 7} } + +func (m *StructuredQuery_Projection) GetFields() []*StructuredQuery_FieldReference { + if m != nil { + return m.Fields + } + return nil +} + +// A position in a query result set. +type Cursor struct { + // The values that represent a position, in the order they appear in + // the order by clause of a query. + // + // Can contain fewer values than specified in the order by clause. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` + // If the position is just before or just after the given values, relative + // to the sort order defined by the query. 
+ Before bool `protobuf:"varint,2,opt,name=before" json:"before,omitempty"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} +func (*Cursor) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *Cursor) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func (m *Cursor) GetBefore() bool { + if m != nil { + return m.Before + } + return false +} + +func init() { + proto.RegisterType((*StructuredQuery)(nil), "google.firestore.v1beta1.StructuredQuery") + proto.RegisterType((*StructuredQuery_CollectionSelector)(nil), "google.firestore.v1beta1.StructuredQuery.CollectionSelector") + proto.RegisterType((*StructuredQuery_Filter)(nil), "google.firestore.v1beta1.StructuredQuery.Filter") + proto.RegisterType((*StructuredQuery_CompositeFilter)(nil), "google.firestore.v1beta1.StructuredQuery.CompositeFilter") + proto.RegisterType((*StructuredQuery_FieldFilter)(nil), "google.firestore.v1beta1.StructuredQuery.FieldFilter") + proto.RegisterType((*StructuredQuery_UnaryFilter)(nil), "google.firestore.v1beta1.StructuredQuery.UnaryFilter") + proto.RegisterType((*StructuredQuery_Order)(nil), "google.firestore.v1beta1.StructuredQuery.Order") + proto.RegisterType((*StructuredQuery_FieldReference)(nil), "google.firestore.v1beta1.StructuredQuery.FieldReference") + proto.RegisterType((*StructuredQuery_Projection)(nil), "google.firestore.v1beta1.StructuredQuery.Projection") + proto.RegisterType((*Cursor)(nil), "google.firestore.v1beta1.Cursor") + proto.RegisterEnum("google.firestore.v1beta1.StructuredQuery_Direction", StructuredQuery_Direction_name, StructuredQuery_Direction_value) + proto.RegisterEnum("google.firestore.v1beta1.StructuredQuery_CompositeFilter_Operator", StructuredQuery_CompositeFilter_Operator_name, StructuredQuery_CompositeFilter_Operator_value) + 
proto.RegisterEnum("google.firestore.v1beta1.StructuredQuery_FieldFilter_Operator", StructuredQuery_FieldFilter_Operator_name, StructuredQuery_FieldFilter_Operator_value) + proto.RegisterEnum("google.firestore.v1beta1.StructuredQuery_UnaryFilter_Operator", StructuredQuery_UnaryFilter_Operator_name, StructuredQuery_UnaryFilter_Operator_value) +} + +func init() { proto.RegisterFile("google/firestore/v1beta1/query.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 956 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xc7, 0x6b, 0xa7, 0xce, 0xc7, 0x49, 0x3f, 0xac, 0x11, 0xac, 0x4c, 0x58, 0x96, 0x2a, 0x20, + 0x6d, 0x6f, 0x70, 0x68, 0xcb, 0x0a, 0xd0, 0x02, 0x92, 0x93, 0xb8, 0x6d, 0x56, 0x95, 0x93, 0x4e, + 0xda, 0x95, 0xd8, 0x0b, 0x2c, 0xc7, 0x1e, 0xa7, 0x46, 0xae, 0xc7, 0x8c, 0xc7, 0xbb, 0xea, 0x35, + 0x0f, 0xc1, 0x3d, 0x97, 0xfb, 0x02, 0xbc, 0x0e, 0xd7, 0x3c, 0x02, 0x17, 0x08, 0x79, 0x3c, 0x49, + 0xda, 0xad, 0x22, 0x92, 0xb2, 0x77, 0x39, 0xc7, 0xe7, 0xfc, 0xe6, 0xf8, 0x7f, 0xce, 0xf1, 0x04, + 0x3e, 0x9f, 0x52, 0x3a, 0x8d, 0x49, 0x27, 0x8c, 0x18, 0xc9, 0x38, 0x65, 0xa4, 0xf3, 0xfa, 0x60, + 0x42, 0xb8, 0x77, 0xd0, 0xf9, 0x25, 0x27, 0xec, 0xc6, 0x4c, 0x19, 0xe5, 0x14, 0x19, 0x65, 0x94, + 0x39, 0x8f, 0x32, 0x65, 0x54, 0xeb, 0xb1, 0xcc, 0xf7, 0xd2, 0xa8, 0xe3, 0x25, 0x09, 0xe5, 0x1e, + 0x8f, 0x68, 0x92, 0x95, 0x79, 0xad, 0xa7, 0x4b, 0xe9, 0x01, 0xf5, 0xf3, 0x6b, 0x92, 0x70, 0x19, + 0xf8, 0x44, 0x06, 0x0a, 0x6b, 0x92, 0x87, 0x9d, 0x37, 0xcc, 0x4b, 0x53, 0xc2, 0x24, 0xa8, 0xfd, + 0xb7, 0x0e, 0xbb, 0x63, 0xce, 0x72, 0x9f, 0xe7, 0x8c, 0x04, 0xe7, 0x45, 0x69, 0xe8, 0x0c, 0xaa, + 0x19, 0x89, 0x89, 0xcf, 0x0d, 0x65, 0x4f, 0xd9, 0x6f, 0x1e, 0x7e, 0x65, 0x2e, 0xab, 0xd2, 0x7c, + 0x27, 0xd5, 0x1c, 0x31, 0xfa, 0x33, 0xf1, 0x8b, 0x4a, 0xb1, 0x64, 0xa0, 0x11, 0x6c, 0x86, 0x8c, + 0x5e, 0x1b, 0xea, 0x5e, 0x65, 0xbf, 0x79, 0xf8, 0xdd, 0xea, 0xac, 0x1e, 0x8d, 0xe3, 0x92, 0x35, + 
0x16, 0x24, 0xca, 0xb0, 0x20, 0xa1, 0x63, 0xd0, 0xde, 0x5c, 0x11, 0x46, 0x8c, 0x8a, 0x28, 0xef, + 0xcb, 0xd5, 0x91, 0xc7, 0x51, 0xcc, 0x09, 0xc3, 0x65, 0x3a, 0x7a, 0x01, 0x75, 0xca, 0x02, 0xc2, + 0xdc, 0xc9, 0x8d, 0xb1, 0x29, 0xaa, 0xeb, 0xac, 0x8e, 0x1a, 0x16, 0x99, 0xb8, 0x26, 0x00, 0xdd, + 0x1b, 0xf4, 0x1c, 0xea, 0x19, 0xf7, 0x18, 0x77, 0x3d, 0x6e, 0xd4, 0x44, 0x59, 0x7b, 0xcb, 0x59, + 0xbd, 0x9c, 0x65, 0x94, 0xe1, 0x9a, 0xc8, 0xb0, 0x38, 0xfa, 0x1a, 0xaa, 0x24, 0x09, 0x8a, 0xd4, + 0xfa, 0x8a, 0xa9, 0x1a, 0x49, 0x02, 0x8b, 0xa3, 0x47, 0x50, 0xa5, 0x61, 0x98, 0x11, 0x6e, 0x54, + 0xf7, 0x94, 0x7d, 0x0d, 0x4b, 0x0b, 0x1d, 0x80, 0x16, 0x47, 0xd7, 0x11, 0x37, 0x34, 0xc1, 0xfb, + 0x78, 0xc6, 0x9b, 0x4d, 0x81, 0x39, 0x48, 0xf8, 0xd1, 0xe1, 0x4b, 0x2f, 0xce, 0x09, 0x2e, 0x23, + 0x5b, 0x13, 0x40, 0xf7, 0x05, 0x47, 0x9f, 0xc1, 0xb6, 0x3f, 0xf7, 0xba, 0x51, 0x60, 0xa8, 0x7b, + 0xca, 0x7e, 0x03, 0x6f, 0x2d, 0x9c, 0x83, 0x00, 0x3d, 0x85, 0x5d, 0x2f, 0x8e, 0xdd, 0x80, 0x64, + 0x3e, 0x49, 0x02, 0x2f, 0xe1, 0x99, 0xe8, 0x4c, 0x1d, 0xef, 0x78, 0x71, 0xdc, 0x5f, 0x78, 0x5b, + 0x7f, 0xa8, 0x50, 0x2d, 0x5b, 0x80, 0x42, 0xd0, 0x7d, 0x7a, 0x9d, 0xd2, 0x2c, 0xe2, 0xc4, 0x0d, + 0x85, 0x4f, 0x4e, 0xdb, 0xb7, 0xeb, 0x4c, 0x88, 0x24, 0x94, 0xd0, 0xd3, 0x0d, 0xbc, 0xeb, 0xdf, + 0x75, 0xa1, 0x57, 0xb0, 0x15, 0x46, 0x24, 0x0e, 0x66, 0x67, 0xa8, 0xe2, 0x8c, 0x67, 0xeb, 0x8c, + 0x0c, 0x89, 0x83, 0x39, 0xbf, 0x19, 0x2e, 0xcc, 0x82, 0x9d, 0x27, 0x1e, 0xbb, 0x99, 0xb1, 0x2b, + 0xeb, 0xb2, 0x2f, 0x8b, 0xec, 0x05, 0x3b, 0x5f, 0x98, 0xdd, 0x6d, 0x68, 0x96, 0x54, 0x97, 0xdf, + 0xa4, 0xa4, 0xf5, 0xa7, 0x02, 0xbb, 0xef, 0xbc, 0x2d, 0xc2, 0xa0, 0xd2, 0x54, 0x88, 0xb6, 0x73, + 0xd8, 0x7d, 0xb0, 0x68, 0xe6, 0x30, 0x25, 0xcc, 0x2b, 0x96, 0x4b, 0xa5, 0x29, 0x7a, 0x01, 0xb5, + 0xf2, 0xd8, 0x4c, 0xee, 0xeb, 0xfa, 0xcb, 0x35, 0x03, 0xb4, 0xbf, 0x80, 0xfa, 0x8c, 0x8d, 0x0c, + 0xf8, 0x60, 0x38, 0xb2, 0xb1, 0x75, 0x31, 0xc4, 0xee, 0xa5, 0x33, 0x1e, 0xd9, 0xbd, 0xc1, 0xf1, + 0xc0, 0xee, 0xeb, 0x1b, 0xa8, 0x06, 
0x15, 0xcb, 0xe9, 0xeb, 0x4a, 0xeb, 0x2f, 0x15, 0x9a, 0xb7, + 0xc4, 0x46, 0x0e, 0x68, 0x42, 0x6c, 0x39, 0x16, 0xdf, 0xac, 0xd9, 0x32, 0x4c, 0x42, 0xc2, 0x48, + 0xe2, 0x13, 0x5c, 0x62, 0x90, 0x23, 0xe4, 0x52, 0x85, 0x5c, 0x3f, 0x3c, 0xa8, 0xff, 0x77, 0xa5, + 0x7a, 0x06, 0xda, 0xeb, 0x62, 0x81, 0x64, 0xdb, 0x3f, 0x5d, 0x8e, 0x94, 0x7b, 0x26, 0xa2, 0xdb, + 0xbf, 0x2a, 0x2b, 0xc9, 0xb2, 0x0d, 0x8d, 0x33, 0x7b, 0x3c, 0x76, 0x2f, 0x4e, 0x2d, 0x47, 0x57, + 0xd0, 0x23, 0x40, 0x73, 0xd3, 0x1d, 0x62, 0xd7, 0x3e, 0xbf, 0xb4, 0xce, 0x74, 0x15, 0xe9, 0xb0, + 0x75, 0x82, 0x6d, 0xeb, 0xc2, 0xc6, 0x65, 0x64, 0x05, 0x7d, 0x04, 0x1f, 0xde, 0xf6, 0x2c, 0x82, + 0x37, 0x51, 0x03, 0xb4, 0xf2, 0xa7, 0xd6, 0xfa, 0x47, 0x81, 0xe6, 0xad, 0xe9, 0x93, 0xe2, 0x28, + 0xeb, 0x8a, 0x73, 0x0b, 0x71, 0x57, 0x9c, 0xd1, 0xac, 0x79, 0xea, 0xff, 0x6b, 0xde, 0xe9, 0x86, + 0x6c, 0x5f, 0xfb, 0xfb, 0x95, 0x64, 0x03, 0xa8, 0x0e, 0xc6, 0xae, 0x63, 0x39, 0xba, 0x8a, 0x9a, + 0x50, 0x2b, 0x7e, 0x5f, 0x9e, 0x9d, 0xe9, 0x95, 0xee, 0x0e, 0x6c, 0xd1, 0x22, 0x3d, 0x09, 0xca, + 0x85, 0x7a, 0xab, 0x80, 0x26, 0x3e, 0xe1, 0xef, 0x7d, 0xce, 0xce, 0xa1, 0x11, 0x44, 0xac, 0xfc, + 0x38, 0xca, 0x71, 0x3b, 0x5a, 0x9d, 0xd9, 0x9f, 0xa5, 0xe2, 0x05, 0xa5, 0xd5, 0x81, 0x9d, 0xbb, + 0x67, 0xa1, 0x4f, 0x00, 0xca, 0xcf, 0x5a, 0xea, 0xf1, 0x2b, 0xf9, 0x51, 0x6e, 0x08, 0xcf, 0xc8, + 0xe3, 0x57, 0xad, 0x9f, 0x00, 0x16, 0x37, 0x31, 0x1a, 0x41, 0x55, 0x3c, 0x9a, 0xed, 0xf4, 0xc3, + 0x5f, 0x51, 0x72, 0xda, 0x36, 0x34, 0xe6, 0x85, 0x16, 0x13, 0xd7, 0x1f, 0x60, 0xbb, 0x77, 0x31, + 0x18, 0x3a, 0xf7, 0xa7, 0xd8, 0x1a, 0xf7, 0x6c, 0xa7, 0x3f, 0x70, 0x4e, 0x74, 0x05, 0xed, 0x00, + 0xf4, 0xed, 0xb9, 0xad, 0xb6, 0x7f, 0x84, 0x6a, 0x79, 0x9f, 0x15, 0x37, 0xa0, 0x58, 0x8f, 0xcc, + 0x50, 0x44, 0x89, 0xff, 0xb9, 0x4d, 0x32, 0xbc, 0xb8, 0x01, 0x27, 0x24, 0xa4, 0x8c, 0x08, 0x11, + 0xea, 0x58, 0x5a, 0xdd, 0xdf, 0x14, 0x78, 0xec, 0xd3, 0xeb, 0xa5, 0x98, 0x2e, 0x88, 0x17, 0x1c, + 0x15, 0x17, 0xe2, 0x48, 0x79, 0x65, 0xc9, 0xb8, 0x29, 0x8d, 0xbd, 0x64, 
0x6a, 0x52, 0x36, 0xed, + 0x4c, 0x49, 0x22, 0xae, 0xcb, 0x4e, 0xf9, 0xc8, 0x4b, 0xa3, 0xec, 0xfe, 0xdf, 0xad, 0xe7, 0x73, + 0xcf, 0xef, 0xea, 0xe6, 0x49, 0xef, 0x78, 0xfc, 0x56, 0x7d, 0x72, 0x52, 0xa2, 0x7a, 0x31, 0xcd, + 0x03, 0xf3, 0x78, 0x7e, 0xf0, 0xcb, 0x83, 0x6e, 0x91, 0x31, 0xa9, 0x0a, 0xea, 0xd1, 0xbf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x1e, 0x07, 0x56, 0x55, 0x21, 0x0a, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/write.pb.go b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/write.pb.go new file mode 100644 index 000000000..0b287d5b3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/firestore/v1beta1/write.pb.go @@ -0,0 +1,609 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/firestore/v1beta1/write.proto + +package firestore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A value that is calculated by the server. +type DocumentTransform_FieldTransform_ServerValue int32 + +const ( + // Unspecified. This value must not be used. + DocumentTransform_FieldTransform_SERVER_VALUE_UNSPECIFIED DocumentTransform_FieldTransform_ServerValue = 0 + // The time at which the server processed the request. 
+ DocumentTransform_FieldTransform_REQUEST_TIME DocumentTransform_FieldTransform_ServerValue = 1 +) + +var DocumentTransform_FieldTransform_ServerValue_name = map[int32]string{ + 0: "SERVER_VALUE_UNSPECIFIED", + 1: "REQUEST_TIME", +} +var DocumentTransform_FieldTransform_ServerValue_value = map[string]int32{ + "SERVER_VALUE_UNSPECIFIED": 0, + "REQUEST_TIME": 1, +} + +func (x DocumentTransform_FieldTransform_ServerValue) String() string { + return proto.EnumName(DocumentTransform_FieldTransform_ServerValue_name, int32(x)) +} +func (DocumentTransform_FieldTransform_ServerValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor4, []int{1, 0, 0} +} + +// A write on a document. +type Write struct { + // The operation to execute. + // + // Types that are valid to be assigned to Operation: + // *Write_Update + // *Write_Delete + // *Write_Transform + Operation isWrite_Operation `protobuf_oneof:"operation"` + // The fields to update in this write. + // + // This field can be set only when the operation is `update`. + // None of the field paths in the mask may contain a reserved name. + // If the document exists on the server and has fields not referenced in the + // mask, they are left unchanged. + // Fields referenced in the mask, but not present in the input document, are + // deleted from the document on the server. + // The field paths in this mask must not contain a reserved field name. + UpdateMask *DocumentMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` + // An optional precondition on the document. + // + // The write will fail if this is set and not met by the target document. 
+ CurrentDocument *Precondition `protobuf:"bytes,4,opt,name=current_document,json=currentDocument" json:"current_document,omitempty"` +} + +func (m *Write) Reset() { *m = Write{} } +func (m *Write) String() string { return proto.CompactTextString(m) } +func (*Write) ProtoMessage() {} +func (*Write) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +type isWrite_Operation interface { + isWrite_Operation() +} + +type Write_Update struct { + Update *Document `protobuf:"bytes,1,opt,name=update,oneof"` +} +type Write_Delete struct { + Delete string `protobuf:"bytes,2,opt,name=delete,oneof"` +} +type Write_Transform struct { + Transform *DocumentTransform `protobuf:"bytes,6,opt,name=transform,oneof"` +} + +func (*Write_Update) isWrite_Operation() {} +func (*Write_Delete) isWrite_Operation() {} +func (*Write_Transform) isWrite_Operation() {} + +func (m *Write) GetOperation() isWrite_Operation { + if m != nil { + return m.Operation + } + return nil +} + +func (m *Write) GetUpdate() *Document { + if x, ok := m.GetOperation().(*Write_Update); ok { + return x.Update + } + return nil +} + +func (m *Write) GetDelete() string { + if x, ok := m.GetOperation().(*Write_Delete); ok { + return x.Delete + } + return "" +} + +func (m *Write) GetTransform() *DocumentTransform { + if x, ok := m.GetOperation().(*Write_Transform); ok { + return x.Transform + } + return nil +} + +func (m *Write) GetUpdateMask() *DocumentMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *Write) GetCurrentDocument() *Precondition { + if m != nil { + return m.CurrentDocument + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Write) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Write_OneofMarshaler, _Write_OneofUnmarshaler, _Write_OneofSizer, []interface{}{ + (*Write_Update)(nil), + (*Write_Delete)(nil), + (*Write_Transform)(nil), + } +} + +func _Write_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Write) + // operation + switch x := m.Operation.(type) { + case *Write_Update: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Update); err != nil { + return err + } + case *Write_Delete: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Delete) + case *Write_Transform: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Transform); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Write.Operation has unexpected type %T", x) + } + return nil +} + +func _Write_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Write) + switch tag { + case 1: // operation.update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Document) + err := b.DecodeMessage(msg) + m.Operation = &Write_Update{msg} + return true, err + case 2: // operation.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Operation = &Write_Delete{x} + return true, err + case 6: // operation.transform + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DocumentTransform) + err := b.DecodeMessage(msg) + m.Operation = &Write_Transform{msg} + return true, err + default: + return false, nil + } +} + +func _Write_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Write) + // operation + switch x := m.Operation.(type) { + case *Write_Update: + s := proto.Size(x.Update) + n += 
proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Write_Delete: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Delete))) + n += len(x.Delete) + case *Write_Transform: + s := proto.Size(x.Transform) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A transformation of a document. +type DocumentTransform struct { + // The name of the document to transform. + Document string `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // The list of transformations to apply to the fields of the document, in + // order. + FieldTransforms []*DocumentTransform_FieldTransform `protobuf:"bytes,2,rep,name=field_transforms,json=fieldTransforms" json:"field_transforms,omitempty"` +} + +func (m *DocumentTransform) Reset() { *m = DocumentTransform{} } +func (m *DocumentTransform) String() string { return proto.CompactTextString(m) } +func (*DocumentTransform) ProtoMessage() {} +func (*DocumentTransform) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *DocumentTransform) GetDocument() string { + if m != nil { + return m.Document + } + return "" +} + +func (m *DocumentTransform) GetFieldTransforms() []*DocumentTransform_FieldTransform { + if m != nil { + return m.FieldTransforms + } + return nil +} + +// A transformation of a field of the document. +type DocumentTransform_FieldTransform struct { + // The path of the field. See [Document.fields][google.firestore.v1beta1.Document.fields] for the field path syntax + // reference. + FieldPath string `protobuf:"bytes,1,opt,name=field_path,json=fieldPath" json:"field_path,omitempty"` + // The transformation to apply on the field. 
+ // + // Types that are valid to be assigned to TransformType: + // *DocumentTransform_FieldTransform_SetToServerValue + TransformType isDocumentTransform_FieldTransform_TransformType `protobuf_oneof:"transform_type"` +} + +func (m *DocumentTransform_FieldTransform) Reset() { *m = DocumentTransform_FieldTransform{} } +func (m *DocumentTransform_FieldTransform) String() string { return proto.CompactTextString(m) } +func (*DocumentTransform_FieldTransform) ProtoMessage() {} +func (*DocumentTransform_FieldTransform) Descriptor() ([]byte, []int) { + return fileDescriptor4, []int{1, 0} +} + +type isDocumentTransform_FieldTransform_TransformType interface { + isDocumentTransform_FieldTransform_TransformType() +} + +type DocumentTransform_FieldTransform_SetToServerValue struct { + SetToServerValue DocumentTransform_FieldTransform_ServerValue `protobuf:"varint,2,opt,name=set_to_server_value,json=setToServerValue,enum=google.firestore.v1beta1.DocumentTransform_FieldTransform_ServerValue,oneof"` +} + +func (*DocumentTransform_FieldTransform_SetToServerValue) isDocumentTransform_FieldTransform_TransformType() { +} + +func (m *DocumentTransform_FieldTransform) GetTransformType() isDocumentTransform_FieldTransform_TransformType { + if m != nil { + return m.TransformType + } + return nil +} + +func (m *DocumentTransform_FieldTransform) GetFieldPath() string { + if m != nil { + return m.FieldPath + } + return "" +} + +func (m *DocumentTransform_FieldTransform) GetSetToServerValue() DocumentTransform_FieldTransform_ServerValue { + if x, ok := m.GetTransformType().(*DocumentTransform_FieldTransform_SetToServerValue); ok { + return x.SetToServerValue + } + return DocumentTransform_FieldTransform_SERVER_VALUE_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*DocumentTransform_FieldTransform) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DocumentTransform_FieldTransform_OneofMarshaler, _DocumentTransform_FieldTransform_OneofUnmarshaler, _DocumentTransform_FieldTransform_OneofSizer, []interface{}{ + (*DocumentTransform_FieldTransform_SetToServerValue)(nil), + } +} + +func _DocumentTransform_FieldTransform_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DocumentTransform_FieldTransform) + // transform_type + switch x := m.TransformType.(type) { + case *DocumentTransform_FieldTransform_SetToServerValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.SetToServerValue)) + case nil: + default: + return fmt.Errorf("DocumentTransform_FieldTransform.TransformType has unexpected type %T", x) + } + return nil +} + +func _DocumentTransform_FieldTransform_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DocumentTransform_FieldTransform) + switch tag { + case 2: // transform_type.set_to_server_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TransformType = &DocumentTransform_FieldTransform_SetToServerValue{DocumentTransform_FieldTransform_ServerValue(x)} + return true, err + default: + return false, nil + } +} + +func _DocumentTransform_FieldTransform_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DocumentTransform_FieldTransform) + // transform_type + switch x := m.TransformType.(type) { + case *DocumentTransform_FieldTransform_SetToServerValue: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.SetToServerValue)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The result of applying a write. 
+type WriteResult struct { + // The last update time of the document after applying the write. Not set + // after a `delete`. + // + // If the write did not actually change the document, this will be the + // previous update_time. + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // The results of applying each [DocumentTransform.FieldTransform][google.firestore.v1beta1.DocumentTransform.FieldTransform], in the + // same order. + TransformResults []*Value `protobuf:"bytes,2,rep,name=transform_results,json=transformResults" json:"transform_results,omitempty"` +} + +func (m *WriteResult) Reset() { *m = WriteResult{} } +func (m *WriteResult) String() string { return proto.CompactTextString(m) } +func (*WriteResult) ProtoMessage() {} +func (*WriteResult) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } + +func (m *WriteResult) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *WriteResult) GetTransformResults() []*Value { + if m != nil { + return m.TransformResults + } + return nil +} + +// A [Document][google.firestore.v1beta1.Document] has changed. +// +// May be the result of multiple [writes][google.firestore.v1beta1.Write], including deletes, that +// ultimately resulted in a new value for the [Document][google.firestore.v1beta1.Document]. +// +// Multiple [DocumentChange][google.firestore.v1beta1.DocumentChange] messages may be returned for the same logical +// change, if multiple targets are affected. +type DocumentChange struct { + // The new state of the [Document][google.firestore.v1beta1.Document]. + // + // If `mask` is set, contains only fields that were updated or added. + Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // A set of target IDs of targets that match this document. 
+ TargetIds []int32 `protobuf:"varint,5,rep,packed,name=target_ids,json=targetIds" json:"target_ids,omitempty"` + // A set of target IDs for targets that no longer match this document. + RemovedTargetIds []int32 `protobuf:"varint,6,rep,packed,name=removed_target_ids,json=removedTargetIds" json:"removed_target_ids,omitempty"` +} + +func (m *DocumentChange) Reset() { *m = DocumentChange{} } +func (m *DocumentChange) String() string { return proto.CompactTextString(m) } +func (*DocumentChange) ProtoMessage() {} +func (*DocumentChange) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } + +func (m *DocumentChange) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *DocumentChange) GetTargetIds() []int32 { + if m != nil { + return m.TargetIds + } + return nil +} + +func (m *DocumentChange) GetRemovedTargetIds() []int32 { + if m != nil { + return m.RemovedTargetIds + } + return nil +} + +// A [Document][google.firestore.v1beta1.Document] has been deleted. +// +// May be the result of multiple [writes][google.firestore.v1beta1.Write], including updates, the +// last of which deleted the [Document][google.firestore.v1beta1.Document]. +// +// Multiple [DocumentDelete][google.firestore.v1beta1.DocumentDelete] messages may be returned for the same logical +// delete, if multiple targets are affected. +type DocumentDelete struct { + // The resource name of the [Document][google.firestore.v1beta1.Document] that was deleted. + Document string `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // A set of target IDs for targets that previously matched this entity. + RemovedTargetIds []int32 `protobuf:"varint,6,rep,packed,name=removed_target_ids,json=removedTargetIds" json:"removed_target_ids,omitempty"` + // The read timestamp at which the delete was observed. + // + // Greater or equal to the `commit_time` of the delete. 
+ ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=read_time,json=readTime" json:"read_time,omitempty"` +} + +func (m *DocumentDelete) Reset() { *m = DocumentDelete{} } +func (m *DocumentDelete) String() string { return proto.CompactTextString(m) } +func (*DocumentDelete) ProtoMessage() {} +func (*DocumentDelete) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } + +func (m *DocumentDelete) GetDocument() string { + if m != nil { + return m.Document + } + return "" +} + +func (m *DocumentDelete) GetRemovedTargetIds() []int32 { + if m != nil { + return m.RemovedTargetIds + } + return nil +} + +func (m *DocumentDelete) GetReadTime() *google_protobuf1.Timestamp { + if m != nil { + return m.ReadTime + } + return nil +} + +// A [Document][google.firestore.v1beta1.Document] has been removed from the view of the targets. +// +// Sent if the document is no longer relevant to a target and is out of view. +// Can be sent instead of a DocumentDelete or a DocumentChange if the server +// can not send the new value of the document. +// +// Multiple [DocumentRemove][google.firestore.v1beta1.DocumentRemove] messages may be returned for the same logical +// write or delete, if multiple targets are affected. +type DocumentRemove struct { + // The resource name of the [Document][google.firestore.v1beta1.Document] that has gone out of view. + Document string `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"` + // A set of target IDs for targets that previously matched this document. + RemovedTargetIds []int32 `protobuf:"varint,2,rep,packed,name=removed_target_ids,json=removedTargetIds" json:"removed_target_ids,omitempty"` + // The read timestamp at which the remove was observed. + // + // Greater or equal to the `commit_time` of the change/delete/remove. 
+ ReadTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=read_time,json=readTime" json:"read_time,omitempty"` +} + +func (m *DocumentRemove) Reset() { *m = DocumentRemove{} } +func (m *DocumentRemove) String() string { return proto.CompactTextString(m) } +func (*DocumentRemove) ProtoMessage() {} +func (*DocumentRemove) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } + +func (m *DocumentRemove) GetDocument() string { + if m != nil { + return m.Document + } + return "" +} + +func (m *DocumentRemove) GetRemovedTargetIds() []int32 { + if m != nil { + return m.RemovedTargetIds + } + return nil +} + +func (m *DocumentRemove) GetReadTime() *google_protobuf1.Timestamp { + if m != nil { + return m.ReadTime + } + return nil +} + +// A digest of all the documents that match a given target. +type ExistenceFilter struct { + // The target ID to which this filter applies. + TargetId int32 `protobuf:"varint,1,opt,name=target_id,json=targetId" json:"target_id,omitempty"` + // The total count of documents that match [target_id][google.firestore.v1beta1.ExistenceFilter.target_id]. + // + // If different from the count of documents in the client that match, the + // client must manually determine which documents no longer match the target. 
+ Count int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` +} + +func (m *ExistenceFilter) Reset() { *m = ExistenceFilter{} } +func (m *ExistenceFilter) String() string { return proto.CompactTextString(m) } +func (*ExistenceFilter) ProtoMessage() {} +func (*ExistenceFilter) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } + +func (m *ExistenceFilter) GetTargetId() int32 { + if m != nil { + return m.TargetId + } + return 0 +} + +func (m *ExistenceFilter) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +func init() { + proto.RegisterType((*Write)(nil), "google.firestore.v1beta1.Write") + proto.RegisterType((*DocumentTransform)(nil), "google.firestore.v1beta1.DocumentTransform") + proto.RegisterType((*DocumentTransform_FieldTransform)(nil), "google.firestore.v1beta1.DocumentTransform.FieldTransform") + proto.RegisterType((*WriteResult)(nil), "google.firestore.v1beta1.WriteResult") + proto.RegisterType((*DocumentChange)(nil), "google.firestore.v1beta1.DocumentChange") + proto.RegisterType((*DocumentDelete)(nil), "google.firestore.v1beta1.DocumentDelete") + proto.RegisterType((*DocumentRemove)(nil), "google.firestore.v1beta1.DocumentRemove") + proto.RegisterType((*ExistenceFilter)(nil), "google.firestore.v1beta1.ExistenceFilter") + proto.RegisterEnum("google.firestore.v1beta1.DocumentTransform_FieldTransform_ServerValue", DocumentTransform_FieldTransform_ServerValue_name, DocumentTransform_FieldTransform_ServerValue_value) +} + +func init() { proto.RegisterFile("google/firestore/v1beta1/write.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 739 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xeb, 0x44, + 0x14, 0x8e, 0x93, 0x26, 0xaa, 0x4f, 0x50, 0xea, 0x3b, 0xb0, 0xb0, 0x42, 0x2f, 0x37, 0x8a, 0xf8, + 0x89, 0x04, 0x72, 0xd4, 0xb2, 0x40, 0xa2, 0x80, 0xd4, 0x24, 0x4e, 0x1b, 0xd1, 0xa2, 0x74, 0x92, + 0x06, 0x89, 0x8d, 0x35, 
0x8d, 0x27, 0xae, 0x55, 0xdb, 0x63, 0xcd, 0x8c, 0x53, 0x78, 0x0d, 0x58, + 0xc0, 0x86, 0x0d, 0x4b, 0xde, 0x87, 0x87, 0x61, 0x87, 0x3c, 0xfe, 0x69, 0x4a, 0x15, 0x42, 0xab, + 0xbb, 0xf3, 0x39, 0xf3, 0x9d, 0xef, 0x7c, 0xe7, 0x4f, 0x86, 0x0f, 0x3d, 0xc6, 0xbc, 0x80, 0xf6, + 0x57, 0x3e, 0xa7, 0x42, 0x32, 0x4e, 0xfb, 0xeb, 0xa3, 0x1b, 0x2a, 0xc9, 0x51, 0xff, 0x9e, 0xfb, + 0x92, 0x5a, 0x31, 0x67, 0x92, 0x21, 0x33, 0x43, 0x59, 0x25, 0xca, 0xca, 0x51, 0xed, 0xc3, 0x3c, + 0x9e, 0xc4, 0x7e, 0x9f, 0x44, 0x11, 0x93, 0x44, 0xfa, 0x2c, 0x12, 0x59, 0x5c, 0xfb, 0xa3, 0xad, + 0xec, 0x4b, 0x16, 0x86, 0x2c, 0xca, 0x61, 0x9f, 0x6c, 0x85, 0xb9, 0x6c, 0x99, 0x84, 0x34, 0x92, + 0x39, 0xf0, 0x4d, 0x0e, 0x54, 0xd6, 0x4d, 0xb2, 0xea, 0x4b, 0x3f, 0xa4, 0x42, 0x92, 0x30, 0xce, + 0x00, 0xdd, 0xbf, 0xaa, 0x50, 0xff, 0x3e, 0x15, 0x8e, 0xbe, 0x82, 0x46, 0x12, 0xbb, 0x44, 0x52, + 0x53, 0xeb, 0x68, 0xbd, 0xe6, 0x71, 0xd7, 0xda, 0x56, 0x83, 0x35, 0xca, 0x93, 0x9c, 0x57, 0x70, + 0x1e, 0x83, 0x4c, 0x68, 0xb8, 0x34, 0xa0, 0x92, 0x9a, 0xd5, 0x8e, 0xd6, 0xd3, 0xd3, 0x97, 0xcc, + 0x46, 0xdf, 0x82, 0x2e, 0x39, 0x89, 0xc4, 0x8a, 0xf1, 0xd0, 0x6c, 0x28, 0xea, 0x4f, 0x77, 0x53, + 0xcf, 0x8b, 0x90, 0xf3, 0x0a, 0x7e, 0x88, 0x47, 0x67, 0xd0, 0xcc, 0x12, 0x3a, 0x21, 0x11, 0x77, + 0x66, 0x4d, 0xd1, 0x7d, 0xbc, 0x9b, 0xee, 0x92, 0x88, 0x3b, 0x0c, 0x59, 0x68, 0xfa, 0x8d, 0xae, + 0xc0, 0x58, 0x26, 0x9c, 0xd3, 0x48, 0x3a, 0x45, 0xcb, 0xcc, 0xbd, 0x5d, 0x6c, 0x53, 0x4e, 0x97, + 0x2c, 0x72, 0xfd, 0x74, 0x62, 0xf8, 0x20, 0x8f, 0x2f, 0x52, 0x0c, 0x9a, 0xa0, 0xb3, 0x98, 0x72, + 0x35, 0xcf, 0xee, 0xcf, 0x35, 0x78, 0xf5, 0xa4, 0x16, 0xd4, 0x86, 0xfd, 0x32, 0x5b, 0xda, 0x65, + 0x1d, 0x97, 0x36, 0xa2, 0x60, 0xac, 0x7c, 0x1a, 0xb8, 0x4e, 0x59, 0xad, 0x30, 0xab, 0x9d, 0x5a, + 0xaf, 0x79, 0xfc, 0xe5, 0x33, 0xda, 0x65, 0x8d, 0x53, 0x8e, 0xd2, 0xc4, 0x07, 0xab, 0x47, 0xb6, + 0x68, 0xff, 0xad, 0x41, 0xeb, 0x31, 0x06, 0xbd, 0x06, 0xc8, 0x32, 0xc7, 0x44, 0xde, 0xe6, 0xba, + 0x74, 0xe5, 0x99, 0x12, 0x79, 0x8b, 0xee, 0xe1, 0x5d, 0x41, 
0xa5, 0x23, 0x99, 0x23, 0x28, 0x5f, + 0x53, 0xee, 0xac, 0x49, 0x90, 0x64, 0x73, 0x6e, 0x1d, 0x8f, 0x5f, 0xae, 0xcd, 0x9a, 0x29, 0xba, + 0x45, 0xca, 0x76, 0x5e, 0xc1, 0x86, 0xa0, 0x72, 0xce, 0x36, 0x7c, 0xdd, 0xaf, 0xa1, 0xb9, 0x61, + 0xa2, 0x43, 0x30, 0x67, 0x36, 0x5e, 0xd8, 0xd8, 0x59, 0x9c, 0x5e, 0x5c, 0xdb, 0xce, 0xf5, 0x77, + 0xb3, 0xa9, 0x3d, 0x9c, 0x8c, 0x27, 0xf6, 0xc8, 0xa8, 0x20, 0x03, 0xde, 0xc1, 0xf6, 0xd5, 0xb5, + 0x3d, 0x9b, 0x3b, 0xf3, 0xc9, 0xa5, 0x6d, 0x68, 0x03, 0x03, 0x5a, 0x65, 0x2b, 0x1d, 0xf9, 0x53, + 0x4c, 0xbb, 0xbf, 0x69, 0xd0, 0x54, 0xcb, 0x8e, 0xa9, 0x48, 0x02, 0x89, 0x4e, 0xca, 0x6d, 0x4a, + 0xcf, 0x22, 0xdf, 0xfb, 0x76, 0x51, 0x51, 0x71, 0x33, 0xd6, 0xbc, 0xb8, 0x99, 0x62, 0x83, 0x52, + 0x07, 0xba, 0x80, 0x57, 0x0f, 0xf4, 0x5c, 0x11, 0x16, 0x03, 0x7b, 0xb3, 0xbd, 0x29, 0xaa, 0x14, + 0x6c, 0x94, 0x91, 0x99, 0x12, 0xd1, 0xfd, 0x5d, 0x83, 0x56, 0xd1, 0xb0, 0xe1, 0x2d, 0x89, 0x3c, + 0x8a, 0xbe, 0xf9, 0xd7, 0xb2, 0xfc, 0xaf, 0x93, 0xdc, 0x58, 0xa8, 0xd7, 0x00, 0x92, 0x70, 0x8f, + 0x4a, 0xc7, 0x77, 0x85, 0x59, 0xef, 0xd4, 0x7a, 0x75, 0xac, 0x67, 0x9e, 0x89, 0x2b, 0xd0, 0x67, + 0x80, 0x38, 0x0d, 0xd9, 0x9a, 0xba, 0xce, 0x06, 0xac, 0xa1, 0x60, 0x46, 0xfe, 0x32, 0x2f, 0xd0, + 0xdd, 0x5f, 0x36, 0xf4, 0x8d, 0xb2, 0xc3, 0xfe, 0xaf, 0x65, 0x7e, 0x16, 0x39, 0xfa, 0x02, 0x74, + 0x4e, 0x89, 0x9b, 0x4d, 0x61, 0x6f, 0xe7, 0x14, 0xf6, 0x53, 0x70, 0x6a, 0x3e, 0x52, 0x85, 0x15, + 0xeb, 0x0b, 0x54, 0x55, 0xdf, 0xb6, 0xaa, 0x11, 0x1c, 0xd8, 0x3f, 0xfa, 0x42, 0xd2, 0x68, 0x49, + 0xc7, 0x7e, 0x20, 0x29, 0x47, 0xef, 0x83, 0x5e, 0x66, 0x54, 0xb2, 0xea, 0x78, 0xbf, 0x18, 0x05, + 0x7a, 0x0f, 0xea, 0x4b, 0x96, 0x44, 0x52, 0x9d, 0x54, 0x1d, 0x67, 0xc6, 0xe0, 0x57, 0x0d, 0x0e, + 0x97, 0x2c, 0xdc, 0x3a, 0xf2, 0x01, 0xa8, 0x55, 0x9e, 0xa6, 0x4a, 0xa6, 0xda, 0x0f, 0xa7, 0x39, + 0xce, 0x63, 0x01, 0x89, 0x3c, 0x8b, 0x71, 0xaf, 0xef, 0xd1, 0x48, 0xe9, 0xec, 0x67, 0x4f, 0x24, + 0xf6, 0xc5, 0xd3, 0x3f, 0xc6, 0x49, 0xe9, 0xf9, 0xa3, 0xba, 0x77, 0x36, 0x1c, 0xcf, 0xfe, 0xac, + 
0x7e, 0x70, 0x96, 0x51, 0x0d, 0x03, 0x96, 0xb8, 0xd6, 0xb8, 0x4c, 0xbc, 0x38, 0x1a, 0xa4, 0x11, + 0x37, 0x0d, 0xc5, 0xfa, 0xf9, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0x36, 0x10, 0xb5, 0x0b, + 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/annotations.pb.go new file mode 100644 index 000000000..7f2c5348b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/annotations.pb.go @@ -0,0 +1,2227 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/annotations.proto + +/* +Package genomics is a generated protocol buffer package. + +It is generated from these files: + google/genomics/v1/annotations.proto + google/genomics/v1/cigar.proto + google/genomics/v1/datasets.proto + google/genomics/v1/operations.proto + google/genomics/v1/position.proto + google/genomics/v1/range.proto + google/genomics/v1/readalignment.proto + google/genomics/v1/readgroup.proto + google/genomics/v1/readgroupset.proto + google/genomics/v1/reads.proto + google/genomics/v1/references.proto + google/genomics/v1/variants.proto + +It has these top-level messages: + AnnotationSet + Annotation + VariantAnnotation + Transcript + ExternalId + CreateAnnotationSetRequest + GetAnnotationSetRequest + UpdateAnnotationSetRequest + DeleteAnnotationSetRequest + SearchAnnotationSetsRequest + SearchAnnotationSetsResponse + CreateAnnotationRequest + BatchCreateAnnotationsRequest + BatchCreateAnnotationsResponse + GetAnnotationRequest + UpdateAnnotationRequest + DeleteAnnotationRequest + SearchAnnotationsRequest + SearchAnnotationsResponse + CigarUnit + Dataset + ListDatasetsRequest + ListDatasetsResponse + CreateDatasetRequest + UpdateDatasetRequest + DeleteDatasetRequest + UndeleteDatasetRequest + GetDatasetRequest + OperationMetadata + OperationEvent + Position + Range + LinearAlignment + Read + ReadGroup + ReadGroupSet + 
SearchReadGroupSetsRequest + SearchReadGroupSetsResponse + ImportReadGroupSetsRequest + ImportReadGroupSetsResponse + ExportReadGroupSetRequest + UpdateReadGroupSetRequest + DeleteReadGroupSetRequest + GetReadGroupSetRequest + ListCoverageBucketsRequest + CoverageBucket + ListCoverageBucketsResponse + SearchReadsRequest + SearchReadsResponse + StreamReadsRequest + StreamReadsResponse + Reference + ReferenceSet + SearchReferenceSetsRequest + SearchReferenceSetsResponse + GetReferenceSetRequest + SearchReferencesRequest + SearchReferencesResponse + GetReferenceRequest + ListBasesRequest + ListBasesResponse + VariantSetMetadata + VariantSet + Variant + VariantCall + CallSet + ReferenceBound + ImportVariantsRequest + ImportVariantsResponse + CreateVariantSetRequest + ExportVariantSetRequest + GetVariantSetRequest + SearchVariantSetsRequest + SearchVariantSetsResponse + DeleteVariantSetRequest + UpdateVariantSetRequest + SearchVariantsRequest + SearchVariantsResponse + CreateVariantRequest + UpdateVariantRequest + DeleteVariantRequest + GetVariantRequest + MergeVariantsRequest + SearchCallSetsRequest + SearchCallSetsResponse + CreateCallSetRequest + UpdateCallSetRequest + DeleteCallSetRequest + GetCallSetRequest + StreamVariantsRequest + StreamVariantsResponse +*/ +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// When an [Annotation][google.genomics.v1.Annotation] or +// [AnnotationSet][google.genomics.v1.AnnotationSet] is created, if `type` is +// not specified it will be set to `GENERIC`. +type AnnotationType int32 + +const ( + AnnotationType_ANNOTATION_TYPE_UNSPECIFIED AnnotationType = 0 + // A `GENERIC` annotation type should be used when no other annotation + // type will suffice. This represents an untyped annotation of the reference + // genome. + AnnotationType_GENERIC AnnotationType = 1 + // A `VARIANT` annotation type. + AnnotationType_VARIANT AnnotationType = 2 + // A `GENE` annotation type represents the existence of a gene at the + // associated reference coordinates. The start coordinate is typically the + // gene's transcription start site and the end is typically the end of the + // gene's last exon. + AnnotationType_GENE AnnotationType = 3 + // A `TRANSCRIPT` annotation type represents the assertion that a + // particular region of the reference genome may be transcribed as RNA. 
+ AnnotationType_TRANSCRIPT AnnotationType = 4 +) + +var AnnotationType_name = map[int32]string{ + 0: "ANNOTATION_TYPE_UNSPECIFIED", + 1: "GENERIC", + 2: "VARIANT", + 3: "GENE", + 4: "TRANSCRIPT", +} +var AnnotationType_value = map[string]int32{ + "ANNOTATION_TYPE_UNSPECIFIED": 0, + "GENERIC": 1, + "VARIANT": 2, + "GENE": 3, + "TRANSCRIPT": 4, +} + +func (x AnnotationType) String() string { + return proto.EnumName(AnnotationType_name, int32(x)) +} +func (AnnotationType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type VariantAnnotation_Type int32 + +const ( + VariantAnnotation_TYPE_UNSPECIFIED VariantAnnotation_Type = 0 + // `TYPE_OTHER` should be used when no other Type will suffice. + // Further explanation of the variant type may be included in the + // [info][google.genomics.v1.Annotation.info] field. + VariantAnnotation_TYPE_OTHER VariantAnnotation_Type = 1 + // `INSERTION` indicates an insertion. + VariantAnnotation_INSERTION VariantAnnotation_Type = 2 + // `DELETION` indicates a deletion. + VariantAnnotation_DELETION VariantAnnotation_Type = 3 + // `SUBSTITUTION` indicates a block substitution of + // two or more nucleotides. + VariantAnnotation_SUBSTITUTION VariantAnnotation_Type = 4 + // `SNP` indicates a single nucleotide polymorphism. + VariantAnnotation_SNP VariantAnnotation_Type = 5 + // `STRUCTURAL` indicates a large structural variant, + // including chromosomal fusions, inversions, etc. + VariantAnnotation_STRUCTURAL VariantAnnotation_Type = 6 + // `CNV` indicates a variation in copy number. 
+ VariantAnnotation_CNV VariantAnnotation_Type = 7 +) + +var VariantAnnotation_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "TYPE_OTHER", + 2: "INSERTION", + 3: "DELETION", + 4: "SUBSTITUTION", + 5: "SNP", + 6: "STRUCTURAL", + 7: "CNV", +} +var VariantAnnotation_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "TYPE_OTHER": 1, + "INSERTION": 2, + "DELETION": 3, + "SUBSTITUTION": 4, + "SNP": 5, + "STRUCTURAL": 6, + "CNV": 7, +} + +func (x VariantAnnotation_Type) String() string { + return proto.EnumName(VariantAnnotation_Type_name, int32(x)) +} +func (VariantAnnotation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +type VariantAnnotation_Effect int32 + +const ( + VariantAnnotation_EFFECT_UNSPECIFIED VariantAnnotation_Effect = 0 + // `EFFECT_OTHER` should be used when no other Effect + // will suffice. + VariantAnnotation_EFFECT_OTHER VariantAnnotation_Effect = 1 + // `FRAMESHIFT` indicates a mutation in which the insertion or + // deletion of nucleotides resulted in a frameshift change. + VariantAnnotation_FRAMESHIFT VariantAnnotation_Effect = 2 + // `FRAME_PRESERVING_INDEL` indicates a mutation in which a + // multiple of three nucleotides has been inserted or deleted, resulting + // in no change to the reading frame of the coding sequence. + VariantAnnotation_FRAME_PRESERVING_INDEL VariantAnnotation_Effect = 3 + // `SYNONYMOUS_SNP` indicates a single nucleotide polymorphism + // mutation that results in no amino acid change. + VariantAnnotation_SYNONYMOUS_SNP VariantAnnotation_Effect = 4 + // `NONSYNONYMOUS_SNP` indicates a single nucleotide + // polymorphism mutation that results in an amino acid change. + VariantAnnotation_NONSYNONYMOUS_SNP VariantAnnotation_Effect = 5 + // `STOP_GAIN` indicates a mutation that leads to the creation + // of a stop codon at the variant site. Frameshift mutations creating + // downstream stop codons do not count as `STOP_GAIN`. 
+ VariantAnnotation_STOP_GAIN VariantAnnotation_Effect = 6 + // `STOP_LOSS` indicates a mutation that eliminates a + // stop codon at the variant site. + VariantAnnotation_STOP_LOSS VariantAnnotation_Effect = 7 + // `SPLICE_SITE_DISRUPTION` indicates that this variant is + // found in a splice site for the associated transcript, and alters the + // normal splicing pattern. + VariantAnnotation_SPLICE_SITE_DISRUPTION VariantAnnotation_Effect = 8 +) + +var VariantAnnotation_Effect_name = map[int32]string{ + 0: "EFFECT_UNSPECIFIED", + 1: "EFFECT_OTHER", + 2: "FRAMESHIFT", + 3: "FRAME_PRESERVING_INDEL", + 4: "SYNONYMOUS_SNP", + 5: "NONSYNONYMOUS_SNP", + 6: "STOP_GAIN", + 7: "STOP_LOSS", + 8: "SPLICE_SITE_DISRUPTION", +} +var VariantAnnotation_Effect_value = map[string]int32{ + "EFFECT_UNSPECIFIED": 0, + "EFFECT_OTHER": 1, + "FRAMESHIFT": 2, + "FRAME_PRESERVING_INDEL": 3, + "SYNONYMOUS_SNP": 4, + "NONSYNONYMOUS_SNP": 5, + "STOP_GAIN": 6, + "STOP_LOSS": 7, + "SPLICE_SITE_DISRUPTION": 8, +} + +func (x VariantAnnotation_Effect) String() string { + return proto.EnumName(VariantAnnotation_Effect_name, int32(x)) +} +func (VariantAnnotation_Effect) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +type VariantAnnotation_ClinicalSignificance int32 + +const ( + VariantAnnotation_CLINICAL_SIGNIFICANCE_UNSPECIFIED VariantAnnotation_ClinicalSignificance = 0 + // `OTHER` should be used when no other clinical significance + // value will suffice. 
+ VariantAnnotation_CLINICAL_SIGNIFICANCE_OTHER VariantAnnotation_ClinicalSignificance = 1 + VariantAnnotation_UNCERTAIN VariantAnnotation_ClinicalSignificance = 2 + VariantAnnotation_BENIGN VariantAnnotation_ClinicalSignificance = 3 + VariantAnnotation_LIKELY_BENIGN VariantAnnotation_ClinicalSignificance = 4 + VariantAnnotation_LIKELY_PATHOGENIC VariantAnnotation_ClinicalSignificance = 5 + VariantAnnotation_PATHOGENIC VariantAnnotation_ClinicalSignificance = 6 + VariantAnnotation_DRUG_RESPONSE VariantAnnotation_ClinicalSignificance = 7 + VariantAnnotation_HISTOCOMPATIBILITY VariantAnnotation_ClinicalSignificance = 8 + VariantAnnotation_CONFERS_SENSITIVITY VariantAnnotation_ClinicalSignificance = 9 + VariantAnnotation_RISK_FACTOR VariantAnnotation_ClinicalSignificance = 10 + VariantAnnotation_ASSOCIATION VariantAnnotation_ClinicalSignificance = 11 + VariantAnnotation_PROTECTIVE VariantAnnotation_ClinicalSignificance = 12 + // `MULTIPLE_REPORTED` should be used when multiple clinical + // signficances are reported for a variant. The original clinical + // significance values may be provided in the `info` field. 
+ VariantAnnotation_MULTIPLE_REPORTED VariantAnnotation_ClinicalSignificance = 13 +) + +var VariantAnnotation_ClinicalSignificance_name = map[int32]string{ + 0: "CLINICAL_SIGNIFICANCE_UNSPECIFIED", + 1: "CLINICAL_SIGNIFICANCE_OTHER", + 2: "UNCERTAIN", + 3: "BENIGN", + 4: "LIKELY_BENIGN", + 5: "LIKELY_PATHOGENIC", + 6: "PATHOGENIC", + 7: "DRUG_RESPONSE", + 8: "HISTOCOMPATIBILITY", + 9: "CONFERS_SENSITIVITY", + 10: "RISK_FACTOR", + 11: "ASSOCIATION", + 12: "PROTECTIVE", + 13: "MULTIPLE_REPORTED", +} +var VariantAnnotation_ClinicalSignificance_value = map[string]int32{ + "CLINICAL_SIGNIFICANCE_UNSPECIFIED": 0, + "CLINICAL_SIGNIFICANCE_OTHER": 1, + "UNCERTAIN": 2, + "BENIGN": 3, + "LIKELY_BENIGN": 4, + "LIKELY_PATHOGENIC": 5, + "PATHOGENIC": 6, + "DRUG_RESPONSE": 7, + "HISTOCOMPATIBILITY": 8, + "CONFERS_SENSITIVITY": 9, + "RISK_FACTOR": 10, + "ASSOCIATION": 11, + "PROTECTIVE": 12, + "MULTIPLE_REPORTED": 13, +} + +func (x VariantAnnotation_ClinicalSignificance) String() string { + return proto.EnumName(VariantAnnotation_ClinicalSignificance_name, int32(x)) +} +func (VariantAnnotation_ClinicalSignificance) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 2} +} + +// An annotation set is a logical grouping of annotations that share consistent +// type information and provenance. Examples of annotation sets include 'all +// genes from refseq', and 'all variant annotations from ClinVar'. +type AnnotationSet struct { + // The server-generated annotation set ID, unique across all annotation sets. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The dataset to which this annotation set belongs. + DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // The ID of the reference set that defines the coordinate space for this + // set's annotations. 
+ ReferenceSetId string `protobuf:"bytes,3,opt,name=reference_set_id,json=referenceSetId" json:"reference_set_id,omitempty"` + // The display name for this annotation set. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // The source URI describing the file from which this annotation set was + // generated, if any. + SourceUri string `protobuf:"bytes,5,opt,name=source_uri,json=sourceUri" json:"source_uri,omitempty"` + // The type of annotations contained within this set. + Type AnnotationType `protobuf:"varint,6,opt,name=type,enum=google.genomics.v1.AnnotationType" json:"type,omitempty"` + // A map of additional read alignment information. This must be of the form + // map (string key mapping to a list of string values). + Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,17,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *AnnotationSet) Reset() { *m = AnnotationSet{} } +func (m *AnnotationSet) String() string { return proto.CompactTextString(m) } +func (*AnnotationSet) ProtoMessage() {} +func (*AnnotationSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AnnotationSet) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *AnnotationSet) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *AnnotationSet) GetReferenceSetId() string { + if m != nil { + return m.ReferenceSetId + } + return "" +} + +func (m *AnnotationSet) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AnnotationSet) GetSourceUri() string { + if m != nil { + return m.SourceUri + } + return "" +} + +func (m *AnnotationSet) GetType() AnnotationType { + if m != nil { + return m.Type + } + return AnnotationType_ANNOTATION_TYPE_UNSPECIFIED +} + +func (m *AnnotationSet) GetInfo() map[string]*google_protobuf3.ListValue { + if m != nil { + return m.Info + } + return nil 
+} + +// An annotation describes a region of reference genome. The value of an +// annotation may be one of several canonical types, supplemented by arbitrary +// info tags. An annotation is not inherently associated with a specific +// sample or individual (though a client could choose to use annotations in +// this way). Example canonical annotation types are `GENE` and +// `VARIANT`. +type Annotation struct { + // The server-generated annotation ID, unique across all annotations. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The annotation set to which this annotation belongs. + AnnotationSetId string `protobuf:"bytes,2,opt,name=annotation_set_id,json=annotationSetId" json:"annotation_set_id,omitempty"` + // The display name of this annotation. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // The ID of the Google Genomics reference associated with this range. + ReferenceId string `protobuf:"bytes,4,opt,name=reference_id,json=referenceId" json:"reference_id,omitempty"` + // The display name corresponding to the reference specified by + // `referenceId`, for example `chr1`, `1`, or `chrX`. + ReferenceName string `protobuf:"bytes,5,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The start position of the range on the reference, 0-based inclusive. + Start int64 `protobuf:"varint,6,opt,name=start" json:"start,omitempty"` + // The end position of the range on the reference, 0-based exclusive. + End int64 `protobuf:"varint,7,opt,name=end" json:"end,omitempty"` + // Whether this range refers to the reverse strand, as opposed to the forward + // strand. Note that regardless of this field, the start/end position of the + // range always refer to the forward strand. + ReverseStrand bool `protobuf:"varint,8,opt,name=reverse_strand,json=reverseStrand" json:"reverse_strand,omitempty"` + // The data type for this annotation. Must match the containing annotation + // set's type. 
+ Type AnnotationType `protobuf:"varint,9,opt,name=type,enum=google.genomics.v1.AnnotationType" json:"type,omitempty"` + // Types that are valid to be assigned to Value: + // *Annotation_Variant + // *Annotation_Transcript + Value isAnnotation_Value `protobuf_oneof:"value"` + // A map of additional read alignment information. This must be of the form + // map (string key mapping to a list of string values). + Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,12,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Annotation) Reset() { *m = Annotation{} } +func (m *Annotation) String() string { return proto.CompactTextString(m) } +func (*Annotation) ProtoMessage() {} +func (*Annotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type isAnnotation_Value interface { + isAnnotation_Value() +} + +type Annotation_Variant struct { + Variant *VariantAnnotation `protobuf:"bytes,10,opt,name=variant,oneof"` +} +type Annotation_Transcript struct { + Transcript *Transcript `protobuf:"bytes,11,opt,name=transcript,oneof"` +} + +func (*Annotation_Variant) isAnnotation_Value() {} +func (*Annotation_Transcript) isAnnotation_Value() {} + +func (m *Annotation) GetValue() isAnnotation_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Annotation) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Annotation) GetAnnotationSetId() string { + if m != nil { + return m.AnnotationSetId + } + return "" +} + +func (m *Annotation) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Annotation) GetReferenceId() string { + if m != nil { + return m.ReferenceId + } + return "" +} + +func (m *Annotation) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *Annotation) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Annotation) 
GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *Annotation) GetReverseStrand() bool { + if m != nil { + return m.ReverseStrand + } + return false +} + +func (m *Annotation) GetType() AnnotationType { + if m != nil { + return m.Type + } + return AnnotationType_ANNOTATION_TYPE_UNSPECIFIED +} + +func (m *Annotation) GetVariant() *VariantAnnotation { + if x, ok := m.GetValue().(*Annotation_Variant); ok { + return x.Variant + } + return nil +} + +func (m *Annotation) GetTranscript() *Transcript { + if x, ok := m.GetValue().(*Annotation_Transcript); ok { + return x.Transcript + } + return nil +} + +func (m *Annotation) GetInfo() map[string]*google_protobuf3.ListValue { + if m != nil { + return m.Info + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Annotation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Annotation_OneofMarshaler, _Annotation_OneofUnmarshaler, _Annotation_OneofSizer, []interface{}{ + (*Annotation_Variant)(nil), + (*Annotation_Transcript)(nil), + } +} + +func _Annotation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Annotation) + // value + switch x := m.Value.(type) { + case *Annotation_Variant: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Variant); err != nil { + return err + } + case *Annotation_Transcript: + b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Transcript); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Annotation.Value has unexpected type %T", x) + } + return nil +} + +func _Annotation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Annotation) + switch tag { + case 10: // value.variant + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + 
msg := new(VariantAnnotation) + err := b.DecodeMessage(msg) + m.Value = &Annotation_Variant{msg} + return true, err + case 11: // value.transcript + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Transcript) + err := b.DecodeMessage(msg) + m.Value = &Annotation_Transcript{msg} + return true, err + default: + return false, nil + } +} + +func _Annotation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Annotation) + // value + switch x := m.Value.(type) { + case *Annotation_Variant: + s := proto.Size(x.Variant) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Annotation_Transcript: + s := proto.Size(x.Transcript) + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type VariantAnnotation struct { + // Type has been adapted from ClinVar's list of variant types. + Type VariantAnnotation_Type `protobuf:"varint,1,opt,name=type,enum=google.genomics.v1.VariantAnnotation_Type" json:"type,omitempty"` + // Effect of the variant on the coding sequence. + Effect VariantAnnotation_Effect `protobuf:"varint,2,opt,name=effect,enum=google.genomics.v1.VariantAnnotation_Effect" json:"effect,omitempty"` + // The alternate allele for this variant. If multiple alternate alleles + // exist at this location, create a separate variant for each one, as they + // may represent distinct conditions. + AlternateBases string `protobuf:"bytes,3,opt,name=alternate_bases,json=alternateBases" json:"alternate_bases,omitempty"` + // Google annotation ID of the gene affected by this variant. This should + // be provided when the variant is created. + GeneId string `protobuf:"bytes,4,opt,name=gene_id,json=geneId" json:"gene_id,omitempty"` + // Google annotation IDs of the transcripts affected by this variant. 
These + // should be provided when the variant is created. + TranscriptIds []string `protobuf:"bytes,5,rep,name=transcript_ids,json=transcriptIds" json:"transcript_ids,omitempty"` + // The set of conditions associated with this variant. + // A condition describes the way a variant influences human health. + Conditions []*VariantAnnotation_ClinicalCondition `protobuf:"bytes,6,rep,name=conditions" json:"conditions,omitempty"` + // Describes the clinical significance of a variant. + // It is adapted from the ClinVar controlled vocabulary for clinical + // significance described at: + // http://www.ncbi.nlm.nih.gov/clinvar/docs/clinsig/ + ClinicalSignificance VariantAnnotation_ClinicalSignificance `protobuf:"varint,7,opt,name=clinical_significance,json=clinicalSignificance,enum=google.genomics.v1.VariantAnnotation_ClinicalSignificance" json:"clinical_significance,omitempty"` +} + +func (m *VariantAnnotation) Reset() { *m = VariantAnnotation{} } +func (m *VariantAnnotation) String() string { return proto.CompactTextString(m) } +func (*VariantAnnotation) ProtoMessage() {} +func (*VariantAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *VariantAnnotation) GetType() VariantAnnotation_Type { + if m != nil { + return m.Type + } + return VariantAnnotation_TYPE_UNSPECIFIED +} + +func (m *VariantAnnotation) GetEffect() VariantAnnotation_Effect { + if m != nil { + return m.Effect + } + return VariantAnnotation_EFFECT_UNSPECIFIED +} + +func (m *VariantAnnotation) GetAlternateBases() string { + if m != nil { + return m.AlternateBases + } + return "" +} + +func (m *VariantAnnotation) GetGeneId() string { + if m != nil { + return m.GeneId + } + return "" +} + +func (m *VariantAnnotation) GetTranscriptIds() []string { + if m != nil { + return m.TranscriptIds + } + return nil +} + +func (m *VariantAnnotation) GetConditions() []*VariantAnnotation_ClinicalCondition { + if m != nil { + return m.Conditions + } + return nil +} + +func (m 
*VariantAnnotation) GetClinicalSignificance() VariantAnnotation_ClinicalSignificance { + if m != nil { + return m.ClinicalSignificance + } + return VariantAnnotation_CLINICAL_SIGNIFICANCE_UNSPECIFIED +} + +type VariantAnnotation_ClinicalCondition struct { + // A set of names for the condition. + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + // The set of external IDs for this condition. + ExternalIds []*ExternalId `protobuf:"bytes,2,rep,name=external_ids,json=externalIds" json:"external_ids,omitempty"` + // The MedGen concept id associated with this gene. + // Search for these IDs at http://www.ncbi.nlm.nih.gov/medgen/ + ConceptId string `protobuf:"bytes,3,opt,name=concept_id,json=conceptId" json:"concept_id,omitempty"` + // The OMIM id for this condition. + // Search for these IDs at http://omim.org/ + OmimId string `protobuf:"bytes,4,opt,name=omim_id,json=omimId" json:"omim_id,omitempty"` +} + +func (m *VariantAnnotation_ClinicalCondition) Reset() { *m = VariantAnnotation_ClinicalCondition{} } +func (m *VariantAnnotation_ClinicalCondition) String() string { return proto.CompactTextString(m) } +func (*VariantAnnotation_ClinicalCondition) ProtoMessage() {} +func (*VariantAnnotation_ClinicalCondition) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +func (m *VariantAnnotation_ClinicalCondition) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + +func (m *VariantAnnotation_ClinicalCondition) GetExternalIds() []*ExternalId { + if m != nil { + return m.ExternalIds + } + return nil +} + +func (m *VariantAnnotation_ClinicalCondition) GetConceptId() string { + if m != nil { + return m.ConceptId + } + return "" +} + +func (m *VariantAnnotation_ClinicalCondition) GetOmimId() string { + if m != nil { + return m.OmimId + } + return "" +} + +// A transcript represents the assertion that a particular region of the +// reference genome may be transcribed as RNA. 
+type Transcript struct { + // The annotation ID of the gene from which this transcript is transcribed. + GeneId string `protobuf:"bytes,1,opt,name=gene_id,json=geneId" json:"gene_id,omitempty"` + // The exons that compose + // this transcript. This field should be unset for genomes where transcript + // splicing does not occur, for example prokaryotes. + // + // Introns are regions of the transcript that are not included in the + // spliced RNA product. Though not explicitly modeled here, intron ranges can + // be deduced; all regions of this transcript that are not exons are introns. + // + // Exonic sequences do not necessarily code for a translational product + // (amino acids). Only the regions of exons bounded by the + // [codingSequence][google.genomics.v1.Transcript.coding_sequence] correspond + // to coding DNA sequence. + // + // Exons are ordered by start position and may not overlap. + Exons []*Transcript_Exon `protobuf:"bytes,2,rep,name=exons" json:"exons,omitempty"` + // The range of the coding sequence for this transcript, if any. To determine + // the exact ranges of coding sequence, intersect this range with those of the + // [exons][google.genomics.v1.Transcript.exons], if any. If there are any + // [exons][google.genomics.v1.Transcript.exons], the + // [codingSequence][google.genomics.v1.Transcript.coding_sequence] must start + // and end within them. + // + // Note that in some cases, the reference genome will not exactly match the + // observed mRNA transcript e.g. due to variance in the source genome from + // reference. In these cases, + // [exon.frame][google.genomics.v1.Transcript.Exon.frame] will not necessarily + // match the expected reference reading frame and coding exon reference bases + // cannot necessarily be concatenated to produce the original transcript mRNA. 
+ CodingSequence *Transcript_CodingSequence `protobuf:"bytes,3,opt,name=coding_sequence,json=codingSequence" json:"coding_sequence,omitempty"` +} + +func (m *Transcript) Reset() { *m = Transcript{} } +func (m *Transcript) String() string { return proto.CompactTextString(m) } +func (*Transcript) ProtoMessage() {} +func (*Transcript) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Transcript) GetGeneId() string { + if m != nil { + return m.GeneId + } + return "" +} + +func (m *Transcript) GetExons() []*Transcript_Exon { + if m != nil { + return m.Exons + } + return nil +} + +func (m *Transcript) GetCodingSequence() *Transcript_CodingSequence { + if m != nil { + return m.CodingSequence + } + return nil +} + +type Transcript_Exon struct { + // The start position of the exon on this annotation's reference sequence, + // 0-based inclusive. Note that this is relative to the reference start, and + // **not** the containing annotation start. + Start int64 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + // The end position of the exon on this annotation's reference sequence, + // 0-based exclusive. Note that this is relative to the reference start, and + // *not* the containing annotation start. + End int64 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + // The frame of this exon. Contains a value of 0, 1, or 2, which indicates + // the offset of the first coding base of the exon within the reading frame + // of the coding DNA sequence, if any. This field is dependent on the + // strandedness of this annotation (see + // [Annotation.reverse_strand][google.genomics.v1.Annotation.reverse_strand]). + // For forward stranded annotations, this offset is relative to the + // [exon.start][google.genomics.v1.Transcript.Exon.start]. For reverse + // strand annotations, this offset is relative to the + // [exon.end][google.genomics.v1.Transcript.Exon.end] `- 1`. + // + // Unset if this exon does not intersect the coding sequence. 
Upon creation + // of a transcript, the frame must be populated for all or none of the + // coding exons. + Frame *google_protobuf4.Int32Value `protobuf:"bytes,3,opt,name=frame" json:"frame,omitempty"` +} + +func (m *Transcript_Exon) Reset() { *m = Transcript_Exon{} } +func (m *Transcript_Exon) String() string { return proto.CompactTextString(m) } +func (*Transcript_Exon) ProtoMessage() {} +func (*Transcript_Exon) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } + +func (m *Transcript_Exon) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Transcript_Exon) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *Transcript_Exon) GetFrame() *google_protobuf4.Int32Value { + if m != nil { + return m.Frame + } + return nil +} + +type Transcript_CodingSequence struct { + // The start of the coding sequence on this annotation's reference sequence, + // 0-based inclusive. Note that this position is relative to the reference + // start, and *not* the containing annotation start. + Start int64 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + // The end of the coding sequence on this annotation's reference sequence, + // 0-based exclusive. Note that this position is relative to the reference + // start, and *not* the containing annotation start. 
+ End int64 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` +} + +func (m *Transcript_CodingSequence) Reset() { *m = Transcript_CodingSequence{} } +func (m *Transcript_CodingSequence) String() string { return proto.CompactTextString(m) } +func (*Transcript_CodingSequence) ProtoMessage() {} +func (*Transcript_CodingSequence) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 1} } + +func (m *Transcript_CodingSequence) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Transcript_CodingSequence) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +type ExternalId struct { + // The name of the source of this data. + SourceName string `protobuf:"bytes,1,opt,name=source_name,json=sourceName" json:"source_name,omitempty"` + // The id used by the source of this data. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` +} + +func (m *ExternalId) Reset() { *m = ExternalId{} } +func (m *ExternalId) String() string { return proto.CompactTextString(m) } +func (*ExternalId) ProtoMessage() {} +func (*ExternalId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ExternalId) GetSourceName() string { + if m != nil { + return m.SourceName + } + return "" +} + +func (m *ExternalId) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type CreateAnnotationSetRequest struct { + // The annotation set to create. 
+ AnnotationSet *AnnotationSet `protobuf:"bytes,1,opt,name=annotation_set,json=annotationSet" json:"annotation_set,omitempty"` +} + +func (m *CreateAnnotationSetRequest) Reset() { *m = CreateAnnotationSetRequest{} } +func (m *CreateAnnotationSetRequest) String() string { return proto.CompactTextString(m) } +func (*CreateAnnotationSetRequest) ProtoMessage() {} +func (*CreateAnnotationSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateAnnotationSetRequest) GetAnnotationSet() *AnnotationSet { + if m != nil { + return m.AnnotationSet + } + return nil +} + +type GetAnnotationSetRequest struct { + // The ID of the annotation set to be retrieved. + AnnotationSetId string `protobuf:"bytes,1,opt,name=annotation_set_id,json=annotationSetId" json:"annotation_set_id,omitempty"` +} + +func (m *GetAnnotationSetRequest) Reset() { *m = GetAnnotationSetRequest{} } +func (m *GetAnnotationSetRequest) String() string { return proto.CompactTextString(m) } +func (*GetAnnotationSetRequest) ProtoMessage() {} +func (*GetAnnotationSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *GetAnnotationSetRequest) GetAnnotationSetId() string { + if m != nil { + return m.AnnotationSetId + } + return "" +} + +type UpdateAnnotationSetRequest struct { + // The ID of the annotation set to be updated. + AnnotationSetId string `protobuf:"bytes,1,opt,name=annotation_set_id,json=annotationSetId" json:"annotation_set_id,omitempty"` + // The new annotation set. + AnnotationSet *AnnotationSet `protobuf:"bytes,2,opt,name=annotation_set,json=annotationSet" json:"annotation_set,omitempty"` + // An optional mask specifying which fields to update. Mutable fields are + // [name][google.genomics.v1.AnnotationSet.name], + // [source_uri][google.genomics.v1.AnnotationSet.source_uri], and + // [info][google.genomics.v1.AnnotationSet.info]. If unspecified, all + // mutable fields will be updated. 
+ UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateAnnotationSetRequest) Reset() { *m = UpdateAnnotationSetRequest{} } +func (m *UpdateAnnotationSetRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateAnnotationSetRequest) ProtoMessage() {} +func (*UpdateAnnotationSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *UpdateAnnotationSetRequest) GetAnnotationSetId() string { + if m != nil { + return m.AnnotationSetId + } + return "" +} + +func (m *UpdateAnnotationSetRequest) GetAnnotationSet() *AnnotationSet { + if m != nil { + return m.AnnotationSet + } + return nil +} + +func (m *UpdateAnnotationSetRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +type DeleteAnnotationSetRequest struct { + // The ID of the annotation set to be deleted. + AnnotationSetId string `protobuf:"bytes,1,opt,name=annotation_set_id,json=annotationSetId" json:"annotation_set_id,omitempty"` +} + +func (m *DeleteAnnotationSetRequest) Reset() { *m = DeleteAnnotationSetRequest{} } +func (m *DeleteAnnotationSetRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteAnnotationSetRequest) ProtoMessage() {} +func (*DeleteAnnotationSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *DeleteAnnotationSetRequest) GetAnnotationSetId() string { + if m != nil { + return m.AnnotationSetId + } + return "" +} + +type SearchAnnotationSetsRequest struct { + // Required. The dataset IDs to search within. Caller must have `READ` access + // to these datasets. + DatasetIds []string `protobuf:"bytes,1,rep,name=dataset_ids,json=datasetIds" json:"dataset_ids,omitempty"` + // If specified, only annotation sets associated with the given reference set + // are returned. 
+ ReferenceSetId string `protobuf:"bytes,2,opt,name=reference_set_id,json=referenceSetId" json:"reference_set_id,omitempty"` + // Only return annotations sets for which a substring of the name matches this + // string (case insensitive). + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // If specified, only annotation sets that have any of these types are + // returned. + Types []AnnotationType `protobuf:"varint,4,rep,packed,name=types,enum=google.genomics.v1.AnnotationType" json:"types,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 128. The maximum value is 1024. + PageSize int32 `protobuf:"varint,6,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *SearchAnnotationSetsRequest) Reset() { *m = SearchAnnotationSetsRequest{} } +func (m *SearchAnnotationSetsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchAnnotationSetsRequest) ProtoMessage() {} +func (*SearchAnnotationSetsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *SearchAnnotationSetsRequest) GetDatasetIds() []string { + if m != nil { + return m.DatasetIds + } + return nil +} + +func (m *SearchAnnotationSetsRequest) GetReferenceSetId() string { + if m != nil { + return m.ReferenceSetId + } + return "" +} + +func (m *SearchAnnotationSetsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SearchAnnotationSetsRequest) GetTypes() []AnnotationType { + if m != nil { + return m.Types + } + return nil +} + +func (m *SearchAnnotationSetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + 
return "" +} + +func (m *SearchAnnotationSetsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +type SearchAnnotationSetsResponse struct { + // The matching annotation sets. + AnnotationSets []*AnnotationSet `protobuf:"bytes,1,rep,name=annotation_sets,json=annotationSets" json:"annotation_sets,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchAnnotationSetsResponse) Reset() { *m = SearchAnnotationSetsResponse{} } +func (m *SearchAnnotationSetsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchAnnotationSetsResponse) ProtoMessage() {} +func (*SearchAnnotationSetsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *SearchAnnotationSetsResponse) GetAnnotationSets() []*AnnotationSet { + if m != nil { + return m.AnnotationSets + } + return nil +} + +func (m *SearchAnnotationSetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateAnnotationRequest struct { + // The annotation to be created. 
+ Annotation *Annotation `protobuf:"bytes,1,opt,name=annotation" json:"annotation,omitempty"` +} + +func (m *CreateAnnotationRequest) Reset() { *m = CreateAnnotationRequest{} } +func (m *CreateAnnotationRequest) String() string { return proto.CompactTextString(m) } +func (*CreateAnnotationRequest) ProtoMessage() {} +func (*CreateAnnotationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *CreateAnnotationRequest) GetAnnotation() *Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type BatchCreateAnnotationsRequest struct { + // The annotations to be created. At most 4096 can be specified in a single + // request. + Annotations []*Annotation `protobuf:"bytes,1,rep,name=annotations" json:"annotations,omitempty"` + // A unique request ID which enables the server to detect duplicated requests. + // If provided, duplicated requests will result in the same response; if not + // provided, duplicated requests may result in duplicated data. For a given + // annotation set, callers should not reuse `request_id`s when writing + // different batches of annotations - behavior in this case is undefined. + // A common approach is to use a UUID. For batch jobs where worker crashes are + // a possibility, consider using some unique variant of a worker or run ID. 
+ RequestId string `protobuf:"bytes,2,opt,name=request_id,json=requestId" json:"request_id,omitempty"` +} + +func (m *BatchCreateAnnotationsRequest) Reset() { *m = BatchCreateAnnotationsRequest{} } +func (m *BatchCreateAnnotationsRequest) String() string { return proto.CompactTextString(m) } +func (*BatchCreateAnnotationsRequest) ProtoMessage() {} +func (*BatchCreateAnnotationsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *BatchCreateAnnotationsRequest) GetAnnotations() []*Annotation { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *BatchCreateAnnotationsRequest) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +type BatchCreateAnnotationsResponse struct { + // The resulting per-annotation entries, ordered consistently with the + // original request. + Entries []*BatchCreateAnnotationsResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` +} + +func (m *BatchCreateAnnotationsResponse) Reset() { *m = BatchCreateAnnotationsResponse{} } +func (m *BatchCreateAnnotationsResponse) String() string { return proto.CompactTextString(m) } +func (*BatchCreateAnnotationsResponse) ProtoMessage() {} +func (*BatchCreateAnnotationsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *BatchCreateAnnotationsResponse) GetEntries() []*BatchCreateAnnotationsResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +type BatchCreateAnnotationsResponse_Entry struct { + // The creation status. + Status *google_rpc.Status `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + // The created annotation, if creation was successful. 
+ Annotation *Annotation `protobuf:"bytes,2,opt,name=annotation" json:"annotation,omitempty"` +} + +func (m *BatchCreateAnnotationsResponse_Entry) Reset() { *m = BatchCreateAnnotationsResponse_Entry{} } +func (m *BatchCreateAnnotationsResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*BatchCreateAnnotationsResponse_Entry) ProtoMessage() {} +func (*BatchCreateAnnotationsResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{13, 0} +} + +func (m *BatchCreateAnnotationsResponse_Entry) GetStatus() *google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *BatchCreateAnnotationsResponse_Entry) GetAnnotation() *Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GetAnnotationRequest struct { + // The ID of the annotation to be retrieved. + AnnotationId string `protobuf:"bytes,1,opt,name=annotation_id,json=annotationId" json:"annotation_id,omitempty"` +} + +func (m *GetAnnotationRequest) Reset() { *m = GetAnnotationRequest{} } +func (m *GetAnnotationRequest) String() string { return proto.CompactTextString(m) } +func (*GetAnnotationRequest) ProtoMessage() {} +func (*GetAnnotationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *GetAnnotationRequest) GetAnnotationId() string { + if m != nil { + return m.AnnotationId + } + return "" +} + +type UpdateAnnotationRequest struct { + // The ID of the annotation to be updated. + AnnotationId string `protobuf:"bytes,1,opt,name=annotation_id,json=annotationId" json:"annotation_id,omitempty"` + // The new annotation. + Annotation *Annotation `protobuf:"bytes,2,opt,name=annotation" json:"annotation,omitempty"` + // An optional mask specifying which fields to update. 
Mutable fields are + // [name][google.genomics.v1.Annotation.name], + // [variant][google.genomics.v1.Annotation.variant], + // [transcript][google.genomics.v1.Annotation.transcript], and + // [info][google.genomics.v1.Annotation.info]. If unspecified, all mutable + // fields will be updated. + UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateAnnotationRequest) Reset() { *m = UpdateAnnotationRequest{} } +func (m *UpdateAnnotationRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateAnnotationRequest) ProtoMessage() {} +func (*UpdateAnnotationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *UpdateAnnotationRequest) GetAnnotationId() string { + if m != nil { + return m.AnnotationId + } + return "" +} + +func (m *UpdateAnnotationRequest) GetAnnotation() *Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +func (m *UpdateAnnotationRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +type DeleteAnnotationRequest struct { + // The ID of the annotation to be deleted. + AnnotationId string `protobuf:"bytes,1,opt,name=annotation_id,json=annotationId" json:"annotation_id,omitempty"` +} + +func (m *DeleteAnnotationRequest) Reset() { *m = DeleteAnnotationRequest{} } +func (m *DeleteAnnotationRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteAnnotationRequest) ProtoMessage() {} +func (*DeleteAnnotationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *DeleteAnnotationRequest) GetAnnotationId() string { + if m != nil { + return m.AnnotationId + } + return "" +} + +type SearchAnnotationsRequest struct { + // Required. The annotation sets to search within. The caller must have + // `READ` access to these annotation sets. + // All queried annotation sets must have the same type. 
+ AnnotationSetIds []string `protobuf:"bytes,1,rep,name=annotation_set_ids,json=annotationSetIds" json:"annotation_set_ids,omitempty"` + // Required. `reference_id` or `reference_name` must be set. + // + // Types that are valid to be assigned to Reference: + // *SearchAnnotationsRequest_ReferenceId + // *SearchAnnotationsRequest_ReferenceName + Reference isSearchAnnotationsRequest_Reference `protobuf_oneof:"reference"` + // The start position of the range on the reference, 0-based inclusive. If + // specified, + // [referenceId][google.genomics.v1.SearchAnnotationsRequest.reference_id] or + // [referenceName][google.genomics.v1.SearchAnnotationsRequest.reference_name] + // must be specified. Defaults to 0. + Start int64 `protobuf:"varint,4,opt,name=start" json:"start,omitempty"` + // The end position of the range on the reference, 0-based exclusive. If + // [referenceId][google.genomics.v1.SearchAnnotationsRequest.reference_id] or + // [referenceName][google.genomics.v1.SearchAnnotationsRequest.reference_name] + // must be specified, Defaults to the length of the reference. + End int64 `protobuf:"varint,5,opt,name=end" json:"end,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 256. The maximum value is 2048. 
+ PageSize int32 `protobuf:"varint,7,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *SearchAnnotationsRequest) Reset() { *m = SearchAnnotationsRequest{} } +func (m *SearchAnnotationsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchAnnotationsRequest) ProtoMessage() {} +func (*SearchAnnotationsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +type isSearchAnnotationsRequest_Reference interface { + isSearchAnnotationsRequest_Reference() +} + +type SearchAnnotationsRequest_ReferenceId struct { + ReferenceId string `protobuf:"bytes,2,opt,name=reference_id,json=referenceId,oneof"` +} +type SearchAnnotationsRequest_ReferenceName struct { + ReferenceName string `protobuf:"bytes,3,opt,name=reference_name,json=referenceName,oneof"` +} + +func (*SearchAnnotationsRequest_ReferenceId) isSearchAnnotationsRequest_Reference() {} +func (*SearchAnnotationsRequest_ReferenceName) isSearchAnnotationsRequest_Reference() {} + +func (m *SearchAnnotationsRequest) GetReference() isSearchAnnotationsRequest_Reference { + if m != nil { + return m.Reference + } + return nil +} + +func (m *SearchAnnotationsRequest) GetAnnotationSetIds() []string { + if m != nil { + return m.AnnotationSetIds + } + return nil +} + +func (m *SearchAnnotationsRequest) GetReferenceId() string { + if x, ok := m.GetReference().(*SearchAnnotationsRequest_ReferenceId); ok { + return x.ReferenceId + } + return "" +} + +func (m *SearchAnnotationsRequest) GetReferenceName() string { + if x, ok := m.GetReference().(*SearchAnnotationsRequest_ReferenceName); ok { + return x.ReferenceName + } + return "" +} + +func (m *SearchAnnotationsRequest) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *SearchAnnotationsRequest) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *SearchAnnotationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m 
*SearchAnnotationsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SearchAnnotationsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SearchAnnotationsRequest_OneofMarshaler, _SearchAnnotationsRequest_OneofUnmarshaler, _SearchAnnotationsRequest_OneofSizer, []interface{}{ + (*SearchAnnotationsRequest_ReferenceId)(nil), + (*SearchAnnotationsRequest_ReferenceName)(nil), + } +} + +func _SearchAnnotationsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SearchAnnotationsRequest) + // reference + switch x := m.Reference.(type) { + case *SearchAnnotationsRequest_ReferenceId: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ReferenceId) + case *SearchAnnotationsRequest_ReferenceName: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ReferenceName) + case nil: + default: + return fmt.Errorf("SearchAnnotationsRequest.Reference has unexpected type %T", x) + } + return nil +} + +func _SearchAnnotationsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SearchAnnotationsRequest) + switch tag { + case 2: // reference.reference_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Reference = &SearchAnnotationsRequest_ReferenceId{x} + return true, err + case 3: // reference.reference_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Reference = &SearchAnnotationsRequest_ReferenceName{x} + return true, err + default: + return false, nil + } +} + +func _SearchAnnotationsRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SearchAnnotationsRequest) + // reference + 
switch x := m.Reference.(type) { + case *SearchAnnotationsRequest_ReferenceId: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferenceId))) + n += len(x.ReferenceId) + case *SearchAnnotationsRequest_ReferenceName: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferenceName))) + n += len(x.ReferenceName) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SearchAnnotationsResponse struct { + // The matching annotations. + Annotations []*Annotation `protobuf:"bytes,1,rep,name=annotations" json:"annotations,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchAnnotationsResponse) Reset() { *m = SearchAnnotationsResponse{} } +func (m *SearchAnnotationsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchAnnotationsResponse) ProtoMessage() {} +func (*SearchAnnotationsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *SearchAnnotationsResponse) GetAnnotations() []*Annotation { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *SearchAnnotationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*AnnotationSet)(nil), "google.genomics.v1.AnnotationSet") + proto.RegisterType((*Annotation)(nil), "google.genomics.v1.Annotation") + proto.RegisterType((*VariantAnnotation)(nil), "google.genomics.v1.VariantAnnotation") + proto.RegisterType((*VariantAnnotation_ClinicalCondition)(nil), "google.genomics.v1.VariantAnnotation.ClinicalCondition") + 
proto.RegisterType((*Transcript)(nil), "google.genomics.v1.Transcript") + proto.RegisterType((*Transcript_Exon)(nil), "google.genomics.v1.Transcript.Exon") + proto.RegisterType((*Transcript_CodingSequence)(nil), "google.genomics.v1.Transcript.CodingSequence") + proto.RegisterType((*ExternalId)(nil), "google.genomics.v1.ExternalId") + proto.RegisterType((*CreateAnnotationSetRequest)(nil), "google.genomics.v1.CreateAnnotationSetRequest") + proto.RegisterType((*GetAnnotationSetRequest)(nil), "google.genomics.v1.GetAnnotationSetRequest") + proto.RegisterType((*UpdateAnnotationSetRequest)(nil), "google.genomics.v1.UpdateAnnotationSetRequest") + proto.RegisterType((*DeleteAnnotationSetRequest)(nil), "google.genomics.v1.DeleteAnnotationSetRequest") + proto.RegisterType((*SearchAnnotationSetsRequest)(nil), "google.genomics.v1.SearchAnnotationSetsRequest") + proto.RegisterType((*SearchAnnotationSetsResponse)(nil), "google.genomics.v1.SearchAnnotationSetsResponse") + proto.RegisterType((*CreateAnnotationRequest)(nil), "google.genomics.v1.CreateAnnotationRequest") + proto.RegisterType((*BatchCreateAnnotationsRequest)(nil), "google.genomics.v1.BatchCreateAnnotationsRequest") + proto.RegisterType((*BatchCreateAnnotationsResponse)(nil), "google.genomics.v1.BatchCreateAnnotationsResponse") + proto.RegisterType((*BatchCreateAnnotationsResponse_Entry)(nil), "google.genomics.v1.BatchCreateAnnotationsResponse.Entry") + proto.RegisterType((*GetAnnotationRequest)(nil), "google.genomics.v1.GetAnnotationRequest") + proto.RegisterType((*UpdateAnnotationRequest)(nil), "google.genomics.v1.UpdateAnnotationRequest") + proto.RegisterType((*DeleteAnnotationRequest)(nil), "google.genomics.v1.DeleteAnnotationRequest") + proto.RegisterType((*SearchAnnotationsRequest)(nil), "google.genomics.v1.SearchAnnotationsRequest") + proto.RegisterType((*SearchAnnotationsResponse)(nil), "google.genomics.v1.SearchAnnotationsResponse") + proto.RegisterEnum("google.genomics.v1.AnnotationType", 
AnnotationType_name, AnnotationType_value) + proto.RegisterEnum("google.genomics.v1.VariantAnnotation_Type", VariantAnnotation_Type_name, VariantAnnotation_Type_value) + proto.RegisterEnum("google.genomics.v1.VariantAnnotation_Effect", VariantAnnotation_Effect_name, VariantAnnotation_Effect_value) + proto.RegisterEnum("google.genomics.v1.VariantAnnotation_ClinicalSignificance", VariantAnnotation_ClinicalSignificance_name, VariantAnnotation_ClinicalSignificance_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for AnnotationServiceV1 service + +type AnnotationServiceV1Client interface { + // Creates a new annotation set. Caller must have WRITE permission for the + // associated dataset. + // + // The following fields are required: + // + // * [datasetId][google.genomics.v1.AnnotationSet.dataset_id] + // * [referenceSetId][google.genomics.v1.AnnotationSet.reference_set_id] + // + // All other fields may be optionally specified, unless documented as being + // server-generated (for example, the `id` field). + CreateAnnotationSet(ctx context.Context, in *CreateAnnotationSetRequest, opts ...grpc.CallOption) (*AnnotationSet, error) + // Gets an annotation set. Caller must have READ permission for + // the associated dataset. + GetAnnotationSet(ctx context.Context, in *GetAnnotationSetRequest, opts ...grpc.CallOption) (*AnnotationSet, error) + // Updates an annotation set. The update must respect all mutability + // restrictions and other invariants described on the annotation set resource. + // Caller must have WRITE permission for the associated dataset. 
+ UpdateAnnotationSet(ctx context.Context, in *UpdateAnnotationSetRequest, opts ...grpc.CallOption) (*AnnotationSet, error) + // Deletes an annotation set. Caller must have WRITE permission + // for the associated annotation set. + DeleteAnnotationSet(ctx context.Context, in *DeleteAnnotationSetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Searches for annotation sets that match the given criteria. Annotation sets + // are returned in an unspecified order. This order is consistent, such that + // two queries for the same content (regardless of page size) yield annotation + // sets in the same order across their respective streams of paginated + // responses. Caller must have READ permission for the queried datasets. + SearchAnnotationSets(ctx context.Context, in *SearchAnnotationSetsRequest, opts ...grpc.CallOption) (*SearchAnnotationSetsResponse, error) + // Creates a new annotation. Caller must have WRITE permission + // for the associated annotation set. + // + // The following fields are required: + // + // * [annotationSetId][google.genomics.v1.Annotation.annotation_set_id] + // * [referenceName][google.genomics.v1.Annotation.reference_name] or + // [referenceId][google.genomics.v1.Annotation.reference_id] + // + // ### Transcripts + // + // For annotations of type TRANSCRIPT, the following fields of + // [transcript][google.genomics.v1.Annotation.transcript] must be provided: + // + // * [exons.start][google.genomics.v1.Transcript.Exon.start] + // * [exons.end][google.genomics.v1.Transcript.Exon.end] + // + // All other fields may be optionally specified, unless documented as being + // server-generated (for example, the `id` field). The annotated + // range must be no longer than 100Mbp (mega base pairs). See the + // [Annotation resource][google.genomics.v1.Annotation] + // for additional restrictions on each field. 
+ CreateAnnotation(ctx context.Context, in *CreateAnnotationRequest, opts ...grpc.CallOption) (*Annotation, error) + // Creates one or more new annotations atomically. All annotations must + // belong to the same annotation set. Caller must have WRITE + // permission for this annotation set. For optimal performance, batch + // positionally adjacent annotations together. + // + // If the request has a systemic issue, such as an attempt to write to + // an inaccessible annotation set, the entire RPC will fail accordingly. For + // lesser data issues, when possible an error will be isolated to the + // corresponding batch entry in the response; the remaining well formed + // annotations will be created normally. + // + // For details on the requirements for each individual annotation resource, + // see + // [CreateAnnotation][google.genomics.v1.AnnotationServiceV1.CreateAnnotation]. + BatchCreateAnnotations(ctx context.Context, in *BatchCreateAnnotationsRequest, opts ...grpc.CallOption) (*BatchCreateAnnotationsResponse, error) + // Gets an annotation. Caller must have READ permission + // for the associated annotation set. + GetAnnotation(ctx context.Context, in *GetAnnotationRequest, opts ...grpc.CallOption) (*Annotation, error) + // Updates an annotation. Caller must have + // WRITE permission for the associated dataset. + UpdateAnnotation(ctx context.Context, in *UpdateAnnotationRequest, opts ...grpc.CallOption) (*Annotation, error) + // Deletes an annotation. Caller must have WRITE permission for + // the associated annotation set. + DeleteAnnotation(ctx context.Context, in *DeleteAnnotationRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Searches for annotations that match the given criteria. Results are + // ordered by genomic coordinate (by reference sequence, then position). + // Annotations with equivalent genomic coordinates are returned in an + // unspecified order. 
This order is consistent, such that two queries for the + // same content (regardless of page size) yield annotations in the same order + // across their respective streams of paginated responses. Caller must have + // READ permission for the queried annotation sets. + SearchAnnotations(ctx context.Context, in *SearchAnnotationsRequest, opts ...grpc.CallOption) (*SearchAnnotationsResponse, error) +} + +type annotationServiceV1Client struct { + cc *grpc.ClientConn +} + +func NewAnnotationServiceV1Client(cc *grpc.ClientConn) AnnotationServiceV1Client { + return &annotationServiceV1Client{cc} +} + +func (c *annotationServiceV1Client) CreateAnnotationSet(ctx context.Context, in *CreateAnnotationSetRequest, opts ...grpc.CallOption) (*AnnotationSet, error) { + out := new(AnnotationSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/CreateAnnotationSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) GetAnnotationSet(ctx context.Context, in *GetAnnotationSetRequest, opts ...grpc.CallOption) (*AnnotationSet, error) { + out := new(AnnotationSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/GetAnnotationSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) UpdateAnnotationSet(ctx context.Context, in *UpdateAnnotationSetRequest, opts ...grpc.CallOption) (*AnnotationSet, error) { + out := new(AnnotationSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/UpdateAnnotationSet", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) DeleteAnnotationSet(ctx context.Context, in *DeleteAnnotationSetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/DeleteAnnotationSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) SearchAnnotationSets(ctx context.Context, in *SearchAnnotationSetsRequest, opts ...grpc.CallOption) (*SearchAnnotationSetsResponse, error) { + out := new(SearchAnnotationSetsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/SearchAnnotationSets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) CreateAnnotation(ctx context.Context, in *CreateAnnotationRequest, opts ...grpc.CallOption) (*Annotation, error) { + out := new(Annotation) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/CreateAnnotation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) BatchCreateAnnotations(ctx context.Context, in *BatchCreateAnnotationsRequest, opts ...grpc.CallOption) (*BatchCreateAnnotationsResponse, error) { + out := new(BatchCreateAnnotationsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/BatchCreateAnnotations", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) GetAnnotation(ctx context.Context, in *GetAnnotationRequest, opts ...grpc.CallOption) (*Annotation, error) { + out := new(Annotation) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/GetAnnotation", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) UpdateAnnotation(ctx context.Context, in *UpdateAnnotationRequest, opts ...grpc.CallOption) (*Annotation, error) { + out := new(Annotation) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/UpdateAnnotation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) DeleteAnnotation(ctx context.Context, in *DeleteAnnotationRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/DeleteAnnotation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *annotationServiceV1Client) SearchAnnotations(ctx context.Context, in *SearchAnnotationsRequest, opts ...grpc.CallOption) (*SearchAnnotationsResponse, error) { + out := new(SearchAnnotationsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.AnnotationServiceV1/SearchAnnotations", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for AnnotationServiceV1 service + +type AnnotationServiceV1Server interface { + // Creates a new annotation set. Caller must have WRITE permission for the + // associated dataset. + // + // The following fields are required: + // + // * [datasetId][google.genomics.v1.AnnotationSet.dataset_id] + // * [referenceSetId][google.genomics.v1.AnnotationSet.reference_set_id] + // + // All other fields may be optionally specified, unless documented as being + // server-generated (for example, the `id` field). + CreateAnnotationSet(context.Context, *CreateAnnotationSetRequest) (*AnnotationSet, error) + // Gets an annotation set. Caller must have READ permission for + // the associated dataset. + GetAnnotationSet(context.Context, *GetAnnotationSetRequest) (*AnnotationSet, error) + // Updates an annotation set. 
The update must respect all mutability + // restrictions and other invariants described on the annotation set resource. + // Caller must have WRITE permission for the associated dataset. + UpdateAnnotationSet(context.Context, *UpdateAnnotationSetRequest) (*AnnotationSet, error) + // Deletes an annotation set. Caller must have WRITE permission + // for the associated annotation set. + DeleteAnnotationSet(context.Context, *DeleteAnnotationSetRequest) (*google_protobuf1.Empty, error) + // Searches for annotation sets that match the given criteria. Annotation sets + // are returned in an unspecified order. This order is consistent, such that + // two queries for the same content (regardless of page size) yield annotation + // sets in the same order across their respective streams of paginated + // responses. Caller must have READ permission for the queried datasets. + SearchAnnotationSets(context.Context, *SearchAnnotationSetsRequest) (*SearchAnnotationSetsResponse, error) + // Creates a new annotation. Caller must have WRITE permission + // for the associated annotation set. + // + // The following fields are required: + // + // * [annotationSetId][google.genomics.v1.Annotation.annotation_set_id] + // * [referenceName][google.genomics.v1.Annotation.reference_name] or + // [referenceId][google.genomics.v1.Annotation.reference_id] + // + // ### Transcripts + // + // For annotations of type TRANSCRIPT, the following fields of + // [transcript][google.genomics.v1.Annotation.transcript] must be provided: + // + // * [exons.start][google.genomics.v1.Transcript.Exon.start] + // * [exons.end][google.genomics.v1.Transcript.Exon.end] + // + // All other fields may be optionally specified, unless documented as being + // server-generated (for example, the `id` field). The annotated + // range must be no longer than 100Mbp (mega base pairs). See the + // [Annotation resource][google.genomics.v1.Annotation] + // for additional restrictions on each field. 
+ CreateAnnotation(context.Context, *CreateAnnotationRequest) (*Annotation, error) + // Creates one or more new annotations atomically. All annotations must + // belong to the same annotation set. Caller must have WRITE + // permission for this annotation set. For optimal performance, batch + // positionally adjacent annotations together. + // + // If the request has a systemic issue, such as an attempt to write to + // an inaccessible annotation set, the entire RPC will fail accordingly. For + // lesser data issues, when possible an error will be isolated to the + // corresponding batch entry in the response; the remaining well formed + // annotations will be created normally. + // + // For details on the requirements for each individual annotation resource, + // see + // [CreateAnnotation][google.genomics.v1.AnnotationServiceV1.CreateAnnotation]. + BatchCreateAnnotations(context.Context, *BatchCreateAnnotationsRequest) (*BatchCreateAnnotationsResponse, error) + // Gets an annotation. Caller must have READ permission + // for the associated annotation set. + GetAnnotation(context.Context, *GetAnnotationRequest) (*Annotation, error) + // Updates an annotation. Caller must have + // WRITE permission for the associated dataset. + UpdateAnnotation(context.Context, *UpdateAnnotationRequest) (*Annotation, error) + // Deletes an annotation. Caller must have WRITE permission for + // the associated annotation set. + DeleteAnnotation(context.Context, *DeleteAnnotationRequest) (*google_protobuf1.Empty, error) + // Searches for annotations that match the given criteria. Results are + // ordered by genomic coordinate (by reference sequence, then position). + // Annotations with equivalent genomic coordinates are returned in an + // unspecified order. This order is consistent, such that two queries for the + // same content (regardless of page size) yield annotations in the same order + // across their respective streams of paginated responses. 
Caller must have + // READ permission for the queried annotation sets. + SearchAnnotations(context.Context, *SearchAnnotationsRequest) (*SearchAnnotationsResponse, error) +} + +func RegisterAnnotationServiceV1Server(s *grpc.Server, srv AnnotationServiceV1Server) { + s.RegisterService(&_AnnotationServiceV1_serviceDesc, srv) +} + +func _AnnotationServiceV1_CreateAnnotationSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAnnotationSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).CreateAnnotationSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/CreateAnnotationSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).CreateAnnotationSet(ctx, req.(*CreateAnnotationSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_GetAnnotationSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAnnotationSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).GetAnnotationSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/GetAnnotationSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).GetAnnotationSet(ctx, req.(*GetAnnotationSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_UpdateAnnotationSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(UpdateAnnotationSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).UpdateAnnotationSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/UpdateAnnotationSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).UpdateAnnotationSet(ctx, req.(*UpdateAnnotationSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_DeleteAnnotationSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAnnotationSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).DeleteAnnotationSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/DeleteAnnotationSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).DeleteAnnotationSet(ctx, req.(*DeleteAnnotationSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_SearchAnnotationSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchAnnotationSetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).SearchAnnotationSets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/SearchAnnotationSets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).SearchAnnotationSets(ctx, req.(*SearchAnnotationSetsRequest)) + } + 
return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_CreateAnnotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAnnotationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).CreateAnnotation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/CreateAnnotation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).CreateAnnotation(ctx, req.(*CreateAnnotationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_BatchCreateAnnotations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchCreateAnnotationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).BatchCreateAnnotations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/BatchCreateAnnotations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).BatchCreateAnnotations(ctx, req.(*BatchCreateAnnotationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_GetAnnotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAnnotationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).GetAnnotation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.genomics.v1.AnnotationServiceV1/GetAnnotation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).GetAnnotation(ctx, req.(*GetAnnotationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_UpdateAnnotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateAnnotationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).UpdateAnnotation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/UpdateAnnotation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).UpdateAnnotation(ctx, req.(*UpdateAnnotationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_DeleteAnnotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAnnotationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).DeleteAnnotation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/DeleteAnnotation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).DeleteAnnotation(ctx, req.(*DeleteAnnotationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AnnotationServiceV1_SearchAnnotations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchAnnotationsRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(AnnotationServiceV1Server).SearchAnnotations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.AnnotationServiceV1/SearchAnnotations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotationServiceV1Server).SearchAnnotations(ctx, req.(*SearchAnnotationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AnnotationServiceV1_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.genomics.v1.AnnotationServiceV1", + HandlerType: (*AnnotationServiceV1Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateAnnotationSet", + Handler: _AnnotationServiceV1_CreateAnnotationSet_Handler, + }, + { + MethodName: "GetAnnotationSet", + Handler: _AnnotationServiceV1_GetAnnotationSet_Handler, + }, + { + MethodName: "UpdateAnnotationSet", + Handler: _AnnotationServiceV1_UpdateAnnotationSet_Handler, + }, + { + MethodName: "DeleteAnnotationSet", + Handler: _AnnotationServiceV1_DeleteAnnotationSet_Handler, + }, + { + MethodName: "SearchAnnotationSets", + Handler: _AnnotationServiceV1_SearchAnnotationSets_Handler, + }, + { + MethodName: "CreateAnnotation", + Handler: _AnnotationServiceV1_CreateAnnotation_Handler, + }, + { + MethodName: "BatchCreateAnnotations", + Handler: _AnnotationServiceV1_BatchCreateAnnotations_Handler, + }, + { + MethodName: "GetAnnotation", + Handler: _AnnotationServiceV1_GetAnnotation_Handler, + }, + { + MethodName: "UpdateAnnotation", + Handler: _AnnotationServiceV1_UpdateAnnotation_Handler, + }, + { + MethodName: "DeleteAnnotation", + Handler: _AnnotationServiceV1_DeleteAnnotation_Handler, + }, + { + MethodName: "SearchAnnotations", + Handler: _AnnotationServiceV1_SearchAnnotations_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/genomics/v1/annotations.proto", +} + +func init() { proto.RegisterFile("google/genomics/v1/annotations.proto", fileDescriptor0) 
} + +var fileDescriptor0 = []byte{ + // 2188 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x0f, 0xf5, 0x69, 0x3f, 0xd9, 0x32, 0x3d, 0xf1, 0xda, 0x5a, 0x39, 0x1f, 0x0e, 0xf3, 0x65, + 0xb8, 0x89, 0xbc, 0x51, 0x80, 0x36, 0x75, 0xda, 0x74, 0x65, 0x9a, 0xb6, 0xa7, 0xab, 0x50, 0xc2, + 0x90, 0x72, 0xe1, 0x5e, 0x08, 0x86, 0x1a, 0x7b, 0x89, 0xd8, 0x94, 0x4a, 0xd2, 0x6e, 0xbc, 0xc5, + 0x02, 0x8b, 0xc5, 0x16, 0x3d, 0xf5, 0xb2, 0xbb, 0xf7, 0x5e, 0x0a, 0xb4, 0xff, 0x43, 0x2f, 0x05, + 0x7a, 0x6f, 0x0f, 0x45, 0xff, 0x83, 0x5e, 0xf6, 0xda, 0x53, 0x81, 0x5e, 0x8a, 0x19, 0x92, 0x12, + 0x45, 0x51, 0xb6, 0xdc, 0x00, 0xbd, 0x71, 0xde, 0xbc, 0xf7, 0xe6, 0x37, 0xbf, 0x99, 0xf7, 0x31, + 0x12, 0x3c, 0x38, 0xee, 0xf5, 0x8e, 0x4f, 0xe8, 0xe6, 0x31, 0x75, 0x7a, 0xa7, 0xb6, 0xe5, 0x6d, + 0x9e, 0x3f, 0xdb, 0x34, 0x1d, 0xa7, 0xe7, 0x9b, 0xbe, 0xdd, 0x73, 0xbc, 0x5a, 0xdf, 0xed, 0xf9, + 0x3d, 0x84, 0x02, 0xad, 0x5a, 0xa4, 0x55, 0x3b, 0x7f, 0x56, 0xbd, 0x15, 0x5a, 0x9a, 0x7d, 0x7b, + 0xdc, 0xa2, 0xba, 0x1a, 0xce, 0xf2, 0xd1, 0x9b, 0xb3, 0xa3, 0x4d, 0x7a, 0xda, 0xf7, 0x2f, 0xc2, + 0xc9, 0xb5, 0xe4, 0xe4, 0x91, 0x4d, 0x4f, 0xba, 0xc6, 0xa9, 0xe9, 0xbd, 0x0d, 0x35, 0x6e, 0x25, + 0x35, 0x3c, 0xdf, 0x3d, 0xb3, 0xfc, 0x70, 0xf6, 0x4e, 0x72, 0xf6, 0x97, 0xae, 0xd9, 0xef, 0x53, + 0x37, 0x5a, 0x7c, 0x25, 0x9c, 0x77, 0xfb, 0xd6, 0xa6, 0xe7, 0x9b, 0xfe, 0x59, 0x38, 0x21, 0x7d, + 0x97, 0x81, 0xf9, 0xc6, 0x00, 0xab, 0x46, 0x7d, 0x54, 0x86, 0x8c, 0xdd, 0xad, 0x08, 0x6b, 0xc2, + 0xfa, 0x2c, 0xc9, 0xd8, 0x5d, 0x74, 0x1b, 0xa0, 0x6b, 0xfa, 0xa6, 0x47, 0x7d, 0xc3, 0xee, 0x56, + 0x32, 0x5c, 0x3e, 0x1b, 0x4a, 0x70, 0x17, 0xad, 0x83, 0xe8, 0xd2, 0x23, 0xea, 0x52, 0xc7, 0xa2, + 0x46, 0xa8, 0x94, 0xe5, 0x4a, 0xe5, 0x81, 0x5c, 0xe3, 0x9a, 0x08, 0x72, 0x8e, 0x79, 0x4a, 0x2b, + 0x39, 0x3e, 0xcb, 0xbf, 0x99, 0x73, 0xaf, 0x77, 0xe6, 0x5a, 0xd4, 0x38, 0x73, 0xed, 0x4a, 0x3e, + 0x70, 0x1e, 0x48, 0x3a, 0xae, 0x8d, 0xbe, 0x0f, 
0x39, 0xff, 0xa2, 0x4f, 0x2b, 0x85, 0x35, 0x61, + 0xbd, 0x5c, 0x97, 0x6a, 0xe3, 0xa4, 0xd7, 0x86, 0xe0, 0xf5, 0x8b, 0x3e, 0x25, 0x5c, 0x1f, 0xfd, + 0x04, 0x72, 0xb6, 0x73, 0xd4, 0xab, 0x2c, 0xae, 0x65, 0xd7, 0x4b, 0xf5, 0xef, 0x5d, 0x6e, 0xa7, + 0x51, 0xbf, 0x86, 0x9d, 0xa3, 0x9e, 0xe2, 0xf8, 0xee, 0x05, 0xe1, 0x86, 0x55, 0x0d, 0x66, 0x07, + 0x22, 0x24, 0x42, 0xf6, 0x2d, 0xbd, 0x08, 0x29, 0x61, 0x9f, 0xe8, 0x23, 0xc8, 0x9f, 0x9b, 0x27, + 0x67, 0x94, 0xd3, 0x51, 0xaa, 0x57, 0xa3, 0x05, 0x22, 0xfa, 0x6b, 0x4d, 0xdb, 0xf3, 0x0f, 0x98, + 0x06, 0x09, 0x14, 0xb7, 0x32, 0x2f, 0x04, 0xe9, 0xaf, 0x39, 0x80, 0xe1, 0xb2, 0x63, 0x44, 0x6f, + 0xc0, 0xe2, 0xf0, 0xd6, 0x18, 0x23, 0x7c, 0x2f, 0x98, 0x71, 0xb4, 0x31, 0x2e, 0xb3, 0x31, 0x2e, + 0xef, 0xc1, 0xdc, 0xf0, 0x24, 0xec, 0x6e, 0xc8, 0x73, 0x69, 0x20, 0xc3, 0x5d, 0xf4, 0x10, 0x86, + 0x87, 0x62, 0x70, 0x07, 0x01, 0xe5, 0xf3, 0x03, 0xa9, 0xca, 0x3c, 0x2d, 0x41, 0xde, 0xf3, 0x4d, + 0xd7, 0xe7, 0xbc, 0x67, 0x49, 0x30, 0x60, 0x34, 0x50, 0xa7, 0x5b, 0x29, 0x72, 0x19, 0xfb, 0x0c, + 0xdc, 0x9d, 0x53, 0xd7, 0xa3, 0x86, 0xe7, 0xbb, 0xa6, 0xd3, 0xad, 0xcc, 0xac, 0x09, 0xeb, 0x33, + 0xcc, 0x1d, 0x97, 0x6a, 0x5c, 0x38, 0x38, 0xc5, 0xd9, 0x6b, 0x9e, 0x62, 0x03, 0x8a, 0xe7, 0xa6, + 0x6b, 0x9b, 0x8e, 0x5f, 0x01, 0xce, 0xf3, 0xc3, 0x34, 0xd3, 0x83, 0x40, 0x65, 0xe8, 0x61, 0xff, + 0x06, 0x89, 0xec, 0xd0, 0xc7, 0x00, 0x0c, 0x83, 0x67, 0xb9, 0x76, 0xdf, 0xaf, 0x94, 0xb8, 0x97, + 0x3b, 0x69, 0x5e, 0xf4, 0x81, 0xd6, 0xfe, 0x0d, 0x12, 0xb3, 0x41, 0x3f, 0x0a, 0xaf, 0xd2, 0x1c, + 0xbf, 0x4a, 0xeb, 0x97, 0x83, 0xff, 0xbf, 0xdc, 0xa3, 0xed, 0x62, 0x68, 0x25, 0xfd, 0x67, 0x16, + 0x16, 0xc7, 0xb6, 0x8f, 0x5e, 0x85, 0x74, 0x0b, 0x9c, 0xee, 0x8d, 0xa9, 0x38, 0xab, 0xc5, 0x68, + 0xdf, 0x81, 0x02, 0x3d, 0x3a, 0xa2, 0x96, 0xcf, 0x51, 0x95, 0xeb, 0x4f, 0xa6, 0xf3, 0xa0, 0x70, + 0x1b, 0x12, 0xda, 0xa2, 0xc7, 0xb0, 0x60, 0x9e, 0xf8, 0xd4, 0x75, 0x4c, 0x9f, 0x1a, 0x6f, 0x4c, + 0x8f, 0x7a, 0x51, 0x5a, 0x18, 0x88, 0xb7, 0x99, 0x14, 0xad, 0x40, 0xf1, 0x98, 0x3a, 
0xb1, 0x1b, + 0x5b, 0x60, 0xc3, 0xe0, 0xb2, 0x0e, 0xcf, 0xc1, 0xb0, 0xbb, 0x5e, 0x25, 0xbf, 0x96, 0x65, 0x97, + 0x75, 0x28, 0xc5, 0x5d, 0x0f, 0xfd, 0x0c, 0xc0, 0xea, 0x39, 0x5d, 0x9b, 0xe7, 0xda, 0x4a, 0x81, + 0x1f, 0xd3, 0x0f, 0xa6, 0x83, 0x2c, 0x9f, 0xd8, 0x8e, 0x6d, 0x99, 0x27, 0x72, 0x64, 0x4f, 0x62, + 0xae, 0x50, 0x0f, 0x3e, 0xb0, 0x42, 0x05, 0xc3, 0xb3, 0x8f, 0x1d, 0xfb, 0xc8, 0xb6, 0x4c, 0xc7, + 0xa2, 0x3c, 0x02, 0xca, 0xf5, 0xad, 0xeb, 0xad, 0xa1, 0xc5, 0x3c, 0x90, 0x25, 0x2b, 0x45, 0x5a, + 0xfd, 0xbd, 0x00, 0x8b, 0x63, 0x90, 0x58, 0x30, 0xb2, 0x48, 0xf5, 0x2a, 0x02, 0xdf, 0x7d, 0x30, + 0x40, 0x0d, 0x98, 0xa3, 0xef, 0x38, 0x8f, 0x27, 0x9c, 0x9a, 0x0c, 0xdf, 0x77, 0xea, 0xd5, 0x56, + 0x42, 0x3d, 0xdc, 0x25, 0x25, 0x3a, 0xf8, 0xf6, 0x58, 0xee, 0xb5, 0x7a, 0x8e, 0x45, 0xfb, 0xb1, + 0x9c, 0x3d, 0x1b, 0x4a, 0x70, 0x97, 0x9d, 0x4b, 0xef, 0xd4, 0x3e, 0x8d, 0x9d, 0x0b, 0x1b, 0xe2, + 0xae, 0xf4, 0x39, 0xe4, 0xd8, 0x6d, 0x41, 0x4b, 0x20, 0xea, 0x87, 0x6d, 0xc5, 0xe8, 0xa8, 0x5a, + 0x5b, 0x91, 0xf1, 0x2e, 0x56, 0x76, 0xc4, 0x1b, 0xa8, 0x0c, 0xc0, 0xa5, 0x2d, 0x7d, 0x5f, 0x21, + 0xa2, 0x80, 0xe6, 0x61, 0x16, 0xab, 0x9a, 0x42, 0x74, 0xdc, 0x52, 0xc5, 0x0c, 0x9a, 0x83, 0x99, + 0x1d, 0xa5, 0xa9, 0xf0, 0x51, 0x16, 0x89, 0x30, 0xa7, 0x75, 0xb6, 0x35, 0x1d, 0xeb, 0x1d, 0x2e, + 0xc9, 0xa1, 0x22, 0x64, 0x35, 0xb5, 0x2d, 0xe6, 0x99, 0x1f, 0x4d, 0x27, 0x1d, 0x59, 0xef, 0x90, + 0x46, 0x53, 0x2c, 0xb0, 0x09, 0x59, 0x3d, 0x10, 0x8b, 0xd2, 0x5f, 0x04, 0x28, 0x04, 0x77, 0x0d, + 0x2d, 0x03, 0x52, 0x76, 0x77, 0x15, 0x59, 0x4f, 0x60, 0x10, 0x61, 0x2e, 0x94, 0x47, 0x28, 0xca, + 0x00, 0xbb, 0xa4, 0xf1, 0x5a, 0xd1, 0xf6, 0xf1, 0xae, 0x2e, 0x66, 0x50, 0x15, 0x96, 0xf9, 0xd8, + 0x68, 0x13, 0x45, 0x53, 0xc8, 0x01, 0x56, 0xf7, 0x0c, 0xac, 0xee, 0x28, 0x4d, 0x31, 0x8b, 0x10, + 0x94, 0xb5, 0x43, 0xb5, 0xa5, 0x1e, 0xbe, 0x6e, 0x75, 0x34, 0x83, 0xa1, 0xc9, 0xa1, 0x0f, 0x60, + 0x51, 0x6d, 0xa9, 0x09, 0x71, 0x9e, 0x6d, 0x4e, 0xd3, 0x5b, 0x6d, 0x63, 0xaf, 0x81, 0x55, 0xb1, + 0x30, 0x18, 0x36, 0x5b, 
0x9a, 0x26, 0x16, 0xd9, 0x22, 0x5a, 0xbb, 0x89, 0x65, 0xc5, 0xd0, 0xb0, + 0xae, 0x18, 0x3b, 0x58, 0x23, 0x9d, 0x36, 0xdf, 0xe7, 0x8c, 0xf4, 0xe7, 0x0c, 0x2c, 0xa5, 0x5d, + 0x0d, 0xf4, 0x10, 0xee, 0xc9, 0x4d, 0xac, 0x62, 0xb9, 0xd1, 0x34, 0x34, 0xbc, 0xa7, 0xe2, 0x5d, + 0x2c, 0x37, 0x54, 0x39, 0x49, 0xf3, 0x5d, 0x58, 0x4d, 0x57, 0x8b, 0xf1, 0xde, 0x51, 0x65, 0x85, + 0xe8, 0x0c, 0x5a, 0x06, 0x01, 0x14, 0xb6, 0x15, 0x15, 0xef, 0x31, 0xd6, 0x17, 0x61, 0xbe, 0x89, + 0x3f, 0x51, 0x9a, 0x87, 0x46, 0x28, 0xe2, 0xfb, 0x0b, 0x45, 0xed, 0x86, 0xbe, 0xdf, 0xda, 0x53, + 0x54, 0x2c, 0x07, 0x87, 0x10, 0x1b, 0x17, 0x98, 0xe5, 0x0e, 0xe9, 0xec, 0x19, 0x44, 0xd1, 0xda, + 0x2d, 0x55, 0x53, 0xc4, 0x22, 0x3b, 0x83, 0x7d, 0xac, 0xe9, 0x2d, 0xb9, 0xf5, 0xba, 0xdd, 0xd0, + 0xf1, 0x36, 0x6e, 0x62, 0xfd, 0x50, 0x9c, 0x41, 0x2b, 0x70, 0x53, 0x6e, 0xa9, 0xbb, 0x0a, 0xd1, + 0x0c, 0x4d, 0x51, 0x35, 0xac, 0xe3, 0x03, 0x36, 0x31, 0x8b, 0x16, 0xa0, 0x44, 0xb0, 0xf6, 0x89, + 0xb1, 0xdb, 0x90, 0xf5, 0x16, 0x11, 0x81, 0x09, 0x1a, 0x9a, 0xd6, 0x92, 0x71, 0x83, 0x73, 0x53, + 0xe2, 0xab, 0x92, 0x96, 0xae, 0xc8, 0x3a, 0x3e, 0x50, 0xc4, 0x39, 0x06, 0xee, 0x75, 0xa7, 0xa9, + 0xe3, 0x76, 0x53, 0x31, 0x88, 0xd2, 0x6e, 0x11, 0x5d, 0xd9, 0x11, 0xe7, 0xa5, 0x7f, 0x64, 0x00, + 0x86, 0x69, 0x3b, 0x9e, 0x47, 0x84, 0x91, 0x3c, 0xf2, 0x43, 0xc8, 0xd3, 0x77, 0x2c, 0x37, 0x04, + 0x31, 0x72, 0xff, 0xf2, 0xf4, 0x5f, 0x53, 0xde, 0xf5, 0x1c, 0x12, 0x58, 0xa0, 0x03, 0x58, 0xb0, + 0x7a, 0x5d, 0xdb, 0x39, 0x36, 0x3c, 0xfa, 0x8b, 0x33, 0x56, 0x1f, 0x79, 0x9c, 0x94, 0xea, 0x4f, + 0xaf, 0x70, 0x22, 0x73, 0x2b, 0x2d, 0x34, 0x22, 0x65, 0x6b, 0x64, 0x5c, 0x35, 0x21, 0xc7, 0x96, + 0x19, 0x16, 0x5a, 0x21, 0xa5, 0xd0, 0x66, 0x86, 0x85, 0xf6, 0x19, 0xe4, 0x8f, 0xdc, 0xa8, 0xde, + 0x97, 0xea, 0xab, 0x63, 0x75, 0x02, 0x3b, 0xfe, 0xf3, 0x7a, 0x58, 0x28, 0xb8, 0x66, 0xf5, 0x05, + 0x94, 0x47, 0x41, 0x4c, 0xbb, 0x98, 0xf4, 0x63, 0x80, 0x61, 0xca, 0x40, 0x77, 0xa1, 0x14, 0x76, + 0x68, 0xbc, 0x5f, 0x08, 0xa8, 0x0d, 0x9b, 0x36, 0xde, 0x2c, 
0x04, 0x6d, 0x4c, 0x26, 0x6a, 0x63, + 0xa4, 0x23, 0xa8, 0xca, 0x2e, 0x35, 0x7d, 0x3a, 0xd2, 0x61, 0x11, 0x86, 0xc2, 0xf3, 0xd1, 0x3e, + 0x94, 0x47, 0x9b, 0x1c, 0xee, 0xb1, 0x54, 0xbf, 0x77, 0x65, 0x8f, 0x46, 0xe6, 0x47, 0x9a, 0x20, + 0x49, 0x81, 0x95, 0x3d, 0xea, 0xa7, 0x2e, 0x92, 0xda, 0x49, 0x09, 0xa9, 0x9d, 0x94, 0xf4, 0x37, + 0x01, 0xaa, 0x9d, 0x7e, 0x77, 0x12, 0xde, 0x6b, 0xb8, 0x4a, 0xd9, 0x5b, 0xe6, 0x7f, 0xdb, 0x1b, + 0x7a, 0x09, 0xa5, 0x33, 0x8e, 0x89, 0xbf, 0x00, 0xc2, 0x53, 0x1f, 0xef, 0x0e, 0x76, 0xd9, 0x23, + 0xe1, 0xb5, 0xe9, 0xbd, 0x25, 0x10, 0xa8, 0xb3, 0x6f, 0x69, 0x1f, 0xaa, 0x3b, 0xf4, 0x84, 0xbe, + 0xff, 0x86, 0xa4, 0x7f, 0x09, 0xb0, 0xaa, 0x51, 0xd3, 0xb5, 0x3e, 0x1d, 0x71, 0xe5, 0x45, 0xbe, + 0xee, 0x42, 0x69, 0xf8, 0x34, 0x88, 0x0a, 0x14, 0x0c, 0xde, 0x06, 0x5e, 0xea, 0xe3, 0x20, 0x73, + 0xe9, 0xe3, 0x20, 0xde, 0xd0, 0xbe, 0x80, 0x3c, 0x6b, 0x48, 0xbc, 0x4a, 0x6e, 0x2d, 0x3b, 0x65, + 0xe3, 0x18, 0x18, 0xb0, 0xd2, 0xd6, 0x37, 0x8f, 0xa9, 0xe1, 0xf7, 0xde, 0x52, 0x27, 0x7a, 0x56, + 0x30, 0x89, 0xce, 0x04, 0x68, 0x15, 0xf8, 0xc0, 0xf0, 0xec, 0xcf, 0x82, 0xb7, 0x45, 0x9e, 0xcc, + 0x30, 0x81, 0x66, 0x7f, 0x46, 0xa5, 0xaf, 0x05, 0xb8, 0x95, 0xbe, 0x69, 0xaf, 0xdf, 0x73, 0x3c, + 0x8a, 0x7e, 0x0a, 0x0b, 0xa3, 0x0c, 0x06, 0x3b, 0x9f, 0xea, 0x9c, 0xcb, 0x23, 0x14, 0x7b, 0xe8, + 0x11, 0x2c, 0x38, 0xf4, 0x9d, 0x6f, 0xc4, 0xd0, 0x06, 0xfc, 0xcc, 0x33, 0x71, 0x3b, 0x42, 0x2c, + 0x1d, 0xc2, 0x4a, 0x32, 0xa8, 0xa2, 0x43, 0x78, 0x05, 0x30, 0x74, 0x1a, 0x46, 0xd3, 0x9d, 0xcb, + 0x91, 0x90, 0x98, 0x85, 0xf4, 0x85, 0x00, 0xb7, 0xb7, 0x4d, 0xdf, 0xfa, 0x34, 0xb9, 0xc0, 0xe0, + 0x98, 0x3f, 0x86, 0x52, 0xec, 0x39, 0x1b, 0x6e, 0xf6, 0xaa, 0x25, 0xe2, 0x26, 0xec, 0x3c, 0xdc, + 0xc0, 0x59, 0xec, 0x0d, 0x19, 0x4a, 0x70, 0x57, 0xfa, 0x4e, 0x80, 0x3b, 0x93, 0x20, 0x84, 0xa4, + 0x13, 0x28, 0x52, 0xc7, 0x77, 0x6d, 0x1a, 0xad, 0xff, 0x22, 0x6d, 0xfd, 0xcb, 0x9d, 0xd4, 0x82, + 0xce, 0x3c, 0x72, 0x54, 0xf5, 0x20, 0x1f, 0x34, 0xe6, 0x1b, 0x50, 0x08, 0x1e, 0xc5, 0x21, 0x7d, + 
0x28, 0xf2, 0xed, 0xf6, 0xad, 0x9a, 0xc6, 0x67, 0x48, 0xa8, 0x91, 0xa0, 0x3b, 0x73, 0x6d, 0xba, + 0x5f, 0xc2, 0xd2, 0x48, 0xda, 0x8a, 0x48, 0xbe, 0x0f, 0xb1, 0x1c, 0x30, 0x8c, 0xc9, 0xb9, 0xa1, + 0x10, 0x77, 0xa5, 0x3f, 0x09, 0xb0, 0x92, 0x4c, 0x56, 0xd7, 0x71, 0xf0, 0xbe, 0xe8, 0xdf, 0x2f, + 0x31, 0xbd, 0x82, 0x95, 0x64, 0x62, 0xba, 0xd6, 0xee, 0x7f, 0x93, 0x81, 0x4a, 0x32, 0x32, 0x07, + 0x97, 0xf4, 0x09, 0xa0, 0xb1, 0xbc, 0x16, 0xa5, 0x24, 0x31, 0x91, 0xd8, 0x3c, 0x74, 0x3f, 0xf1, + 0x56, 0xe6, 0x57, 0x72, 0xff, 0xc6, 0xe8, 0x6b, 0xf9, 0xf1, 0xd8, 0x6b, 0x39, 0x1b, 0xaa, 0x4d, + 0x7a, 0x2f, 0xe7, 0x52, 0x2a, 0x6b, 0x7e, 0x58, 0xc6, 0x47, 0xd3, 0x52, 0xe1, 0xd2, 0xb4, 0x54, + 0x1c, 0x4d, 0x4b, 0xdb, 0x25, 0x98, 0x1d, 0x2c, 0x2a, 0xfd, 0x5a, 0x80, 0x0f, 0x53, 0x98, 0x08, + 0x63, 0xe5, 0xfd, 0xe3, 0x75, 0xca, 0xb4, 0xb4, 0x41, 0xa1, 0x3c, 0x9a, 0x80, 0x59, 0x5f, 0xda, + 0x50, 0xd5, 0x96, 0xce, 0x7b, 0x39, 0x23, 0xe5, 0x7d, 0x50, 0x82, 0xe2, 0x9e, 0xa2, 0x2a, 0x04, + 0xcb, 0xa2, 0xc0, 0x06, 0x07, 0x0d, 0x82, 0x1b, 0x2a, 0xeb, 0xc9, 0x67, 0x20, 0xc7, 0x66, 0xc4, + 0x2c, 0x7f, 0x43, 0x90, 0x86, 0xaa, 0xc9, 0x04, 0xb7, 0x75, 0x31, 0x57, 0xff, 0x6a, 0x1e, 0x6e, + 0xc6, 0xf3, 0xa8, 0x7b, 0x6e, 0x5b, 0xf4, 0xe0, 0x19, 0xfa, 0x56, 0x80, 0x9b, 0x29, 0xbd, 0x06, + 0xaa, 0xa5, 0xed, 0x75, 0x72, 0x53, 0x52, 0xbd, 0x3a, 0x71, 0x4b, 0x1b, 0x5f, 0xfe, 0xfd, 0x9f, + 0xdf, 0x64, 0x1e, 0x48, 0x28, 0xf1, 0x73, 0x20, 0xf5, 0xbd, 0xad, 0x44, 0xd5, 0x47, 0x5f, 0x0b, + 0x20, 0x26, 0x5b, 0x13, 0x94, 0xfa, 0x23, 0xd4, 0x84, 0x06, 0x66, 0x1a, 0x40, 0x35, 0x0e, 0x68, + 0x1d, 0x3d, 0x1a, 0x07, 0xb4, 0xf9, 0xab, 0xb1, 0x48, 0xf8, 0x1c, 0xfd, 0x51, 0x80, 0x9b, 0x29, + 0x7d, 0x4e, 0x3a, 0x57, 0x93, 0x1b, 0xa2, 0x69, 0xa0, 0xbd, 0xe2, 0xd0, 0x5e, 0x54, 0xa7, 0x84, + 0x36, 0xc6, 0xdf, 0x6f, 0x05, 0xb8, 0x99, 0xd2, 0xc1, 0xa4, 0x43, 0x9d, 0xdc, 0xea, 0x54, 0x97, + 0xc7, 0xf2, 0x92, 0x72, 0xda, 0xf7, 0x2f, 0x22, 0xea, 0x36, 0xa6, 0xa5, 0xee, 0x77, 0x02, 0x2c, + 0xa5, 0x75, 0x04, 0x68, 0x33, 0x0d, 
0xd0, 0x25, 0x0d, 0x53, 0xf5, 0xa3, 0xe9, 0x0d, 0x82, 0x58, + 0x96, 0x1e, 0x70, 0xac, 0x77, 0xa4, 0x0f, 0x53, 0xb0, 0x7a, 0xdc, 0x70, 0x4b, 0xd8, 0x40, 0x5f, + 0x09, 0x20, 0x26, 0xef, 0x77, 0xfa, 0x8d, 0x9b, 0xd0, 0x45, 0x54, 0xaf, 0x48, 0x0f, 0xd2, 0x7d, + 0x8e, 0xe3, 0xb6, 0xb4, 0x90, 0xc0, 0xb1, 0x15, 0xaf, 0x0e, 0x7f, 0x10, 0x60, 0x39, 0xbd, 0x04, + 0xa3, 0x67, 0xd7, 0x29, 0xd7, 0x01, 0xa4, 0xfa, 0xf5, 0x2b, 0xbc, 0xf4, 0x88, 0xc3, 0x5c, 0x93, + 0x56, 0x93, 0x30, 0xdf, 0x0c, 0xed, 0x18, 0x61, 0x5f, 0x0a, 0x30, 0x3f, 0x12, 0x7c, 0x68, 0xfd, + 0xca, 0xf8, 0x9c, 0x96, 0xaa, 0xc7, 0x1c, 0xc3, 0x3d, 0x74, 0x37, 0x81, 0x61, 0xe4, 0x6e, 0xb1, + 0x7b, 0xf5, 0xad, 0x00, 0x62, 0x32, 0xd2, 0xd2, 0x4f, 0x6d, 0x42, 0xcd, 0xbf, 0x12, 0xca, 0x73, + 0x0e, 0xe5, 0x69, 0xf5, 0x2a, 0x28, 0x23, 0xa7, 0xf8, 0x85, 0x00, 0x62, 0x32, 0xaa, 0xd2, 0x61, + 0x4d, 0xa8, 0xe6, 0x13, 0x03, 0x2f, 0x64, 0x66, 0xe3, 0x4a, 0x66, 0xbe, 0x11, 0x60, 0x71, 0xac, + 0xbe, 0xa1, 0x27, 0xd3, 0x44, 0xcf, 0xe0, 0xfa, 0x3c, 0x9d, 0x52, 0x3b, 0xbc, 0x39, 0xf7, 0x38, + 0xb6, 0x55, 0x69, 0x39, 0x89, 0x6d, 0x10, 0x65, 0xdb, 0x6f, 0x61, 0xd9, 0xea, 0x9d, 0xa6, 0xb8, + 0xdd, 0x16, 0x63, 0x1e, 0xdb, 0x6c, 0xcf, 0x6d, 0xe1, 0xe7, 0x5b, 0x91, 0x5e, 0xef, 0xc4, 0x74, + 0x8e, 0x6b, 0x3d, 0xf7, 0x78, 0xf3, 0x98, 0x3a, 0x9c, 0x91, 0xcd, 0x60, 0xca, 0xec, 0xdb, 0x5e, + 0xfc, 0x6f, 0xa6, 0x97, 0xd1, 0xf7, 0xbf, 0x05, 0xe1, 0x4d, 0x81, 0x6b, 0x3e, 0xff, 0x6f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xf4, 0x94, 0x75, 0x44, 0x8f, 0x1a, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/cigar.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/cigar.pb.go new file mode 100644 index 000000000..a45daa0b5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/cigar.pb.go @@ -0,0 +1,168 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/genomics/v1/cigar.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Describes the different types of CIGAR alignment operations that exist. +// Used wherever CIGAR alignments are used. +type CigarUnit_Operation int32 + +const ( + CigarUnit_OPERATION_UNSPECIFIED CigarUnit_Operation = 0 + // An alignment match indicates that a sequence can be aligned to the + // reference without evidence of an INDEL. Unlike the + // `SEQUENCE_MATCH` and `SEQUENCE_MISMATCH` operators, + // the `ALIGNMENT_MATCH` operator does not indicate whether the + // reference and read sequences are an exact match. This operator is + // equivalent to SAM's `M`. + CigarUnit_ALIGNMENT_MATCH CigarUnit_Operation = 1 + // The insert operator indicates that the read contains evidence of bases + // being inserted into the reference. This operator is equivalent to SAM's + // `I`. + CigarUnit_INSERT CigarUnit_Operation = 2 + // The delete operator indicates that the read contains evidence of bases + // being deleted from the reference. This operator is equivalent to SAM's + // `D`. + CigarUnit_DELETE CigarUnit_Operation = 3 + // The skip operator indicates that this read skips a long segment of the + // reference, but the bases have not been deleted. This operator is commonly + // used when working with RNA-seq data, where reads may skip long segments + // of the reference between exons. This operator is equivalent to SAM's + // `N`. + CigarUnit_SKIP CigarUnit_Operation = 4 + // The soft clip operator indicates that bases at the start/end of a read + // have not been considered during alignment. This may occur if the majority + // of a read maps, except for low quality bases at the start/end of a read. 
+ // This operator is equivalent to SAM's `S`. Bases that are soft + // clipped will still be stored in the read. + CigarUnit_CLIP_SOFT CigarUnit_Operation = 5 + // The hard clip operator indicates that bases at the start/end of a read + // have been omitted from this alignment. This may occur if this linear + // alignment is part of a chimeric alignment, or if the read has been + // trimmed (for example, during error correction or to trim poly-A tails for + // RNA-seq). This operator is equivalent to SAM's `H`. + CigarUnit_CLIP_HARD CigarUnit_Operation = 6 + // The pad operator indicates that there is padding in an alignment. This + // operator is equivalent to SAM's `P`. + CigarUnit_PAD CigarUnit_Operation = 7 + // This operator indicates that this portion of the aligned sequence exactly + // matches the reference. This operator is equivalent to SAM's `=`. + CigarUnit_SEQUENCE_MATCH CigarUnit_Operation = 8 + // This operator indicates that this portion of the aligned sequence is an + // alignment match to the reference, but a sequence mismatch. This can + // indicate a SNP or a read error. This operator is equivalent to SAM's + // `X`. + CigarUnit_SEQUENCE_MISMATCH CigarUnit_Operation = 9 +) + +var CigarUnit_Operation_name = map[int32]string{ + 0: "OPERATION_UNSPECIFIED", + 1: "ALIGNMENT_MATCH", + 2: "INSERT", + 3: "DELETE", + 4: "SKIP", + 5: "CLIP_SOFT", + 6: "CLIP_HARD", + 7: "PAD", + 8: "SEQUENCE_MATCH", + 9: "SEQUENCE_MISMATCH", +} +var CigarUnit_Operation_value = map[string]int32{ + "OPERATION_UNSPECIFIED": 0, + "ALIGNMENT_MATCH": 1, + "INSERT": 2, + "DELETE": 3, + "SKIP": 4, + "CLIP_SOFT": 5, + "CLIP_HARD": 6, + "PAD": 7, + "SEQUENCE_MATCH": 8, + "SEQUENCE_MISMATCH": 9, +} + +func (x CigarUnit_Operation) String() string { + return proto.EnumName(CigarUnit_Operation_name, int32(x)) +} +func (CigarUnit_Operation) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +// A single CIGAR operation. 
+type CigarUnit struct { + Operation CigarUnit_Operation `protobuf:"varint,1,opt,name=operation,enum=google.genomics.v1.CigarUnit_Operation" json:"operation,omitempty"` + // The number of genomic bases that the operation runs for. Required. + OperationLength int64 `protobuf:"varint,2,opt,name=operation_length,json=operationLength" json:"operation_length,omitempty"` + // `referenceSequence` is only used at mismatches + // (`SEQUENCE_MISMATCH`) and deletions (`DELETE`). + // Filling this field replaces SAM's MD tag. If the relevant information is + // not available, this field is unset. + ReferenceSequence string `protobuf:"bytes,3,opt,name=reference_sequence,json=referenceSequence" json:"reference_sequence,omitempty"` +} + +func (m *CigarUnit) Reset() { *m = CigarUnit{} } +func (m *CigarUnit) String() string { return proto.CompactTextString(m) } +func (*CigarUnit) ProtoMessage() {} +func (*CigarUnit) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *CigarUnit) GetOperation() CigarUnit_Operation { + if m != nil { + return m.Operation + } + return CigarUnit_OPERATION_UNSPECIFIED +} + +func (m *CigarUnit) GetOperationLength() int64 { + if m != nil { + return m.OperationLength + } + return 0 +} + +func (m *CigarUnit) GetReferenceSequence() string { + if m != nil { + return m.ReferenceSequence + } + return "" +} + +func init() { + proto.RegisterType((*CigarUnit)(nil), "google.genomics.v1.CigarUnit") + proto.RegisterEnum("google.genomics.v1.CigarUnit_Operation", CigarUnit_Operation_name, CigarUnit_Operation_value) +} + +func init() { proto.RegisterFile("google/genomics/v1/cigar.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 367 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xcf, 0x0e, 0x93, 0x30, + 0x1c, 0xb6, 0x63, 0x6e, 0xe3, 0x97, 0xb8, 0x75, 0x35, 0x33, 0xd3, 0x18, 0xb3, 0xec, 0xe2, 0x3c, + 0x08, 0x99, 0xde, 0xf4, 0xc4, 0xa0, 0x73, 0x8d, 0x0c, 0x10, 0xd8, 
0xc5, 0x0b, 0x41, 0x52, 0x91, + 0x64, 0x6b, 0x11, 0x70, 0xaf, 0xe5, 0x1b, 0xf9, 0x1c, 0x1e, 0x0d, 0x30, 0x98, 0x89, 0xde, 0xbe, + 0x7e, 0xff, 0x9a, 0xfc, 0x3e, 0x78, 0x91, 0x4a, 0x99, 0x9e, 0xb9, 0x9e, 0x72, 0x21, 0x2f, 0x59, + 0x52, 0xea, 0xd7, 0xad, 0x9e, 0x64, 0x69, 0x5c, 0x68, 0x79, 0x21, 0x2b, 0x49, 0x48, 0xab, 0x6b, + 0x9d, 0xae, 0x5d, 0xb7, 0xcf, 0x9e, 0xdf, 0x32, 0x71, 0x9e, 0xe9, 0xb1, 0x10, 0xb2, 0x8a, 0xab, + 0x4c, 0x8a, 0xb2, 0x4d, 0xac, 0x7f, 0x0d, 0x40, 0x35, 0xeb, 0x86, 0x93, 0xc8, 0x2a, 0x42, 0x41, + 0x95, 0x39, 0x2f, 0x1a, 0xc7, 0x12, 0xad, 0xd0, 0x66, 0xfa, 0xe6, 0xa5, 0xf6, 0x6f, 0xa7, 0xd6, + 0x27, 0x34, 0xb7, 0xb3, 0xfb, 0xf7, 0x24, 0x79, 0x05, 0xb8, 0x7f, 0x44, 0x67, 0x2e, 0xd2, 0xea, + 0xdb, 0x72, 0xb0, 0x42, 0x1b, 0xc5, 0x9f, 0xf5, 0xbc, 0xdd, 0xd0, 0xe4, 0x35, 0x90, 0x82, 0x7f, + 0xe5, 0x05, 0x17, 0x09, 0x8f, 0x4a, 0xfe, 0xfd, 0x47, 0x0d, 0x96, 0xca, 0x0a, 0x6d, 0x54, 0x7f, + 0xde, 0x2b, 0xc1, 0x4d, 0x58, 0xff, 0x44, 0xa0, 0xf6, 0x5f, 0x92, 0xa7, 0xb0, 0x70, 0x3d, 0xea, + 0x1b, 0x21, 0x73, 0x9d, 0xe8, 0xe4, 0x04, 0x1e, 0x35, 0xd9, 0x9e, 0x51, 0x0b, 0x3f, 0x20, 0x8f, + 0x61, 0x66, 0xd8, 0xec, 0x83, 0x73, 0xa4, 0x4e, 0x18, 0x1d, 0x8d, 0xd0, 0x3c, 0x60, 0x44, 0x00, + 0x46, 0xcc, 0x09, 0xa8, 0x1f, 0xe2, 0x41, 0x8d, 0x2d, 0x6a, 0xd3, 0x90, 0x62, 0x85, 0x4c, 0x60, + 0x18, 0x7c, 0x64, 0x1e, 0x1e, 0x92, 0x47, 0xa0, 0x9a, 0x36, 0xf3, 0xa2, 0xc0, 0xdd, 0x87, 0xf8, + 0x61, 0xff, 0x3c, 0x18, 0xbe, 0x85, 0x47, 0x64, 0x0c, 0x8a, 0x67, 0x58, 0x78, 0x4c, 0x08, 0x4c, + 0x03, 0xfa, 0xe9, 0x44, 0x1d, 0x93, 0xde, 0xca, 0x27, 0x64, 0x01, 0xf3, 0x3b, 0xc7, 0x82, 0x96, + 0x56, 0x77, 0x1c, 0x9e, 0x24, 0xf2, 0xf2, 0x9f, 0x23, 0xee, 0xa0, 0xb9, 0xa2, 0x57, 0xcf, 0xe0, + 0xa1, 0xcf, 0xef, 0x3a, 0x87, 0x3c, 0xc7, 0x22, 0xd5, 0x64, 0x91, 0xd6, 0x2b, 0x37, 0x23, 0xe9, + 0xad, 0x14, 0xe7, 0x59, 0xf9, 0xf7, 0xf2, 0xef, 0x3b, 0xfc, 0x1b, 0xa1, 0x2f, 0xa3, 0xc6, 0xf9, + 0xf6, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x98, 0xcc, 0xce, 0xde, 0x22, 0x02, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/genomics/v1/datasets.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/datasets.pb.go new file mode 100644 index 000000000..a871906f4 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/datasets.pb.go @@ -0,0 +1,777 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/datasets.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_iam_v11 "google.golang.org/genproto/googleapis/iam/v1" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf6 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A Dataset is a collection of genomic data. +// +// For more genomics resource definitions, see [Fundamentals of Google +// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) +type Dataset struct { + // The server-generated dataset ID, unique across all datasets. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The Google Cloud project ID that this dataset belongs to. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The dataset name. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // The time this dataset was created, in seconds from the epoch. 
+ CreateTime *google_protobuf6.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"` +} + +func (m *Dataset) Reset() { *m = Dataset{} } +func (m *Dataset) String() string { return proto.CompactTextString(m) } +func (*Dataset) ProtoMessage() {} +func (*Dataset) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *Dataset) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Dataset) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *Dataset) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Dataset) GetCreateTime() *google_protobuf6.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +// The dataset list request. +type ListDatasetsRequest struct { + // Required. The Google Cloud project ID to list datasets for. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 50. The maximum value is 1024. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListDatasetsRequest) Reset() { *m = ListDatasetsRequest{} } +func (m *ListDatasetsRequest) String() string { return proto.CompactTextString(m) } +func (*ListDatasetsRequest) ProtoMessage() {} +func (*ListDatasetsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ListDatasetsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListDatasetsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDatasetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The dataset list response. +type ListDatasetsResponse struct { + // The list of matching Datasets. + Datasets []*Dataset `protobuf:"bytes,1,rep,name=datasets" json:"datasets,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListDatasetsResponse) Reset() { *m = ListDatasetsResponse{} } +func (m *ListDatasetsResponse) String() string { return proto.CompactTextString(m) } +func (*ListDatasetsResponse) ProtoMessage() {} +func (*ListDatasetsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *ListDatasetsResponse) GetDatasets() []*Dataset { + if m != nil { + return m.Datasets + } + return nil +} + +func (m *ListDatasetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateDatasetRequest struct { + // The dataset to be created. Must contain projectId and name. 
+ Dataset *Dataset `protobuf:"bytes,1,opt,name=dataset" json:"dataset,omitempty"` +} + +func (m *CreateDatasetRequest) Reset() { *m = CreateDatasetRequest{} } +func (m *CreateDatasetRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDatasetRequest) ProtoMessage() {} +func (*CreateDatasetRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *CreateDatasetRequest) GetDataset() *Dataset { + if m != nil { + return m.Dataset + } + return nil +} + +type UpdateDatasetRequest struct { + // The ID of the dataset to be updated. + DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // The new dataset data. + Dataset *Dataset `protobuf:"bytes,2,opt,name=dataset" json:"dataset,omitempty"` + // An optional mask specifying which fields to update. At this time, the only + // mutable field is [name][google.genomics.v1.Dataset.name]. The only + // acceptable value is "name". If unspecified, all mutable fields will be + // updated. + UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateDatasetRequest) Reset() { *m = UpdateDatasetRequest{} } +func (m *UpdateDatasetRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDatasetRequest) ProtoMessage() {} +func (*UpdateDatasetRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *UpdateDatasetRequest) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *UpdateDatasetRequest) GetDataset() *Dataset { + if m != nil { + return m.Dataset + } + return nil +} + +func (m *UpdateDatasetRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +type DeleteDatasetRequest struct { + // The ID of the dataset to be deleted. 
+ DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` +} + +func (m *DeleteDatasetRequest) Reset() { *m = DeleteDatasetRequest{} } +func (m *DeleteDatasetRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDatasetRequest) ProtoMessage() {} +func (*DeleteDatasetRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *DeleteDatasetRequest) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +type UndeleteDatasetRequest struct { + // The ID of the dataset to be undeleted. + DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` +} + +func (m *UndeleteDatasetRequest) Reset() { *m = UndeleteDatasetRequest{} } +func (m *UndeleteDatasetRequest) String() string { return proto.CompactTextString(m) } +func (*UndeleteDatasetRequest) ProtoMessage() {} +func (*UndeleteDatasetRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *UndeleteDatasetRequest) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +type GetDatasetRequest struct { + // The ID of the dataset. 
+ DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` +} + +func (m *GetDatasetRequest) Reset() { *m = GetDatasetRequest{} } +func (m *GetDatasetRequest) String() string { return proto.CompactTextString(m) } +func (*GetDatasetRequest) ProtoMessage() {} +func (*GetDatasetRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *GetDatasetRequest) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func init() { + proto.RegisterType((*Dataset)(nil), "google.genomics.v1.Dataset") + proto.RegisterType((*ListDatasetsRequest)(nil), "google.genomics.v1.ListDatasetsRequest") + proto.RegisterType((*ListDatasetsResponse)(nil), "google.genomics.v1.ListDatasetsResponse") + proto.RegisterType((*CreateDatasetRequest)(nil), "google.genomics.v1.CreateDatasetRequest") + proto.RegisterType((*UpdateDatasetRequest)(nil), "google.genomics.v1.UpdateDatasetRequest") + proto.RegisterType((*DeleteDatasetRequest)(nil), "google.genomics.v1.DeleteDatasetRequest") + proto.RegisterType((*UndeleteDatasetRequest)(nil), "google.genomics.v1.UndeleteDatasetRequest") + proto.RegisterType((*GetDatasetRequest)(nil), "google.genomics.v1.GetDatasetRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DatasetServiceV1 service + +type DatasetServiceV1Client interface { + // Lists datasets within a project. 
+ // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + ListDatasets(ctx context.Context, in *ListDatasetsRequest, opts ...grpc.CallOption) (*ListDatasetsResponse, error) + // Creates a new dataset. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + CreateDataset(ctx context.Context, in *CreateDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) + // Gets a dataset by ID. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetDataset(ctx context.Context, in *GetDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) + // Updates a dataset. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This method supports patch semantics. + UpdateDataset(ctx context.Context, in *UpdateDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) + // Deletes a dataset and all of its contents (all read group sets, + // reference sets, variant sets, call sets, annotation sets, etc.) + // This is reversible (up to one week after the deletion) via + // the + // [datasets.undelete][google.genomics.v1.DatasetServiceV1.UndeleteDataset] + // operation. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteDataset(ctx context.Context, in *DeleteDatasetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Undeletes a dataset by restoring a dataset which was deleted via this API. 
+ // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This operation is only possible for a week after the deletion occurred. + UndeleteDataset(ctx context.Context, in *UndeleteDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) + // Sets the access control policy on the specified dataset. Replaces any + // existing policy. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // See Setting a + // Policy for more information. + SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Gets the access control policy for the dataset. This is empty if the + // policy or resource does not exist. + // + // See Getting a + // Policy for more information. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Returns permissions that a caller has on the specified resource. + // See Testing + // Permissions for more information. 
+ // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +type datasetServiceV1Client struct { + cc *grpc.ClientConn +} + +func NewDatasetServiceV1Client(cc *grpc.ClientConn) DatasetServiceV1Client { + return &datasetServiceV1Client{cc} +} + +func (c *datasetServiceV1Client) ListDatasets(ctx context.Context, in *ListDatasetsRequest, opts ...grpc.CallOption) (*ListDatasetsResponse, error) { + out := new(ListDatasetsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/ListDatasets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datasetServiceV1Client) CreateDataset(ctx context.Context, in *CreateDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) { + out := new(Dataset) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/CreateDataset", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datasetServiceV1Client) GetDataset(ctx context.Context, in *GetDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) { + out := new(Dataset) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/GetDataset", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datasetServiceV1Client) UpdateDataset(ctx context.Context, in *UpdateDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) { + out := new(Dataset) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/UpdateDataset", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *datasetServiceV1Client) DeleteDataset(ctx context.Context, in *DeleteDatasetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/DeleteDataset", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datasetServiceV1Client) UndeleteDataset(ctx context.Context, in *UndeleteDatasetRequest, opts ...grpc.CallOption) (*Dataset, error) { + out := new(Dataset) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/UndeleteDataset", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datasetServiceV1Client) SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datasetServiceV1Client) GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/GetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *datasetServiceV1Client) TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) { + out := new(google_iam_v11.TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.DatasetServiceV1/TestIamPermissions", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DatasetServiceV1 service + +type DatasetServiceV1Server interface { + // Lists datasets within a project. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + ListDatasets(context.Context, *ListDatasetsRequest) (*ListDatasetsResponse, error) + // Creates a new dataset. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + CreateDataset(context.Context, *CreateDatasetRequest) (*Dataset, error) + // Gets a dataset by ID. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetDataset(context.Context, *GetDatasetRequest) (*Dataset, error) + // Updates a dataset. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This method supports patch semantics. + UpdateDataset(context.Context, *UpdateDatasetRequest) (*Dataset, error) + // Deletes a dataset and all of its contents (all read group sets, + // reference sets, variant sets, call sets, annotation sets, etc.) + // This is reversible (up to one week after the deletion) via + // the + // [datasets.undelete][google.genomics.v1.DatasetServiceV1.UndeleteDataset] + // operation. 
+ // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteDataset(context.Context, *DeleteDatasetRequest) (*google_protobuf1.Empty, error) + // Undeletes a dataset by restoring a dataset which was deleted via this API. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This operation is only possible for a week after the deletion occurred. + UndeleteDataset(context.Context, *UndeleteDatasetRequest) (*Dataset, error) + // Sets the access control policy on the specified dataset. Replaces any + // existing policy. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // See Setting a + // Policy for more information. + SetIamPolicy(context.Context, *google_iam_v11.SetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Gets the access control policy for the dataset. This is empty if the + // policy or resource does not exist. + // + // See Getting a + // Policy for more information. + // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetIamPolicy(context.Context, *google_iam_v11.GetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Returns permissions that a caller has on the specified resource. + // See Testing + // Permissions for more information. 
+ // + // For the definitions of datasets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + TestIamPermissions(context.Context, *google_iam_v11.TestIamPermissionsRequest) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +func RegisterDatasetServiceV1Server(s *grpc.Server, srv DatasetServiceV1Server) { + s.RegisterService(&_DatasetServiceV1_serviceDesc, srv) +} + +func _DatasetServiceV1_ListDatasets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDatasetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).ListDatasets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/ListDatasets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatasetServiceV1Server).ListDatasets(ctx, req.(*ListDatasetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatasetServiceV1_CreateDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDatasetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).CreateDataset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/CreateDataset", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatasetServiceV1Server).CreateDataset(ctx, req.(*CreateDatasetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatasetServiceV1_GetDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatasetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).GetDataset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/GetDataset", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatasetServiceV1Server).GetDataset(ctx, req.(*GetDatasetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatasetServiceV1_UpdateDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDatasetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).UpdateDataset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/UpdateDataset", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatasetServiceV1Server).UpdateDataset(ctx, req.(*UpdateDatasetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatasetServiceV1_DeleteDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDatasetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).DeleteDataset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/DeleteDataset", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatasetServiceV1Server).DeleteDataset(ctx, req.(*DeleteDatasetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_DatasetServiceV1_UndeleteDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UndeleteDatasetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).UndeleteDataset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/UndeleteDataset", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatasetServiceV1Server).UndeleteDataset(ctx, req.(*UndeleteDatasetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatasetServiceV1_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatasetServiceV1Server).SetIamPolicy(ctx, req.(*google_iam_v11.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatasetServiceV1_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(DatasetServiceV1Server).GetIamPolicy(ctx, req.(*google_iam_v11.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatasetServiceV1_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatasetServiceV1Server).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.DatasetServiceV1/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatasetServiceV1Server).TestIamPermissions(ctx, req.(*google_iam_v11.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DatasetServiceV1_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.genomics.v1.DatasetServiceV1", + HandlerType: (*DatasetServiceV1Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListDatasets", + Handler: _DatasetServiceV1_ListDatasets_Handler, + }, + { + MethodName: "CreateDataset", + Handler: _DatasetServiceV1_CreateDataset_Handler, + }, + { + MethodName: "GetDataset", + Handler: _DatasetServiceV1_GetDataset_Handler, + }, + { + MethodName: "UpdateDataset", + Handler: _DatasetServiceV1_UpdateDataset_Handler, + }, + { + MethodName: "DeleteDataset", + Handler: _DatasetServiceV1_DeleteDataset_Handler, + }, + { + MethodName: "UndeleteDataset", + Handler: _DatasetServiceV1_UndeleteDataset_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _DatasetServiceV1_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _DatasetServiceV1_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _DatasetServiceV1_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"google/genomics/v1/datasets.proto", +} + +func init() { proto.RegisterFile("google/genomics/v1/datasets.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 786 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xd1, 0x4e, 0x13, 0x4d, + 0x14, 0xce, 0x16, 0xfe, 0x1f, 0x7a, 0xa0, 0xa0, 0x63, 0xc5, 0xda, 0x8a, 0x96, 0x8d, 0x42, 0xad, + 0xba, 0x4d, 0x6b, 0x08, 0x49, 0x89, 0x37, 0x88, 0x12, 0x12, 0x49, 0x9a, 0x02, 0x5e, 0x78, 0xd3, + 0x0c, 0xdd, 0xa1, 0x8e, 0x74, 0x77, 0xd6, 0x9d, 0x29, 0x28, 0xc8, 0x0d, 0x77, 0x5c, 0xfb, 0x00, + 0x26, 0xde, 0xf9, 0x3c, 0xbe, 0x82, 0x0f, 0xe1, 0xa5, 0x99, 0xd9, 0xd9, 0x76, 0xdb, 0x2e, 0x05, + 0x8c, 0x77, 0xdb, 0x73, 0xbe, 0x73, 0xbe, 0xef, 0xcc, 0xf9, 0x76, 0xba, 0xb0, 0xd0, 0x62, 0xac, + 0xd5, 0x26, 0xa5, 0x16, 0x71, 0x99, 0x43, 0x9b, 0xbc, 0x74, 0x58, 0x2e, 0xd9, 0x58, 0x60, 0x4e, + 0x04, 0xb7, 0x3c, 0x9f, 0x09, 0x86, 0x50, 0x00, 0xb1, 0x42, 0x88, 0x75, 0x58, 0xce, 0xde, 0xd3, + 0x65, 0xd8, 0xa3, 0x25, 0xec, 0xba, 0x4c, 0x60, 0x41, 0x99, 0xab, 0x2b, 0xb2, 0xf7, 0x75, 0x96, + 0x62, 0x47, 0xf6, 0xa3, 0xd8, 0x69, 0x78, 0xac, 0x4d, 0x9b, 0x9f, 0x75, 0x3e, 0xdb, 0x9f, 0xef, + 0xcb, 0xe5, 0x74, 0x4e, 0xfd, 0xda, 0xeb, 0xec, 0x97, 0x88, 0xe3, 0x89, 0x30, 0x99, 0x1f, 0x4c, + 0xee, 0x53, 0xd2, 0xb6, 0x1b, 0x0e, 0xe6, 0x07, 0x1a, 0xf1, 0x60, 0x10, 0x21, 0xa8, 0x43, 0xb8, + 0xc0, 0x8e, 0x17, 0x00, 0xcc, 0x73, 0x03, 0x26, 0xd6, 0x83, 0x01, 0xd1, 0x0c, 0x24, 0xa8, 0x9d, + 0x31, 0xf2, 0x46, 0x21, 0x59, 0x4f, 0x50, 0x1b, 0xcd, 0x03, 0x78, 0x3e, 0xfb, 0x40, 0x9a, 0xa2, + 0x41, 0xed, 0x4c, 0x42, 0xc5, 0x93, 0x3a, 0xb2, 0x69, 0x23, 0x04, 0xe3, 0x2e, 0x76, 0x48, 0x66, + 0x4c, 0x25, 0xd4, 0x33, 0x5a, 0x85, 0xa9, 0xa6, 0x4f, 0xb0, 0x20, 0x0d, 0x49, 0x94, 0x19, 0xcf, + 0x1b, 0x85, 0xa9, 0x4a, 0xd6, 0xd2, 0x47, 0x16, 0xaa, 0xb0, 0x76, 0x42, 0x15, 0x75, 0x08, 0xe0, + 0x32, 0x60, 0x7a, 0x70, 0xeb, 0x0d, 0xe5, 0x42, 0xcb, 0xe1, 0x75, 0xf2, 0xb1, 0x43, 0xb8, 0x18, + 0x90, 0x61, 0x0c, 
0xca, 0xc8, 0x41, 0xd2, 0xc3, 0x2d, 0xd2, 0xe0, 0xf4, 0x98, 0x28, 0x91, 0xff, + 0xd5, 0x27, 0x65, 0x60, 0x9b, 0x1e, 0x13, 0x55, 0x2b, 0x93, 0x82, 0x1d, 0x10, 0x57, 0x2b, 0x55, + 0xf0, 0x1d, 0x19, 0x30, 0x8f, 0x20, 0xdd, 0xcf, 0xc8, 0x3d, 0xe6, 0x72, 0x82, 0x56, 0x60, 0x32, + 0xdc, 0x7a, 0xc6, 0xc8, 0x8f, 0x15, 0xa6, 0x2a, 0x39, 0x6b, 0x78, 0xed, 0x96, 0xae, 0xab, 0x77, + 0xc1, 0x68, 0x11, 0x66, 0x5d, 0xf2, 0x49, 0x34, 0x22, 0xa4, 0xc1, 0xb9, 0xa5, 0x64, 0xb8, 0xd6, + 0x25, 0xde, 0x82, 0xf4, 0x4b, 0x35, 0x78, 0xd8, 0x42, 0xcf, 0xba, 0x0c, 0x13, 0xba, 0x97, 0x1a, + 0xf4, 0x12, 0xde, 0x10, 0x6b, 0xfe, 0x30, 0x20, 0xbd, 0xeb, 0xd9, 0xc3, 0xfd, 0xe6, 0x01, 0x34, + 0x26, 0x72, 0x76, 0x3a, 0xb2, 0x69, 0x47, 0xe9, 0x12, 0x57, 0xa7, 0x93, 0x5b, 0xee, 0x28, 0x36, + 0x65, 0x35, 0x75, 0xac, 0x71, 0x5b, 0x7e, 0x2d, 0xdd, 0xb8, 0x85, 0xf9, 0x41, 0x1d, 0x02, 0xb8, + 0x7c, 0x36, 0x97, 0x21, 0xbd, 0x4e, 0xda, 0xe4, 0x9a, 0x52, 0xcd, 0x15, 0x98, 0xdb, 0x75, 0xed, + 0xbf, 0x28, 0xac, 0xc0, 0xcd, 0x0d, 0x22, 0xae, 0x55, 0x53, 0xf9, 0x96, 0x84, 0x1b, 0xba, 0x62, + 0x9b, 0xf8, 0x87, 0xb4, 0x49, 0xde, 0x96, 0xd1, 0x11, 0x4c, 0x47, 0xcd, 0x82, 0x96, 0xe2, 0xce, + 0x2a, 0xc6, 0xc0, 0xd9, 0xc2, 0xe5, 0xc0, 0xc0, 0x77, 0x66, 0xfa, 0xec, 0xe7, 0xaf, 0xaf, 0x89, + 0x19, 0x34, 0x1d, 0xbd, 0x77, 0x50, 0x07, 0x52, 0x7d, 0x66, 0x41, 0xb1, 0x0d, 0xe3, 0xfc, 0x94, + 0x1d, 0xb5, 0x4f, 0x73, 0x5e, 0xb1, 0xdd, 0x31, 0xfb, 0xd8, 0xaa, 0xdd, 0x2d, 0x73, 0x80, 0xde, + 0xc1, 0xa1, 0x47, 0x71, 0x9d, 0x86, 0x0e, 0x76, 0x34, 0xe1, 0x82, 0x22, 0xcc, 0xa1, 0xbb, 0x51, + 0xc2, 0xd2, 0x49, 0x6f, 0x13, 0xa7, 0xe8, 0xcc, 0x80, 0x54, 0x9f, 0x93, 0xe3, 0x87, 0x8d, 0x33, + 0xfb, 0x68, 0xee, 0xa2, 0xe2, 0x7e, 0x58, 0xb9, 0x98, 0xbb, 0x37, 0xb9, 0x80, 0x54, 0x9f, 0x45, + 0xe3, 0x35, 0xc4, 0xb9, 0x38, 0x3b, 0x37, 0xf4, 0x16, 0xbc, 0x92, 0x17, 0x76, 0x38, 0x7a, 0x71, + 0xc4, 0xe8, 0xe7, 0x06, 0xcc, 0x0e, 0x58, 0x1c, 0x15, 0x63, 0x87, 0x8f, 0x7d, 0x0f, 0x46, 0x8f, + 0xff, 0x4c, 0xf1, 0x2f, 0x99, 0xe6, 0xc5, 0xe3, 0x77, 
0x74, 0xdb, 0xaa, 0x51, 0x44, 0x5f, 0x60, + 0x7a, 0x9b, 0x88, 0x4d, 0xec, 0xd4, 0xd4, 0x9f, 0x11, 0x32, 0xc3, 0xde, 0x14, 0x3b, 0xb2, 0x6d, + 0x34, 0x19, 0xf2, 0xdf, 0x1e, 0xc0, 0x04, 0x59, 0xb3, 0xac, 0x98, 0x9f, 0x98, 0x8b, 0x92, 0xf9, + 0xc4, 0x27, 0x9c, 0x75, 0xfc, 0x26, 0x79, 0xd1, 0xd5, 0x50, 0x3c, 0xad, 0xf2, 0x48, 0x37, 0xcd, + 0xbe, 0x31, 0x8a, 0x7d, 0xe3, 0x9f, 0xb2, 0xb7, 0x06, 0xd8, 0xbf, 0x1b, 0x80, 0x76, 0x08, 0x57, + 0x41, 0xe2, 0x3b, 0x94, 0x73, 0xf9, 0x5f, 0xde, 0xf3, 0x80, 0x26, 0x18, 0x86, 0x84, 0x52, 0x1e, + 0x5f, 0x01, 0xa9, 0x5f, 0xf8, 0x15, 0x25, 0xaf, 0x6c, 0x3e, 0xbd, 0x58, 0x9e, 0x18, 0xaa, 0xae, + 0x1a, 0xc5, 0xb5, 0xf7, 0x30, 0xd7, 0x64, 0x4e, 0xcc, 0xc6, 0xd7, 0x52, 0xe1, 0xad, 0x52, 0x93, + 0x0e, 0xac, 0x19, 0xef, 0xaa, 0x21, 0x88, 0xb5, 0xb1, 0xdb, 0xb2, 0x98, 0xdf, 0x92, 0x9f, 0x37, + 0xca, 0x9f, 0xa5, 0x20, 0x85, 0x3d, 0xca, 0xa3, 0x9f, 0x3c, 0xab, 0xe1, 0xf3, 0x6f, 0xc3, 0xd8, + 0xfb, 0x5f, 0x21, 0x9f, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x87, 0x48, 0x07, 0xbb, 0x1b, 0x09, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/operations.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/operations.pb.go new file mode 100644 index 000000000..194e90170 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/operations.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/operations.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf5 "github.com/golang/protobuf/ptypes/any" +import google_protobuf6 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Metadata describing an [Operation][google.longrunning.Operation]. 
+type OperationMetadata struct { + // The Google Cloud Project in which the job is scoped. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The time at which the job was submitted to the Genomics service. + CreateTime *google_protobuf6.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The time at which the job began to run. + StartTime *google_protobuf6.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time at which the job stopped running. + EndTime *google_protobuf6.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // The original request that started the operation. Note that this will be in + // current version of the API. If the operation was started with v1beta2 API + // and a GetOperation is performed on v1 API, a v1 request will be returned. + Request *google_protobuf5.Any `protobuf:"bytes,5,opt,name=request" json:"request,omitempty"` + // Optional event messages that were generated during the job's execution. + // This also contains any warnings that were generated during import + // or export. + Events []*OperationEvent `protobuf:"bytes,6,rep,name=events" json:"events,omitempty"` + // This field is deprecated. Use `labels` instead. Optionally provided by the + // caller when submitting the request that creates the operation. + ClientId string `protobuf:"bytes,7,opt,name=client_id,json=clientId" json:"client_id,omitempty"` + // Runtime metadata on this Operation. + RuntimeMetadata *google_protobuf5.Any `protobuf:"bytes,8,opt,name=runtime_metadata,json=runtimeMetadata" json:"runtime_metadata,omitempty"` + // Optionally provided by the caller when submitting the request that creates + // the operation. 
+ Labels map[string]string `protobuf:"bytes,9,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *OperationMetadata) Reset() { *m = OperationMetadata{} } +func (m *OperationMetadata) String() string { return proto.CompactTextString(m) } +func (*OperationMetadata) ProtoMessage() {} +func (*OperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *OperationMetadata) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *OperationMetadata) GetCreateTime() *google_protobuf6.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *OperationMetadata) GetStartTime() *google_protobuf6.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *OperationMetadata) GetEndTime() *google_protobuf6.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *OperationMetadata) GetRequest() *google_protobuf5.Any { + if m != nil { + return m.Request + } + return nil +} + +func (m *OperationMetadata) GetEvents() []*OperationEvent { + if m != nil { + return m.Events + } + return nil +} + +func (m *OperationMetadata) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *OperationMetadata) GetRuntimeMetadata() *google_protobuf5.Any { + if m != nil { + return m.RuntimeMetadata + } + return nil +} + +func (m *OperationMetadata) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// An event that occurred during an [Operation][google.longrunning.Operation]. +type OperationEvent struct { + // Optional time of when event started. + StartTime *google_protobuf6.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Optional time of when event finished. An event can have a start time and no + // finish time. 
If an event has a finish time, there must be a start time. + EndTime *google_protobuf6.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Required description of event. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` +} + +func (m *OperationEvent) Reset() { *m = OperationEvent{} } +func (m *OperationEvent) String() string { return proto.CompactTextString(m) } +func (*OperationEvent) ProtoMessage() {} +func (*OperationEvent) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *OperationEvent) GetStartTime() *google_protobuf6.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *OperationEvent) GetEndTime() *google_protobuf6.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *OperationEvent) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*OperationMetadata)(nil), "google.genomics.v1.OperationMetadata") + proto.RegisterType((*OperationEvent)(nil), "google.genomics.v1.OperationEvent") +} + +func init() { proto.RegisterFile("google/genomics/v1/operations.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 456 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0xe5, 0x76, 0x6b, 0x9b, 0x17, 0x89, 0x0d, 0x6b, 0x42, 0xa1, 0x80, 0xa8, 0xca, 0xa5, + 0x27, 0x47, 0x1d, 0x42, 0x62, 0xdd, 0x01, 0x31, 0x69, 0x87, 0x4a, 0x20, 0xa6, 0x88, 0x13, 0x97, + 0xca, 0x4d, 0x1e, 0x51, 0x46, 0x62, 0x07, 0xdb, 0xad, 0xd4, 0xef, 0xc3, 0x17, 0xe0, 0xdb, 0x71, + 0x44, 0xb1, 0x9d, 0x2a, 0x6c, 0x68, 0x45, 0xdc, 0xec, 0xf7, 0xfe, 0x3f, 0xfb, 0x9f, 0xf7, 0x8f, + 0xe1, 0x55, 0x2e, 0x65, 0x5e, 0x62, 0x9c, 0xa3, 0x90, 0x55, 0x91, 0xea, 0x78, 0x3b, 0x8f, 0x65, + 0x8d, 0x8a, 0x9b, 0x42, 0x0a, 0xcd, 0x6a, 0x25, 0x8d, 0xa4, 0xd4, 0x89, 0x58, 0x2b, 0x62, 0xdb, 
+ 0xf9, 0xf8, 0xb9, 0x07, 0x79, 0x5d, 0xc4, 0x5c, 0x08, 0x69, 0xba, 0xc4, 0xf8, 0xa9, 0xef, 0xda, + 0xdd, 0x7a, 0xf3, 0x35, 0xe6, 0x62, 0xe7, 0x5b, 0x2f, 0xef, 0xb6, 0x4c, 0x51, 0xa1, 0x36, 0xbc, + 0xaa, 0x9d, 0x60, 0xfa, 0xf3, 0x08, 0x1e, 0x7f, 0x6a, 0x2d, 0x7c, 0x44, 0xc3, 0x33, 0x6e, 0x38, + 0x7d, 0x01, 0x50, 0x2b, 0x79, 0x8b, 0xa9, 0x59, 0x15, 0x59, 0x44, 0x26, 0x64, 0x16, 0x24, 0x81, + 0xaf, 0x2c, 0x33, 0x7a, 0x09, 0x61, 0xaa, 0x90, 0x1b, 0x5c, 0x35, 0xc7, 0x45, 0xbd, 0x09, 0x99, + 0x85, 0xe7, 0x63, 0xe6, 0x8d, 0xb7, 0x77, 0xb1, 0xcf, 0xed, 0x5d, 0x09, 0x38, 0x79, 0x53, 0xa0, + 0x17, 0x00, 0xda, 0x70, 0x65, 0x1c, 0xdb, 0x3f, 0xc8, 0x06, 0x56, 0x6d, 0xd1, 0x37, 0x30, 0x42, + 0x91, 0x39, 0xf0, 0xe8, 0x20, 0x38, 0x44, 0x91, 0x59, 0x8c, 0xc1, 0x50, 0xe1, 0xf7, 0x0d, 0x6a, + 0x13, 0x1d, 0x5b, 0xea, 0xec, 0x1e, 0xf5, 0x5e, 0xec, 0x92, 0x56, 0x44, 0x17, 0x30, 0xc0, 0x2d, + 0x0a, 0xa3, 0xa3, 0xc1, 0xa4, 0x3f, 0x0b, 0xcf, 0xa7, 0xec, 0x7e, 0x24, 0x6c, 0x3f, 0xb4, 0xeb, + 0x46, 0x9a, 0x78, 0x82, 0x3e, 0x83, 0x20, 0x2d, 0x0b, 0x14, 0x76, 0x70, 0x43, 0x3b, 0xb8, 0x91, + 0x2b, 0x2c, 0x33, 0xfa, 0x0e, 0x4e, 0xd5, 0x46, 0x34, 0xf6, 0x57, 0x95, 0x1f, 0x75, 0x34, 0x7a, + 0xc0, 0xd1, 0x89, 0x57, 0xef, 0x73, 0x59, 0xc2, 0xa0, 0xe4, 0x6b, 0x2c, 0x75, 0x14, 0x58, 0x67, + 0xf3, 0x07, 0x9d, 0xb5, 0x18, 0xfb, 0x60, 0x99, 0x6b, 0x61, 0xd4, 0x2e, 0xf1, 0x07, 0x8c, 0x2f, + 0x20, 0xec, 0x94, 0xe9, 0x29, 0xf4, 0xbf, 0xe1, 0xce, 0x47, 0xdd, 0x2c, 0xe9, 0x19, 0x1c, 0x6f, + 0x79, 0xb9, 0x71, 0xf1, 0x06, 0x89, 0xdb, 0x2c, 0x7a, 0x6f, 0xc9, 0xf4, 0x07, 0x81, 0x47, 0x7f, + 0x7e, 0xfe, 0x9d, 0x50, 0xc9, 0xff, 0x86, 0xda, 0xfb, 0xf7, 0x50, 0x27, 0x10, 0x66, 0xa8, 0x53, + 0x55, 0xd4, 0x8d, 0x0b, 0xfb, 0x1f, 0x05, 0x49, 0xb7, 0x74, 0x75, 0x0b, 0x4f, 0x52, 0x59, 0xfd, + 0x65, 0x42, 0x57, 0x27, 0x7b, 0xf7, 0xfa, 0xa6, 0xb9, 0xe2, 0x86, 0x7c, 0x59, 0xb4, 0x32, 0x59, + 0x72, 0x91, 0x33, 0xa9, 0xf2, 0xe6, 0x95, 0x5a, 0x03, 0xb1, 0x6b, 0xf1, 0xba, 0xd0, 0xdd, 0x97, + 0x7b, 0xd9, 0xae, 0x7f, 0x11, 0xb2, 
0x1e, 0x58, 0xe5, 0xeb, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x6a, 0xf6, 0xa8, 0x9a, 0xe2, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/position.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/position.pb.go new file mode 100644 index 000000000..413c68445 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/position.pb.go @@ -0,0 +1,78 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/position.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// An abstraction for referring to a genomic position, in relation to some +// already known reference. For now, represents a genomic position as a +// reference name, a base number on that reference (0-based), and a +// determination of forward or reverse strand. +type Position struct { + // The name of the reference in whatever reference set is being used. + ReferenceName string `protobuf:"bytes,1,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The 0-based offset from the start of the forward strand for that reference. + Position int64 `protobuf:"varint,2,opt,name=position" json:"position,omitempty"` + // Whether this position is on the reverse strand, as opposed to the forward + // strand. 
+ ReverseStrand bool `protobuf:"varint,3,opt,name=reverse_strand,json=reverseStrand" json:"reverse_strand,omitempty"` +} + +func (m *Position) Reset() { *m = Position{} } +func (m *Position) String() string { return proto.CompactTextString(m) } +func (*Position) ProtoMessage() {} +func (*Position) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *Position) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *Position) GetPosition() int64 { + if m != nil { + return m.Position + } + return 0 +} + +func (m *Position) GetReverseStrand() bool { + if m != nil { + return m.ReverseStrand + } + return false +} + +func init() { + proto.RegisterType((*Position)(nil), "google.genomics.v1.Position") +} + +func init() { proto.RegisterFile("google/genomics/v1/position.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 223 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0x41, 0x4b, 0x03, 0x31, + 0x14, 0x84, 0x89, 0x05, 0x59, 0x03, 0xf5, 0xb0, 0x07, 0x59, 0x8a, 0x87, 0x55, 0x10, 0xf6, 0x94, + 0x50, 0xbc, 0xe9, 0xad, 0x3f, 0x40, 0x96, 0x7a, 0xf3, 0x52, 0x9e, 0xeb, 0x33, 0x06, 0xba, 0xef, + 0x85, 0x24, 0xec, 0x6f, 0xf7, 0x28, 0x49, 0x9a, 0x22, 0xf4, 0x96, 0x4c, 0x66, 0x26, 0x1f, 0x23, + 0x1f, 0x0c, 0xb3, 0x39, 0xa2, 0x36, 0x48, 0x3c, 0xdb, 0x29, 0xe8, 0x65, 0xab, 0x1d, 0x07, 0x1b, + 0x2d, 0x93, 0x72, 0x9e, 0x23, 0xb7, 0x6d, 0xb1, 0xa8, 0x6a, 0x51, 0xcb, 0x76, 0x73, 0x7f, 0x8a, + 0x81, 0xb3, 0x1a, 0x88, 0x38, 0x42, 0x0a, 0x84, 0x92, 0x78, 0x8c, 0xb2, 0x19, 0x4f, 0x1d, 0xed, + 0x93, 0xbc, 0xf5, 0xf8, 0x8d, 0x1e, 0x69, 0xc2, 0x03, 0xc1, 0x8c, 0x9d, 0xe8, 0xc5, 0x70, 0xb3, + 0x5f, 0x9f, 0xd5, 0x37, 0x98, 0xb1, 0xdd, 0xc8, 0xa6, 0x7e, 0xdb, 0x5d, 0xf5, 0x62, 0x58, 0xed, + 0xcf, 0xf7, 0x52, 0xb1, 0xa0, 0x0f, 0x78, 0x08, 0xd1, 0x03, 0x7d, 0x75, 0xab, 0x5e, 0x0c, 0x4d, + 0xaa, 0xc8, 0xea, 0x7b, 0x16, 0x77, 0x3f, 0xf2, 0x6e, 0xe2, 0x59, 
0x5d, 0xd2, 0xee, 0xd6, 0x95, + 0x66, 0x4c, 0x78, 0xa3, 0xf8, 0x78, 0xa9, 0x26, 0x3e, 0x02, 0x19, 0xc5, 0xde, 0xa4, 0x01, 0x32, + 0xbc, 0x2e, 0x4f, 0xe0, 0x6c, 0xf8, 0x3f, 0xca, 0x6b, 0x3d, 0xff, 0x0a, 0xf1, 0x79, 0x9d, 0x9d, + 0xcf, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5c, 0xc6, 0x22, 0xea, 0x3d, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/range.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/range.pb.go new file mode 100644 index 000000000..4062d6eaa --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/range.pb.go @@ -0,0 +1,75 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/range.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A 0-based half-open genomic coordinate range for search requests. +type Range struct { + // The reference sequence name, for example `chr1`, + // `1`, or `chrX`. + ReferenceName string `protobuf:"bytes,1,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The start position of the range on the reference, 0-based inclusive. + Start int64 `protobuf:"varint,2,opt,name=start" json:"start,omitempty"` + // The end position of the range on the reference, 0-based exclusive. 
+ End int64 `protobuf:"varint,3,opt,name=end" json:"end,omitempty"` +} + +func (m *Range) Reset() { *m = Range{} } +func (m *Range) String() string { return proto.CompactTextString(m) } +func (*Range) ProtoMessage() {} +func (*Range) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +func (m *Range) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *Range) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Range) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func init() { + proto.RegisterType((*Range)(nil), "google.genomics.v1.Range") +} + +func init() { proto.RegisterFile("google/genomics/v1/range.proto", fileDescriptor5) } + +var fileDescriptor5 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x4f, 0x4d, 0x4b, 0xc4, 0x30, + 0x10, 0x25, 0x96, 0x15, 0x0c, 0x28, 0x12, 0x44, 0x8a, 0x88, 0x2c, 0x82, 0xb0, 0xa7, 0x84, 0xe2, + 0x4d, 0x6f, 0xfd, 0x01, 0x52, 0x7a, 0xf0, 0xe0, 0x45, 0xc6, 0x3a, 0x86, 0x40, 0x33, 0x53, 0x92, + 0xd0, 0xdf, 0xee, 0x51, 0x92, 0x58, 0x11, 0xf6, 0x36, 0x79, 0x1f, 0x79, 0xef, 0xc9, 0x3b, 0xcb, + 0x6c, 0x67, 0x34, 0x16, 0x89, 0xbd, 0x9b, 0xa2, 0x59, 0x3b, 0x13, 0x80, 0x2c, 0xea, 0x25, 0x70, + 0x62, 0xa5, 0x2a, 0xaf, 0x37, 0x5e, 0xaf, 0xdd, 0xcd, 0xed, 0xaf, 0x07, 0x16, 0x67, 0x80, 0x88, + 0x13, 0x24, 0xc7, 0x14, 0xab, 0xe3, 0xfe, 0x55, 0xee, 0xc6, 0xfc, 0x81, 0x7a, 0x90, 0x17, 0x01, + 0xbf, 0x30, 0x20, 0x4d, 0xf8, 0x4e, 0xe0, 0xb1, 0x15, 0x7b, 0x71, 0x38, 0x1b, 0xcf, 0xff, 0xd0, + 0x17, 0xf0, 0xa8, 0xae, 0xe4, 0x2e, 0x26, 0x08, 0xa9, 0x3d, 0xd9, 0x8b, 0x43, 0x33, 0xd6, 0x87, + 0xba, 0x94, 0x0d, 0xd2, 0x67, 0xdb, 0x14, 0x2c, 0x9f, 0x3d, 0xca, 0xeb, 0x89, 0xbd, 0x3e, 0xee, + 0xd3, 0xcb, 0x92, 0x37, 0xe4, 0xf4, 0x41, 0xbc, 0x3d, 0x6d, 0x0a, 0x9e, 0x81, 0xac, 0xe6, 0x60, + 0xf3, 0xb8, 0xd2, 0xcd, 0x54, 0x0a, 0x16, 0x17, 0xff, 0x0f, 0x7e, 0xde, 0xee, 0x6f, 
0x21, 0x3e, + 0x4e, 0x8b, 0xf2, 0xf1, 0x27, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x3e, 0xf1, 0x62, 0x19, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/readalignment.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/readalignment.pb.go new file mode 100644 index 000000000..f766a3007 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/readalignment.pb.go @@ -0,0 +1,393 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/readalignment.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A linear alignment can be represented by one CIGAR string. Describes the +// mapped position and local alignment of the read to the reference. +type LinearAlignment struct { + // The position of this alignment. + Position *Position `protobuf:"bytes,1,opt,name=position" json:"position,omitempty"` + // The mapping quality of this alignment. Represents how likely + // the read maps to this position as opposed to other locations. + // + // Specifically, this is -10 log10 Pr(mapping position is wrong), rounded to + // the nearest integer. + MappingQuality int32 `protobuf:"varint,2,opt,name=mapping_quality,json=mappingQuality" json:"mapping_quality,omitempty"` + // Represents the local alignment of this sequence (alignment matches, indels, + // etc) against the reference. 
+ Cigar []*CigarUnit `protobuf:"bytes,3,rep,name=cigar" json:"cigar,omitempty"` +} + +func (m *LinearAlignment) Reset() { *m = LinearAlignment{} } +func (m *LinearAlignment) String() string { return proto.CompactTextString(m) } +func (*LinearAlignment) ProtoMessage() {} +func (*LinearAlignment) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +func (m *LinearAlignment) GetPosition() *Position { + if m != nil { + return m.Position + } + return nil +} + +func (m *LinearAlignment) GetMappingQuality() int32 { + if m != nil { + return m.MappingQuality + } + return 0 +} + +func (m *LinearAlignment) GetCigar() []*CigarUnit { + if m != nil { + return m.Cigar + } + return nil +} + +// A read alignment describes a linear alignment of a string of DNA to a +// [reference sequence][google.genomics.v1.Reference], in addition to metadata +// about the fragment (the molecule of DNA sequenced) and the read (the bases +// which were read by the sequencer). A read is equivalent to a line in a SAM +// file. A read belongs to exactly one read group and exactly one +// [read group set][google.genomics.v1.ReadGroupSet]. +// +// For more genomics resource definitions, see [Fundamentals of Google +// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) +// +// ### Reverse-stranded reads +// +// Mapped reads (reads having a non-null `alignment`) can be aligned to either +// the forward or the reverse strand of their associated reference. Strandedness +// of a mapped read is encoded by `alignment.position.reverseStrand`. +// +// If we consider the reference to be a forward-stranded coordinate space of +// `[0, reference.length)` with `0` as the left-most position and +// `reference.length` as the right-most position, reads are always aligned left +// to right. 
That is, `alignment.position.position` always refers to the +// left-most reference coordinate and `alignment.cigar` describes the alignment +// of this read to the reference from left to right. All per-base fields such as +// `alignedSequence` and `alignedQuality` share this same left-to-right +// orientation; this is true of reads which are aligned to either strand. For +// reverse-stranded reads, this means that `alignedSequence` is the reverse +// complement of the bases that were originally reported by the sequencing +// machine. +// +// ### Generating a reference-aligned sequence string +// +// When interacting with mapped reads, it's often useful to produce a string +// representing the local alignment of the read to reference. The following +// pseudocode demonstrates one way of doing this: +// +// out = "" +// offset = 0 +// for c in read.alignment.cigar { +// switch c.operation { +// case "ALIGNMENT_MATCH", "SEQUENCE_MATCH", "SEQUENCE_MISMATCH": +// out += read.alignedSequence[offset:offset+c.operationLength] +// offset += c.operationLength +// break +// case "CLIP_SOFT", "INSERT": +// offset += c.operationLength +// break +// case "PAD": +// out += repeat("*", c.operationLength) +// break +// case "DELETE": +// out += repeat("-", c.operationLength) +// break +// case "SKIP": +// out += repeat(" ", c.operationLength) +// break +// case "CLIP_HARD": +// break +// } +// } +// return out +// +// ### Converting to SAM's CIGAR string +// +// The following pseudocode generates a SAM CIGAR string from the +// `cigar` field. Note that this is a lossy conversion +// (`cigar.referenceSequence` is lost). 
+// +// cigarMap = { +// "ALIGNMENT_MATCH": "M", +// "INSERT": "I", +// "DELETE": "D", +// "SKIP": "N", +// "CLIP_SOFT": "S", +// "CLIP_HARD": "H", +// "PAD": "P", +// "SEQUENCE_MATCH": "=", +// "SEQUENCE_MISMATCH": "X", +// } +// cigarStr = "" +// for c in read.alignment.cigar { +// cigarStr += c.operationLength + cigarMap[c.operation] +// } +// return cigarStr +type Read struct { + // The server-generated read ID, unique across all reads. This is different + // from the `fragmentName`. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The ID of the read group this read belongs to. A read belongs to exactly + // one read group. This is a server-generated ID which is distinct from SAM's + // RG tag (for that value, see + // [ReadGroup.name][google.genomics.v1.ReadGroup.name]). + ReadGroupId string `protobuf:"bytes,2,opt,name=read_group_id,json=readGroupId" json:"read_group_id,omitempty"` + // The ID of the read group set this read belongs to. A read belongs to + // exactly one read group set. + ReadGroupSetId string `protobuf:"bytes,3,opt,name=read_group_set_id,json=readGroupSetId" json:"read_group_set_id,omitempty"` + // The fragment name. Equivalent to QNAME (query template name) in SAM. + FragmentName string `protobuf:"bytes,4,opt,name=fragment_name,json=fragmentName" json:"fragment_name,omitempty"` + // The orientation and the distance between reads from the fragment are + // consistent with the sequencing protocol (SAM flag 0x2). + ProperPlacement bool `protobuf:"varint,5,opt,name=proper_placement,json=properPlacement" json:"proper_placement,omitempty"` + // The fragment is a PCR or optical duplicate (SAM flag 0x400). + DuplicateFragment bool `protobuf:"varint,6,opt,name=duplicate_fragment,json=duplicateFragment" json:"duplicate_fragment,omitempty"` + // The observed length of the fragment, equivalent to TLEN in SAM. 
+ FragmentLength int32 `protobuf:"varint,7,opt,name=fragment_length,json=fragmentLength" json:"fragment_length,omitempty"` + // The read number in sequencing. 0-based and less than numberReads. This + // field replaces SAM flag 0x40 and 0x80. + ReadNumber int32 `protobuf:"varint,8,opt,name=read_number,json=readNumber" json:"read_number,omitempty"` + // The number of reads in the fragment (extension to SAM flag 0x1). + NumberReads int32 `protobuf:"varint,9,opt,name=number_reads,json=numberReads" json:"number_reads,omitempty"` + // Whether this read did not pass filters, such as platform or vendor quality + // controls (SAM flag 0x200). + FailedVendorQualityChecks bool `protobuf:"varint,10,opt,name=failed_vendor_quality_checks,json=failedVendorQualityChecks" json:"failed_vendor_quality_checks,omitempty"` + // The linear alignment for this alignment record. This field is null for + // unmapped reads. + Alignment *LinearAlignment `protobuf:"bytes,11,opt,name=alignment" json:"alignment,omitempty"` + // Whether this alignment is secondary. Equivalent to SAM flag 0x100. + // A secondary alignment represents an alternative to the primary alignment + // for this read. Aligners may return secondary alignments if a read can map + // ambiguously to multiple coordinates in the genome. By convention, each read + // has one and only one alignment where both `secondaryAlignment` + // and `supplementaryAlignment` are false. + SecondaryAlignment bool `protobuf:"varint,12,opt,name=secondary_alignment,json=secondaryAlignment" json:"secondary_alignment,omitempty"` + // Whether this alignment is supplementary. Equivalent to SAM flag 0x800. + // Supplementary alignments are used in the representation of a chimeric + // alignment. In a chimeric alignment, a read is split into multiple + // linear alignments that map to different reference contigs. 
The first + // linear alignment in the read will be designated as the representative + // alignment; the remaining linear alignments will be designated as + // supplementary alignments. These alignments may have different mapping + // quality scores. In each linear alignment in a chimeric alignment, the read + // will be hard clipped. The `alignedSequence` and + // `alignedQuality` fields in the alignment record will only + // represent the bases for its respective linear alignment. + SupplementaryAlignment bool `protobuf:"varint,13,opt,name=supplementary_alignment,json=supplementaryAlignment" json:"supplementary_alignment,omitempty"` + // The bases of the read sequence contained in this alignment record, + // **without CIGAR operations applied** (equivalent to SEQ in SAM). + // `alignedSequence` and `alignedQuality` may be + // shorter than the full read sequence and quality. This will occur if the + // alignment is part of a chimeric alignment, or if the read was trimmed. When + // this occurs, the CIGAR for this read will begin/end with a hard clip + // operator that will indicate the length of the excised sequence. + AlignedSequence string `protobuf:"bytes,14,opt,name=aligned_sequence,json=alignedSequence" json:"aligned_sequence,omitempty"` + // The quality of the read sequence contained in this alignment record + // (equivalent to QUAL in SAM). + // `alignedSequence` and `alignedQuality` may be shorter than the full read + // sequence and quality. This will occur if the alignment is part of a + // chimeric alignment, or if the read was trimmed. When this occurs, the CIGAR + // for this read will begin/end with a hard clip operator that will indicate + // the length of the excised sequence. + AlignedQuality []int32 `protobuf:"varint,15,rep,packed,name=aligned_quality,json=alignedQuality" json:"aligned_quality,omitempty"` + // The mapping of the primary alignment of the + // `(readNumber+1)%numberReads` read in the fragment. 
It replaces + // mate position and mate strand in SAM. + NextMatePosition *Position `protobuf:"bytes,16,opt,name=next_mate_position,json=nextMatePosition" json:"next_mate_position,omitempty"` + // A map of additional read alignment information. This must be of the form + // map (string key mapping to a list of string values). + Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,17,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Read) Reset() { *m = Read{} } +func (m *Read) String() string { return proto.CompactTextString(m) } +func (*Read) ProtoMessage() {} +func (*Read) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } + +func (m *Read) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Read) GetReadGroupId() string { + if m != nil { + return m.ReadGroupId + } + return "" +} + +func (m *Read) GetReadGroupSetId() string { + if m != nil { + return m.ReadGroupSetId + } + return "" +} + +func (m *Read) GetFragmentName() string { + if m != nil { + return m.FragmentName + } + return "" +} + +func (m *Read) GetProperPlacement() bool { + if m != nil { + return m.ProperPlacement + } + return false +} + +func (m *Read) GetDuplicateFragment() bool { + if m != nil { + return m.DuplicateFragment + } + return false +} + +func (m *Read) GetFragmentLength() int32 { + if m != nil { + return m.FragmentLength + } + return 0 +} + +func (m *Read) GetReadNumber() int32 { + if m != nil { + return m.ReadNumber + } + return 0 +} + +func (m *Read) GetNumberReads() int32 { + if m != nil { + return m.NumberReads + } + return 0 +} + +func (m *Read) GetFailedVendorQualityChecks() bool { + if m != nil { + return m.FailedVendorQualityChecks + } + return false +} + +func (m *Read) GetAlignment() *LinearAlignment { + if m != nil { + return m.Alignment + } + return nil +} + +func (m *Read) GetSecondaryAlignment() bool { + if m != nil { + return m.SecondaryAlignment + } 
+ return false +} + +func (m *Read) GetSupplementaryAlignment() bool { + if m != nil { + return m.SupplementaryAlignment + } + return false +} + +func (m *Read) GetAlignedSequence() string { + if m != nil { + return m.AlignedSequence + } + return "" +} + +func (m *Read) GetAlignedQuality() []int32 { + if m != nil { + return m.AlignedQuality + } + return nil +} + +func (m *Read) GetNextMatePosition() *Position { + if m != nil { + return m.NextMatePosition + } + return nil +} + +func (m *Read) GetInfo() map[string]*google_protobuf3.ListValue { + if m != nil { + return m.Info + } + return nil +} + +func init() { + proto.RegisterType((*LinearAlignment)(nil), "google.genomics.v1.LinearAlignment") + proto.RegisterType((*Read)(nil), "google.genomics.v1.Read") +} + +func init() { proto.RegisterFile("google/genomics/v1/readalignment.proto", fileDescriptor6) } + +var fileDescriptor6 = []byte{ + // 683 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xcd, 0x4e, 0xdb, 0x4a, + 0x14, 0xc7, 0xe5, 0x84, 0x70, 0xc9, 0x09, 0x24, 0x61, 0xae, 0xc4, 0xf5, 0x8d, 0xb8, 0xb7, 0x21, + 0x48, 0x6d, 0x58, 0xd4, 0x2e, 0x20, 0xb5, 0x88, 0x2e, 0x2a, 0x40, 0x6d, 0x45, 0x45, 0x51, 0x6a, + 0x54, 0x16, 0xdd, 0x58, 0x83, 0x7d, 0x62, 0x46, 0xd8, 0x33, 0xc6, 0x1e, 0x47, 0xcd, 0x23, 0xf5, + 0xdd, 0xfa, 0x00, 0x5d, 0x56, 0x33, 0xf6, 0x38, 0xd0, 0x66, 0xd1, 0x5d, 0xf2, 0x3f, 0xbf, 0xf3, + 0xe1, 0xf3, 0x31, 0xf0, 0x34, 0x12, 0x22, 0x8a, 0xd1, 0x8d, 0x90, 0x8b, 0x84, 0x05, 0xb9, 0x3b, + 0xdb, 0x77, 0x33, 0xa4, 0x21, 0x8d, 0x59, 0xc4, 0x13, 0xe4, 0xd2, 0x49, 0x33, 0x21, 0x05, 0x21, + 0x25, 0xe7, 0x18, 0xce, 0x99, 0xed, 0x0f, 0xb6, 0x2b, 0x5f, 0x9a, 0x32, 0x97, 0x72, 0x2e, 0x24, + 0x95, 0x4c, 0xf0, 0xbc, 0xf4, 0x18, 0xfc, 0xbf, 0x24, 0x72, 0xc0, 0x22, 0x9a, 0x55, 0xf6, 0x9d, + 0x25, 0xf6, 0x54, 0xe4, 0x4c, 0xc5, 0xa8, 0x10, 0x93, 0x40, 0xff, 0xbb, 0x29, 0xa6, 0x6e, 0x2e, + 0xb3, 0x22, 0xa8, 0x4a, 0x1a, 0x7d, 0xb3, 0xa0, 0x77, 0xc1, 0x38, 0xd2, 
0xec, 0xc4, 0x14, 0x4b, + 0x8e, 0x60, 0xcd, 0xc4, 0xb0, 0xad, 0xa1, 0x35, 0xee, 0x1c, 0x6c, 0x3b, 0xbf, 0x57, 0xee, 0x4c, + 0x2a, 0xc6, 0xab, 0x69, 0xf2, 0x0c, 0x7a, 0x09, 0x4d, 0x53, 0xc6, 0x23, 0xff, 0xbe, 0xa0, 0x31, + 0x93, 0x73, 0xbb, 0x31, 0xb4, 0xc6, 0x2d, 0xaf, 0x5b, 0xc9, 0x9f, 0x4a, 0x95, 0x1c, 0x42, 0x4b, + 0x7f, 0x86, 0xdd, 0x1c, 0x36, 0xc7, 0x9d, 0x83, 0xff, 0x96, 0xc5, 0x3f, 0x53, 0xc0, 0x67, 0xce, + 0xa4, 0x57, 0xb2, 0xa3, 0xef, 0xab, 0xb0, 0xe2, 0x21, 0x0d, 0x49, 0x17, 0x1a, 0x2c, 0xd4, 0xa5, + 0xb5, 0xbd, 0x06, 0x0b, 0xc9, 0x08, 0x36, 0x54, 0xbb, 0xfd, 0x28, 0x13, 0x45, 0xea, 0xb3, 0x50, + 0x27, 0x6d, 0x7b, 0x1d, 0x25, 0xbe, 0x57, 0xda, 0x79, 0x48, 0xf6, 0x60, 0xf3, 0x01, 0x93, 0xa3, + 0x54, 0x5c, 0x53, 0x73, 0xdd, 0x9a, 0xbb, 0x42, 0x79, 0x1e, 0x92, 0x5d, 0xd8, 0x98, 0x66, 0x34, + 0x52, 0xbd, 0xf0, 0x39, 0x4d, 0xd0, 0x5e, 0xd1, 0xd8, 0xba, 0x11, 0x2f, 0x69, 0x82, 0x64, 0x0f, + 0xfa, 0x69, 0x26, 0x52, 0xcc, 0xfc, 0x34, 0xa6, 0x01, 0x2a, 0xdd, 0x6e, 0x0d, 0xad, 0xf1, 0x9a, + 0xd7, 0x2b, 0xf5, 0x89, 0x91, 0xc9, 0x73, 0x20, 0x61, 0x91, 0xc6, 0x2c, 0xa0, 0x12, 0x7d, 0x13, + 0xc4, 0x5e, 0xd5, 0xf0, 0x66, 0x6d, 0x79, 0x57, 0x19, 0x54, 0x13, 0xeb, 0xf4, 0x31, 0xf2, 0x48, + 0xde, 0xda, 0x7f, 0x95, 0x4d, 0x34, 0xf2, 0x85, 0x56, 0xc9, 0x13, 0xd0, 0x5f, 0xe8, 0xf3, 0x22, + 0xb9, 0xc1, 0xcc, 0x5e, 0xd3, 0x10, 0x28, 0xe9, 0x52, 0x2b, 0x64, 0x07, 0xd6, 0x4b, 0x9b, 0xaf, + 0xc4, 0xdc, 0x6e, 0x6b, 0xa2, 0x53, 0x6a, 0xaa, 0x93, 0x39, 0x79, 0x03, 0xdb, 0x53, 0xca, 0x62, + 0x0c, 0xfd, 0x19, 0xf2, 0x50, 0x64, 0x66, 0x6e, 0x7e, 0x70, 0x8b, 0xc1, 0x5d, 0x6e, 0x83, 0xae, + 0xf2, 0xdf, 0x92, 0xb9, 0xd6, 0x48, 0x35, 0xc3, 0x33, 0x0d, 0x90, 0x13, 0x68, 0xd7, 0x6b, 0x6e, + 0x77, 0xf4, 0xb6, 0xec, 0x2e, 0x9b, 0xe6, 0x2f, 0x4b, 0xe6, 0x2d, 0xbc, 0x88, 0x0b, 0x7f, 0xe7, + 0x18, 0x08, 0x1e, 0xd2, 0x6c, 0xee, 0x2f, 0x82, 0xad, 0xeb, 0xd4, 0xa4, 0x36, 0x2d, 0x16, 0xf4, + 0x15, 0xfc, 0x93, 0x17, 0x69, 0x1a, 0xeb, 0xf6, 0x3e, 0x76, 0xda, 0xd0, 0x4e, 0x5b, 0x8f, 0xcc, + 0x0b, 0xc7, 
0x3d, 0xe8, 0x6b, 0x14, 0x43, 0x3f, 0xc7, 0xfb, 0x02, 0x79, 0x80, 0x76, 0x57, 0x0f, + 0xb7, 0x57, 0xe9, 0x57, 0x95, 0xac, 0xa6, 0x60, 0x50, 0xb3, 0xca, 0xbd, 0x61, 0x53, 0x4d, 0xa1, + 0x92, 0xcd, 0x2a, 0x7f, 0x00, 0xc2, 0xf1, 0xab, 0xf4, 0x13, 0x35, 0xdd, 0xfa, 0x6e, 0xfa, 0x7f, + 0x70, 0x37, 0x7d, 0xe5, 0xf7, 0x91, 0x4a, 0x34, 0x0a, 0x79, 0x09, 0x2b, 0x8c, 0x4f, 0x85, 0xbd, + 0xa9, 0xaf, 0x62, 0xb4, 0xcc, 0x5b, 0x8d, 0xcd, 0x39, 0xe7, 0x53, 0xf1, 0x96, 0xcb, 0x6c, 0xee, + 0x69, 0x7e, 0x70, 0x05, 0xed, 0x5a, 0x22, 0x7d, 0x68, 0xde, 0xe1, 0xbc, 0x3a, 0x0f, 0xf5, 0x93, + 0xbc, 0x80, 0xd6, 0x8c, 0xc6, 0x05, 0xea, 0xbb, 0xe8, 0x1c, 0x0c, 0x4c, 0x5c, 0xf3, 0x24, 0x38, + 0x17, 0x2c, 0x97, 0xd7, 0x8a, 0xf0, 0x4a, 0xf0, 0xb8, 0x71, 0x64, 0x9d, 0x26, 0xb0, 0x15, 0x88, + 0x64, 0x49, 0x0d, 0xa7, 0x44, 0x15, 0x51, 0x77, 0x75, 0xa2, 0xa2, 0x4c, 0xac, 0x2f, 0xc7, 0x86, + 0x14, 0x31, 0xe5, 0x91, 0x23, 0xb2, 0x48, 0x3d, 0x4b, 0x3a, 0x87, 0x5b, 0x9a, 0x68, 0xca, 0xf2, + 0x87, 0x4f, 0xd5, 0x6b, 0xf3, 0xfb, 0x87, 0x65, 0xdd, 0xac, 0x6a, 0xf2, 0xf0, 0x67, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xd0, 0xe1, 0xf6, 0x57, 0x4d, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/readgroup.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/readgroup.pb.go new file mode 100644 index 000000000..42f4e143a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/readgroup.pb.go @@ -0,0 +1,275 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/readgroup.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A read group is all the data that's processed the same way by the sequencer. +type ReadGroup struct { + // The server-generated read group ID, unique for all read groups. + // Note: This is different than the @RG ID field in the SAM spec. For that + // value, see [name][google.genomics.v1.ReadGroup.name]. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The dataset to which this read group belongs. + DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // The read group name. This corresponds to the @RG ID field in the SAM spec. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // A free-form text description of this read group. + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + // A client-supplied sample identifier for the reads in this read group. + SampleId string `protobuf:"bytes,5,opt,name=sample_id,json=sampleId" json:"sample_id,omitempty"` + // The experiment used to generate this read group. + Experiment *ReadGroup_Experiment `protobuf:"bytes,6,opt,name=experiment" json:"experiment,omitempty"` + // The predicted insert size of this read group. The insert size is the length + // the sequenced DNA fragment from end-to-end, not including the adapters. + PredictedInsertSize int32 `protobuf:"varint,7,opt,name=predicted_insert_size,json=predictedInsertSize" json:"predicted_insert_size,omitempty"` + // The programs used to generate this read group. Programs are always + // identical for all read groups within a read group set. For this reason, + // only the first read group in a returned set will have this field + // populated. + Programs []*ReadGroup_Program `protobuf:"bytes,10,rep,name=programs" json:"programs,omitempty"` + // The reference set the reads in this read group are aligned to. 
+ ReferenceSetId string `protobuf:"bytes,11,opt,name=reference_set_id,json=referenceSetId" json:"reference_set_id,omitempty"` + // A map of additional read group information. This must be of the form + // map (string key mapping to a list of string values). + Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,12,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ReadGroup) Reset() { *m = ReadGroup{} } +func (m *ReadGroup) String() string { return proto.CompactTextString(m) } +func (*ReadGroup) ProtoMessage() {} +func (*ReadGroup) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } + +func (m *ReadGroup) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ReadGroup) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *ReadGroup) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ReadGroup) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ReadGroup) GetSampleId() string { + if m != nil { + return m.SampleId + } + return "" +} + +func (m *ReadGroup) GetExperiment() *ReadGroup_Experiment { + if m != nil { + return m.Experiment + } + return nil +} + +func (m *ReadGroup) GetPredictedInsertSize() int32 { + if m != nil { + return m.PredictedInsertSize + } + return 0 +} + +func (m *ReadGroup) GetPrograms() []*ReadGroup_Program { + if m != nil { + return m.Programs + } + return nil +} + +func (m *ReadGroup) GetReferenceSetId() string { + if m != nil { + return m.ReferenceSetId + } + return "" +} + +func (m *ReadGroup) GetInfo() map[string]*google_protobuf3.ListValue { + if m != nil { + return m.Info + } + return nil +} + +type ReadGroup_Experiment struct { + // A client-supplied library identifier; a library is a collection of DNA + // fragments which have been prepared for sequencing from a sample. 
This + // field is important for quality control as error or bias can be introduced + // during sample preparation. + LibraryId string `protobuf:"bytes,1,opt,name=library_id,json=libraryId" json:"library_id,omitempty"` + // The platform unit used as part of this experiment, for example + // flowcell-barcode.lane for Illumina or slide for SOLiD. Corresponds to the + // @RG PU field in the SAM spec. + PlatformUnit string `protobuf:"bytes,2,opt,name=platform_unit,json=platformUnit" json:"platform_unit,omitempty"` + // The sequencing center used as part of this experiment. + SequencingCenter string `protobuf:"bytes,3,opt,name=sequencing_center,json=sequencingCenter" json:"sequencing_center,omitempty"` + // The instrument model used as part of this experiment. This maps to + // sequencing technology in the SAM spec. + InstrumentModel string `protobuf:"bytes,4,opt,name=instrument_model,json=instrumentModel" json:"instrument_model,omitempty"` +} + +func (m *ReadGroup_Experiment) Reset() { *m = ReadGroup_Experiment{} } +func (m *ReadGroup_Experiment) String() string { return proto.CompactTextString(m) } +func (*ReadGroup_Experiment) ProtoMessage() {} +func (*ReadGroup_Experiment) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0, 0} } + +func (m *ReadGroup_Experiment) GetLibraryId() string { + if m != nil { + return m.LibraryId + } + return "" +} + +func (m *ReadGroup_Experiment) GetPlatformUnit() string { + if m != nil { + return m.PlatformUnit + } + return "" +} + +func (m *ReadGroup_Experiment) GetSequencingCenter() string { + if m != nil { + return m.SequencingCenter + } + return "" +} + +func (m *ReadGroup_Experiment) GetInstrumentModel() string { + if m != nil { + return m.InstrumentModel + } + return "" +} + +type ReadGroup_Program struct { + // The command line used to run this program. + CommandLine string `protobuf:"bytes,1,opt,name=command_line,json=commandLine" json:"command_line,omitempty"` + // The user specified locally unique ID of the program. 
Used along with + // `prevProgramId` to define an ordering between programs. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // The display name of the program. This is typically the colloquial name of + // the tool used, for example 'bwa' or 'picard'. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // The ID of the program run before this one. + PrevProgramId string `protobuf:"bytes,4,opt,name=prev_program_id,json=prevProgramId" json:"prev_program_id,omitempty"` + // The version of the program run. + Version string `protobuf:"bytes,5,opt,name=version" json:"version,omitempty"` +} + +func (m *ReadGroup_Program) Reset() { *m = ReadGroup_Program{} } +func (m *ReadGroup_Program) String() string { return proto.CompactTextString(m) } +func (*ReadGroup_Program) ProtoMessage() {} +func (*ReadGroup_Program) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0, 1} } + +func (m *ReadGroup_Program) GetCommandLine() string { + if m != nil { + return m.CommandLine + } + return "" +} + +func (m *ReadGroup_Program) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ReadGroup_Program) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ReadGroup_Program) GetPrevProgramId() string { + if m != nil { + return m.PrevProgramId + } + return "" +} + +func (m *ReadGroup_Program) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func init() { + proto.RegisterType((*ReadGroup)(nil), "google.genomics.v1.ReadGroup") + proto.RegisterType((*ReadGroup_Experiment)(nil), "google.genomics.v1.ReadGroup.Experiment") + proto.RegisterType((*ReadGroup_Program)(nil), "google.genomics.v1.ReadGroup.Program") +} + +func init() { proto.RegisterFile("google/genomics/v1/readgroup.proto", fileDescriptor7) } + +var fileDescriptor7 = []byte{ + // 585 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xcb, 0x6e, 
0xd4, 0x30, + 0x14, 0x55, 0xa6, 0xcf, 0xb9, 0xd3, 0xc7, 0x60, 0x04, 0x8a, 0x06, 0x90, 0x86, 0x22, 0x60, 0x10, + 0x52, 0x42, 0x87, 0x0d, 0x6a, 0x57, 0x14, 0x55, 0x10, 0xa9, 0x48, 0x55, 0x2a, 0x58, 0xb0, 0x89, + 0xdc, 0xf8, 0x4e, 0x64, 0x91, 0xd8, 0xc1, 0x76, 0x46, 0xb4, 0x9f, 0xc1, 0x57, 0xf0, 0x2d, 0x7c, + 0x11, 0x4b, 0x64, 0xc7, 0x49, 0x47, 0xa2, 0xea, 0xce, 0x39, 0xe7, 0x5c, 0xdf, 0xc7, 0xb9, 0x0e, + 0x1c, 0x14, 0x52, 0x16, 0x25, 0xc6, 0x05, 0x0a, 0x59, 0xf1, 0x5c, 0xc7, 0xcb, 0xc3, 0x58, 0x21, + 0x65, 0x85, 0x92, 0x4d, 0x1d, 0xd5, 0x4a, 0x1a, 0x49, 0x48, 0xab, 0x89, 0x3a, 0x4d, 0xb4, 0x3c, + 0x9c, 0x3c, 0xf6, 0x71, 0xb4, 0xe6, 0x31, 0x15, 0x42, 0x1a, 0x6a, 0xb8, 0x14, 0xba, 0x8d, 0xe8, + 0x59, 0xf7, 0x75, 0xd9, 0x2c, 0x62, 0x6d, 0x54, 0x93, 0x9b, 0x96, 0x3d, 0xf8, 0xb3, 0x09, 0xc3, + 0x14, 0x29, 0xfb, 0x68, 0x73, 0x90, 0x3d, 0x18, 0x70, 0x16, 0x06, 0xd3, 0x60, 0x36, 0x4c, 0x07, + 0x9c, 0x91, 0x27, 0x00, 0x8c, 0x1a, 0xaa, 0xd1, 0x64, 0x9c, 0x85, 0x03, 0x87, 0x0f, 0x3d, 0x92, + 0x30, 0x42, 0x60, 0x5d, 0xd0, 0x0a, 0xc3, 0x35, 0x47, 0xb8, 0x33, 0x99, 0xc2, 0x88, 0xa1, 0xce, + 0x15, 0xaf, 0x6d, 0x11, 0xe1, 0xba, 0xa3, 0x56, 0x21, 0xf2, 0x08, 0x86, 0x9a, 0x56, 0x75, 0x89, + 0xf6, 0xce, 0x0d, 0xc7, 0x6f, 0xb7, 0x40, 0xc2, 0xc8, 0x27, 0x00, 0xfc, 0x59, 0xa3, 0xe2, 0x15, + 0x0a, 0x13, 0x6e, 0x4e, 0x83, 0xd9, 0x68, 0x3e, 0x8b, 0xfe, 0x6f, 0x3a, 0xea, 0x8b, 0x8e, 0x4e, + 0x7b, 0x7d, 0xba, 0x12, 0x4b, 0xe6, 0xf0, 0xa0, 0x56, 0xc8, 0x78, 0x6e, 0x90, 0x65, 0x5c, 0x68, + 0x54, 0x26, 0xd3, 0xfc, 0x1a, 0xc3, 0xad, 0x69, 0x30, 0xdb, 0x48, 0xef, 0xf7, 0x64, 0xe2, 0xb8, + 0x0b, 0x7e, 0x8d, 0xe4, 0x3d, 0x6c, 0xd7, 0x4a, 0x16, 0x8a, 0x56, 0x3a, 0x84, 0xe9, 0xda, 0x6c, + 0x34, 0x7f, 0x7e, 0x77, 0xee, 0xf3, 0x56, 0x9d, 0xf6, 0x61, 0x64, 0x06, 0x63, 0x85, 0x0b, 0x54, + 0x28, 0x72, 0xcc, 0xfc, 0xe0, 0x46, 0xae, 0xc9, 0xbd, 0x1e, 0xbf, 0x70, 0xd3, 0x3b, 0x86, 0x75, + 0x2e, 0x16, 0x32, 0xdc, 0x71, 0x89, 0x5e, 0xde, 0x9d, 0x28, 0x11, 0x0b, 0x79, 0x2a, 0x8c, 0xba, + 0x4a, 0x5d, 0xd0, 0xe4, 
0x77, 0x00, 0x70, 0xd3, 0xb8, 0x35, 0xaa, 0xe4, 0x97, 0x8a, 0xaa, 0xab, + 0xac, 0x37, 0x70, 0xe8, 0x91, 0x84, 0x91, 0x67, 0xb0, 0x5b, 0x97, 0xd4, 0x2c, 0xa4, 0xaa, 0xb2, + 0x46, 0x70, 0xe3, 0xad, 0xdc, 0xe9, 0xc0, 0x2f, 0x82, 0x1b, 0xf2, 0x1a, 0xee, 0x69, 0xfc, 0xd1, + 0xa0, 0xc8, 0xb9, 0x28, 0xb2, 0x1c, 0x85, 0x41, 0xe5, 0xad, 0x1d, 0xdf, 0x10, 0x1f, 0x1c, 0x4e, + 0x5e, 0xc1, 0x98, 0x0b, 0xbb, 0x49, 0x36, 0x7d, 0x56, 0x49, 0x86, 0xa5, 0xf7, 0x7a, 0xff, 0x06, + 0xff, 0x6c, 0xe1, 0xc9, 0xaf, 0x00, 0xb6, 0xfc, 0x9c, 0xc8, 0x53, 0xd8, 0xc9, 0x65, 0x55, 0x51, + 0xc1, 0xb2, 0x92, 0x0b, 0xf4, 0x95, 0x8e, 0x3c, 0x76, 0xc6, 0x05, 0xfa, 0x1d, 0x1c, 0xf4, 0x3b, + 0x78, 0xdb, 0x92, 0xbd, 0x80, 0xfd, 0x5a, 0xe1, 0x32, 0xf3, 0x53, 0xb7, 0x3d, 0xb7, 0xc9, 0x77, + 0x2d, 0xec, 0x93, 0x25, 0x8c, 0x84, 0xb0, 0xb5, 0x44, 0xa5, 0xed, 0x22, 0xb6, 0x8b, 0xd6, 0x7d, + 0x4e, 0x2e, 0x60, 0xd8, 0x8f, 0x94, 0x8c, 0x61, 0xed, 0x3b, 0x5e, 0xf9, 0x62, 0xec, 0x91, 0xbc, + 0x81, 0x8d, 0x25, 0x2d, 0x1b, 0x74, 0x75, 0x8c, 0xe6, 0x93, 0xce, 0x9c, 0xee, 0x11, 0x45, 0x67, + 0x5c, 0x9b, 0xaf, 0x56, 0x91, 0xb6, 0xc2, 0xa3, 0xc1, 0xbb, 0xe0, 0x84, 0xc3, 0xc3, 0x5c, 0x56, + 0xb7, 0x18, 0x79, 0xb2, 0xd7, 0x3b, 0x79, 0x6e, 0x6f, 0x38, 0x0f, 0xbe, 0x1d, 0x75, 0x2a, 0x59, + 0x52, 0x51, 0x44, 0x52, 0x15, 0xf6, 0xdd, 0xbb, 0xfb, 0xe3, 0x96, 0xa2, 0x35, 0xd7, 0xab, 0xff, + 0x82, 0xe3, 0xee, 0xfc, 0x37, 0x08, 0x2e, 0x37, 0x9d, 0xf2, 0xed, 0xbf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x37, 0xed, 0xaa, 0xaa, 0x34, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/readgroupset.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/readgroupset.pb.go new file mode 100644 index 000000000..9d6234649 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/readgroupset.pb.go @@ -0,0 +1,132 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/genomics/v1/readgroupset.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A read group set is a logical collection of read groups, which are +// collections of reads produced by a sequencer. A read group set typically +// models reads corresponding to one sample, sequenced one way, and aligned one +// way. +// +// * A read group set belongs to one dataset. +// * A read group belongs to one read group set. +// * A read belongs to one read group. +// +// For more genomics resource definitions, see [Fundamentals of Google +// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) +type ReadGroupSet struct { + // The server-generated read group set ID, unique for all read group sets. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The dataset to which this read group set belongs. + DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // The reference set to which the reads in this read group set are aligned. + ReferenceSetId string `protobuf:"bytes,3,opt,name=reference_set_id,json=referenceSetId" json:"reference_set_id,omitempty"` + // The read group set name. By default this will be initialized to the sample + // name of the sequenced data contained in this set. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // The filename of the original source file for this read group set, if any. + Filename string `protobuf:"bytes,5,opt,name=filename" json:"filename,omitempty"` + // The read groups in this set. There are typically 1-10 read groups in a read + // group set. 
+ ReadGroups []*ReadGroup `protobuf:"bytes,6,rep,name=read_groups,json=readGroups" json:"read_groups,omitempty"` + // A map of additional read group set information. + Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,7,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ReadGroupSet) Reset() { *m = ReadGroupSet{} } +func (m *ReadGroupSet) String() string { return proto.CompactTextString(m) } +func (*ReadGroupSet) ProtoMessage() {} +func (*ReadGroupSet) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } + +func (m *ReadGroupSet) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ReadGroupSet) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *ReadGroupSet) GetReferenceSetId() string { + if m != nil { + return m.ReferenceSetId + } + return "" +} + +func (m *ReadGroupSet) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ReadGroupSet) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func (m *ReadGroupSet) GetReadGroups() []*ReadGroup { + if m != nil { + return m.ReadGroups + } + return nil +} + +func (m *ReadGroupSet) GetInfo() map[string]*google_protobuf3.ListValue { + if m != nil { + return m.Info + } + return nil +} + +func init() { + proto.RegisterType((*ReadGroupSet)(nil), "google.genomics.v1.ReadGroupSet") +} + +func init() { proto.RegisterFile("google/genomics/v1/readgroupset.proto", fileDescriptor8) } + +var fileDescriptor8 = []byte{ + // 367 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x4f, 0x8b, 0xdb, 0x30, + 0x10, 0xc5, 0xb1, 0xf3, 0xa7, 0xcd, 0xa4, 0x84, 0x54, 0x87, 0x62, 0x4c, 0x03, 0x21, 0x50, 0x08, + 0x3d, 0xc8, 0x4d, 0x7a, 0x29, 0x29, 0xe4, 0x10, 0x28, 0x25, 0xb0, 0x87, 0x60, 0xc3, 0x1e, 0xf6, + 0x12, 0x14, 0x7b, 0x6c, 0xc4, 0x3a, 0x92, 0x91, 0xe4, 0x40, 
0xbe, 0xf3, 0x7e, 0x80, 0x3d, 0x2e, + 0x96, 0xff, 0x10, 0xd8, 0x25, 0xb7, 0xd1, 0xd3, 0xef, 0x8d, 0x46, 0x6f, 0xe0, 0x47, 0x26, 0x65, + 0x96, 0x63, 0x90, 0xa1, 0x90, 0x67, 0x1e, 0xeb, 0xe0, 0xb2, 0x0a, 0x14, 0xb2, 0x24, 0x53, 0xb2, + 0x2c, 0x34, 0x1a, 0x5a, 0x28, 0x69, 0x24, 0x21, 0x35, 0x46, 0x5b, 0x8c, 0x5e, 0x56, 0xfe, 0xf7, + 0xc6, 0xca, 0x0a, 0x1e, 0x30, 0x21, 0xa4, 0x61, 0x86, 0x4b, 0xa1, 0x6b, 0x87, 0xbf, 0xb8, 0xd7, + 0xb8, 0x61, 0xda, 0x0e, 0xf6, 0x74, 0x2a, 0xd3, 0x40, 0x1b, 0x55, 0xc6, 0xcd, 0x9b, 0x8b, 0x17, + 0x17, 0xbe, 0x84, 0xc8, 0x92, 0xff, 0x95, 0x23, 0x42, 0x43, 0x26, 0xe0, 0xf2, 0xc4, 0x73, 0xe6, + 0xce, 0x72, 0x14, 0xba, 0x3c, 0x21, 0x33, 0x80, 0x84, 0x19, 0xa6, 0xd1, 0x1c, 0x79, 0xe2, 0xb9, + 0x56, 0x1f, 0x35, 0xca, 0x3e, 0x21, 0x4b, 0x98, 0x2a, 0x4c, 0x51, 0xa1, 0x88, 0xf1, 0xd8, 0x40, + 0x3d, 0x0b, 0x4d, 0x3a, 0x3d, 0xb2, 0x24, 0x81, 0xbe, 0x60, 0x67, 0xf4, 0xfa, 0xf6, 0xd6, 0xd6, + 0xc4, 0x87, 0xcf, 0x29, 0xcf, 0xd1, 0xea, 0x03, 0xab, 0x77, 0x67, 0xb2, 0x85, 0x71, 0xf5, 0x95, + 0x63, 0x1d, 0x92, 0x37, 0x9c, 0xf7, 0x96, 0xe3, 0xf5, 0x8c, 0xbe, 0xcf, 0x88, 0x76, 0xf3, 0x87, + 0xa0, 0xda, 0x52, 0x93, 0x2d, 0xf4, 0xb9, 0x48, 0xa5, 0xf7, 0xc9, 0x1a, 0x7f, 0xde, 0x35, 0x46, + 0x68, 0xe8, 0x5e, 0xa4, 0xf2, 0x9f, 0x30, 0xea, 0x1a, 0x5a, 0x9f, 0x1f, 0xc1, 0xa8, 0x93, 0xc8, + 0x14, 0x7a, 0xcf, 0x78, 0x6d, 0x62, 0xa9, 0x4a, 0xf2, 0x0b, 0x06, 0x17, 0x96, 0x97, 0x68, 0x23, + 0x19, 0xaf, 0xfd, 0xb6, 0x7f, 0x1b, 0x33, 0x7d, 0xe0, 0xda, 0x3c, 0x56, 0x44, 0x58, 0x83, 0x1b, + 0xf7, 0x8f, 0xb3, 0xcb, 0xe1, 0x5b, 0x2c, 0xcf, 0x1f, 0xcc, 0xb2, 0xfb, 0x7a, 0x3b, 0xcc, 0xa1, + 0x6a, 0x72, 0x70, 0x9e, 0x36, 0x2d, 0x28, 0x73, 0x26, 0x32, 0x2a, 0x55, 0x56, 0xad, 0xda, 0x3e, + 0x11, 0xd4, 0x57, 0xac, 0xe0, 0xfa, 0x76, 0xfd, 0x7f, 0xdb, 0xfa, 0xd5, 0x71, 0x4e, 0x43, 0x4b, + 0xfe, 0x7e, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1f, 0xa9, 0x2f, 0xa5, 0x80, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/reads.pb.go 
b/vendor/google.golang.org/genproto/googleapis/genomics/v1/reads.pb.go new file mode 100644 index 000000000..4023fd2d2 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/reads.pb.go @@ -0,0 +1,1414 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/reads.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ImportReadGroupSetsRequest_PartitionStrategy int32 + +const ( + ImportReadGroupSetsRequest_PARTITION_STRATEGY_UNSPECIFIED ImportReadGroupSetsRequest_PartitionStrategy = 0 + // In most cases, this strategy yields one read group set per file. This is + // the default behavior. + // + // Allocate one read group set per file per sample. For BAM files, read + // groups are considered to share a sample if they have identical sample + // names. Furthermore, all reads for each file which do not belong to a read + // group, if any, will be grouped into a single read group set per-file. + ImportReadGroupSetsRequest_PER_FILE_PER_SAMPLE ImportReadGroupSetsRequest_PartitionStrategy = 1 + // Includes all read groups in all imported files into a single read group + // set. Requires that the headers for all imported files are equivalent. All + // reads which do not belong to a read group, if any, will be grouped into a + // separate read group set. 
+ ImportReadGroupSetsRequest_MERGE_ALL ImportReadGroupSetsRequest_PartitionStrategy = 2 +) + +var ImportReadGroupSetsRequest_PartitionStrategy_name = map[int32]string{ + 0: "PARTITION_STRATEGY_UNSPECIFIED", + 1: "PER_FILE_PER_SAMPLE", + 2: "MERGE_ALL", +} +var ImportReadGroupSetsRequest_PartitionStrategy_value = map[string]int32{ + "PARTITION_STRATEGY_UNSPECIFIED": 0, + "PER_FILE_PER_SAMPLE": 1, + "MERGE_ALL": 2, +} + +func (x ImportReadGroupSetsRequest_PartitionStrategy) String() string { + return proto.EnumName(ImportReadGroupSetsRequest_PartitionStrategy_name, int32(x)) +} +func (ImportReadGroupSetsRequest_PartitionStrategy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor9, []int{2, 0} +} + +// The read group set search request. +type SearchReadGroupSetsRequest struct { + // Restricts this query to read group sets within the given datasets. At least + // one ID must be provided. + DatasetIds []string `protobuf:"bytes,1,rep,name=dataset_ids,json=datasetIds" json:"dataset_ids,omitempty"` + // Only return read group sets for which a substring of the name matches this + // string. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 256. The maximum value is 1024. 
+ PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *SearchReadGroupSetsRequest) Reset() { *m = SearchReadGroupSetsRequest{} } +func (m *SearchReadGroupSetsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchReadGroupSetsRequest) ProtoMessage() {} +func (*SearchReadGroupSetsRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0} } + +func (m *SearchReadGroupSetsRequest) GetDatasetIds() []string { + if m != nil { + return m.DatasetIds + } + return nil +} + +func (m *SearchReadGroupSetsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SearchReadGroupSetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *SearchReadGroupSetsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// The read group set search response. +type SearchReadGroupSetsResponse struct { + // The list of matching read group sets. + ReadGroupSets []*ReadGroupSet `protobuf:"bytes,1,rep,name=read_group_sets,json=readGroupSets" json:"read_group_sets,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchReadGroupSetsResponse) Reset() { *m = SearchReadGroupSetsResponse{} } +func (m *SearchReadGroupSetsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchReadGroupSetsResponse) ProtoMessage() {} +func (*SearchReadGroupSetsResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{1} } + +func (m *SearchReadGroupSetsResponse) GetReadGroupSets() []*ReadGroupSet { + if m != nil { + return m.ReadGroupSets + } + return nil +} + +func (m *SearchReadGroupSetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The read group set import request. +type ImportReadGroupSetsRequest struct { + // Required. The ID of the dataset these read group sets will belong to. The + // caller must have WRITE permissions to this dataset. + DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // The reference set to which the imported read group sets are aligned to, if + // any. The reference names of this reference set must be a superset of those + // found in the imported file headers. If no reference set id is provided, a + // best effort is made to associate with a matching reference set. + ReferenceSetId string `protobuf:"bytes,4,opt,name=reference_set_id,json=referenceSetId" json:"reference_set_id,omitempty"` + // A list of URIs pointing at [BAM + // files](https://samtools.github.io/hts-specs/SAMv1.pdf) + // in Google Cloud Storage. + // Those URIs can include wildcards (*), but do not add or remove + // matching files before import has completed. + // + // Note that Google Cloud Storage object listing is only eventually + // consistent: files added may be not be immediately visible to + // everyone. Thus, if using a wildcard it is preferable not to start + // the import immediately after the files are created. 
+ SourceUris []string `protobuf:"bytes,2,rep,name=source_uris,json=sourceUris" json:"source_uris,omitempty"` + // The partition strategy describes how read groups are partitioned into read + // group sets. + PartitionStrategy ImportReadGroupSetsRequest_PartitionStrategy `protobuf:"varint,5,opt,name=partition_strategy,json=partitionStrategy,enum=google.genomics.v1.ImportReadGroupSetsRequest_PartitionStrategy" json:"partition_strategy,omitempty"` +} + +func (m *ImportReadGroupSetsRequest) Reset() { *m = ImportReadGroupSetsRequest{} } +func (m *ImportReadGroupSetsRequest) String() string { return proto.CompactTextString(m) } +func (*ImportReadGroupSetsRequest) ProtoMessage() {} +func (*ImportReadGroupSetsRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{2} } + +func (m *ImportReadGroupSetsRequest) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *ImportReadGroupSetsRequest) GetReferenceSetId() string { + if m != nil { + return m.ReferenceSetId + } + return "" +} + +func (m *ImportReadGroupSetsRequest) GetSourceUris() []string { + if m != nil { + return m.SourceUris + } + return nil +} + +func (m *ImportReadGroupSetsRequest) GetPartitionStrategy() ImportReadGroupSetsRequest_PartitionStrategy { + if m != nil { + return m.PartitionStrategy + } + return ImportReadGroupSetsRequest_PARTITION_STRATEGY_UNSPECIFIED +} + +// The read group set import response. +type ImportReadGroupSetsResponse struct { + // IDs of the read group sets that were created. 
+ ReadGroupSetIds []string `protobuf:"bytes,1,rep,name=read_group_set_ids,json=readGroupSetIds" json:"read_group_set_ids,omitempty"` +} + +func (m *ImportReadGroupSetsResponse) Reset() { *m = ImportReadGroupSetsResponse{} } +func (m *ImportReadGroupSetsResponse) String() string { return proto.CompactTextString(m) } +func (*ImportReadGroupSetsResponse) ProtoMessage() {} +func (*ImportReadGroupSetsResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{3} } + +func (m *ImportReadGroupSetsResponse) GetReadGroupSetIds() []string { + if m != nil { + return m.ReadGroupSetIds + } + return nil +} + +// The read group set export request. +type ExportReadGroupSetRequest struct { + // Required. The Google Cloud project ID that owns this + // export. The caller must have WRITE access to this project. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. A Google Cloud Storage URI for the exported BAM file. + // The currently authenticated user must have write access to the new file. + // An error will be returned if the URI already contains data. + ExportUri string `protobuf:"bytes,2,opt,name=export_uri,json=exportUri" json:"export_uri,omitempty"` + // Required. The ID of the read group set to export. The caller must have + // READ access to this read group set. + ReadGroupSetId string `protobuf:"bytes,3,opt,name=read_group_set_id,json=readGroupSetId" json:"read_group_set_id,omitempty"` + // The reference names to export. If this is not specified, all reference + // sequences, including unmapped reads, are exported. + // Use `*` to export only unmapped reads. 
+ ReferenceNames []string `protobuf:"bytes,4,rep,name=reference_names,json=referenceNames" json:"reference_names,omitempty"` +} + +func (m *ExportReadGroupSetRequest) Reset() { *m = ExportReadGroupSetRequest{} } +func (m *ExportReadGroupSetRequest) String() string { return proto.CompactTextString(m) } +func (*ExportReadGroupSetRequest) ProtoMessage() {} +func (*ExportReadGroupSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{4} } + +func (m *ExportReadGroupSetRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ExportReadGroupSetRequest) GetExportUri() string { + if m != nil { + return m.ExportUri + } + return "" +} + +func (m *ExportReadGroupSetRequest) GetReadGroupSetId() string { + if m != nil { + return m.ReadGroupSetId + } + return "" +} + +func (m *ExportReadGroupSetRequest) GetReferenceNames() []string { + if m != nil { + return m.ReferenceNames + } + return nil +} + +type UpdateReadGroupSetRequest struct { + // The ID of the read group set to be updated. The caller must have WRITE + // permissions to the dataset associated with this read group set. + ReadGroupSetId string `protobuf:"bytes,1,opt,name=read_group_set_id,json=readGroupSetId" json:"read_group_set_id,omitempty"` + // The new read group set data. See `updateMask` for details on mutability of + // fields. + ReadGroupSet *ReadGroupSet `protobuf:"bytes,2,opt,name=read_group_set,json=readGroupSet" json:"read_group_set,omitempty"` + // An optional mask specifying which fields to update. Supported fields: + // + // * [name][google.genomics.v1.ReadGroupSet.name]. + // * [referenceSetId][google.genomics.v1.ReadGroupSet.reference_set_id]. + // + // Leaving `updateMask` unset is equivalent to specifying all mutable + // fields. 
+ UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateReadGroupSetRequest) Reset() { *m = UpdateReadGroupSetRequest{} } +func (m *UpdateReadGroupSetRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateReadGroupSetRequest) ProtoMessage() {} +func (*UpdateReadGroupSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{5} } + +func (m *UpdateReadGroupSetRequest) GetReadGroupSetId() string { + if m != nil { + return m.ReadGroupSetId + } + return "" +} + +func (m *UpdateReadGroupSetRequest) GetReadGroupSet() *ReadGroupSet { + if m != nil { + return m.ReadGroupSet + } + return nil +} + +func (m *UpdateReadGroupSetRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +type DeleteReadGroupSetRequest struct { + // The ID of the read group set to be deleted. The caller must have WRITE + // permissions to the dataset associated with this read group set. + ReadGroupSetId string `protobuf:"bytes,1,opt,name=read_group_set_id,json=readGroupSetId" json:"read_group_set_id,omitempty"` +} + +func (m *DeleteReadGroupSetRequest) Reset() { *m = DeleteReadGroupSetRequest{} } +func (m *DeleteReadGroupSetRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteReadGroupSetRequest) ProtoMessage() {} +func (*DeleteReadGroupSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{6} } + +func (m *DeleteReadGroupSetRequest) GetReadGroupSetId() string { + if m != nil { + return m.ReadGroupSetId + } + return "" +} + +type GetReadGroupSetRequest struct { + // The ID of the read group set. 
+ ReadGroupSetId string `protobuf:"bytes,1,opt,name=read_group_set_id,json=readGroupSetId" json:"read_group_set_id,omitempty"` +} + +func (m *GetReadGroupSetRequest) Reset() { *m = GetReadGroupSetRequest{} } +func (m *GetReadGroupSetRequest) String() string { return proto.CompactTextString(m) } +func (*GetReadGroupSetRequest) ProtoMessage() {} +func (*GetReadGroupSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{7} } + +func (m *GetReadGroupSetRequest) GetReadGroupSetId() string { + if m != nil { + return m.ReadGroupSetId + } + return "" +} + +type ListCoverageBucketsRequest struct { + // Required. The ID of the read group set over which coverage is requested. + ReadGroupSetId string `protobuf:"bytes,1,opt,name=read_group_set_id,json=readGroupSetId" json:"read_group_set_id,omitempty"` + // The name of the reference to query, within the reference set associated + // with this query. Optional. + ReferenceName string `protobuf:"bytes,3,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The start position of the range on the reference, 0-based inclusive. If + // specified, `referenceName` must also be specified. Defaults to 0. + Start int64 `protobuf:"varint,4,opt,name=start" json:"start,omitempty"` + // The end position of the range on the reference, 0-based exclusive. If + // specified, `referenceName` must also be specified. If unset or 0, defaults + // to the length of the reference. + End int64 `protobuf:"varint,5,opt,name=end" json:"end,omitempty"` + // The desired width of each reported coverage bucket in base pairs. This + // will be rounded down to the nearest precomputed bucket width; the value + // of which is returned as `bucketWidth` in the response. Defaults + // to infinity (each bucket spans an entire reference sequence) or the length + // of the target range, if specified. The smallest precomputed + // `bucketWidth` is currently 2048 base pairs; this is subject to + // change. 
+ TargetBucketWidth int64 `protobuf:"varint,6,opt,name=target_bucket_width,json=targetBucketWidth" json:"target_bucket_width,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 1024. The maximum value is 2048. + PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListCoverageBucketsRequest) Reset() { *m = ListCoverageBucketsRequest{} } +func (m *ListCoverageBucketsRequest) String() string { return proto.CompactTextString(m) } +func (*ListCoverageBucketsRequest) ProtoMessage() {} +func (*ListCoverageBucketsRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{8} } + +func (m *ListCoverageBucketsRequest) GetReadGroupSetId() string { + if m != nil { + return m.ReadGroupSetId + } + return "" +} + +func (m *ListCoverageBucketsRequest) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *ListCoverageBucketsRequest) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *ListCoverageBucketsRequest) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *ListCoverageBucketsRequest) GetTargetBucketWidth() int64 { + if m != nil { + return m.TargetBucketWidth + } + return 0 +} + +func (m *ListCoverageBucketsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListCoverageBucketsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// A bucket over which read coverage has been precomputed. A bucket corresponds +// to a specific range of the reference sequence. 
+type CoverageBucket struct { + // The genomic coordinate range spanned by this bucket. + Range *Range `protobuf:"bytes,1,opt,name=range" json:"range,omitempty"` + // The average number of reads which are aligned to each individual + // reference base in this bucket. + MeanCoverage float32 `protobuf:"fixed32,2,opt,name=mean_coverage,json=meanCoverage" json:"mean_coverage,omitempty"` +} + +func (m *CoverageBucket) Reset() { *m = CoverageBucket{} } +func (m *CoverageBucket) String() string { return proto.CompactTextString(m) } +func (*CoverageBucket) ProtoMessage() {} +func (*CoverageBucket) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{9} } + +func (m *CoverageBucket) GetRange() *Range { + if m != nil { + return m.Range + } + return nil +} + +func (m *CoverageBucket) GetMeanCoverage() float32 { + if m != nil { + return m.MeanCoverage + } + return 0 +} + +type ListCoverageBucketsResponse struct { + // The length of each coverage bucket in base pairs. Note that buckets at the + // end of a reference sequence may be shorter. This value is omitted if the + // bucket width is infinity (the default behaviour, with no range or + // `targetBucketWidth`). + BucketWidth int64 `protobuf:"varint,1,opt,name=bucket_width,json=bucketWidth" json:"bucket_width,omitempty"` + // The coverage buckets. The list of buckets is sparse; a bucket with 0 + // overlapping reads is not returned. A bucket never crosses more than one + // reference sequence. Each bucket has width `bucketWidth`, unless + // its end is the end of the reference sequence. + CoverageBuckets []*CoverageBucket `protobuf:"bytes,2,rep,name=coverage_buckets,json=coverageBuckets" json:"coverage_buckets,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. 
+ NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListCoverageBucketsResponse) Reset() { *m = ListCoverageBucketsResponse{} } +func (m *ListCoverageBucketsResponse) String() string { return proto.CompactTextString(m) } +func (*ListCoverageBucketsResponse) ProtoMessage() {} +func (*ListCoverageBucketsResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{10} } + +func (m *ListCoverageBucketsResponse) GetBucketWidth() int64 { + if m != nil { + return m.BucketWidth + } + return 0 +} + +func (m *ListCoverageBucketsResponse) GetCoverageBuckets() []*CoverageBucket { + if m != nil { + return m.CoverageBuckets + } + return nil +} + +func (m *ListCoverageBucketsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The read search request. +type SearchReadsRequest struct { + // The IDs of the read groups sets within which to search for reads. All + // specified read group sets must be aligned against a common set of reference + // sequences; this defines the genomic coordinates for the query. Must specify + // one of `readGroupSetIds` or `readGroupIds`. + ReadGroupSetIds []string `protobuf:"bytes,1,rep,name=read_group_set_ids,json=readGroupSetIds" json:"read_group_set_ids,omitempty"` + // The IDs of the read groups within which to search for reads. All specified + // read groups must belong to the same read group sets. Must specify one of + // `readGroupSetIds` or `readGroupIds`. + ReadGroupIds []string `protobuf:"bytes,5,rep,name=read_group_ids,json=readGroupIds" json:"read_group_ids,omitempty"` + // The reference sequence name, for example `chr1`, `1`, or `chrX`. If set to + // `*`, only unmapped reads are returned. If unspecified, all reads (mapped + // and unmapped) are returned. 
+ ReferenceName string `protobuf:"bytes,7,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The start position of the range on the reference, 0-based inclusive. If + // specified, `referenceName` must also be specified. + Start int64 `protobuf:"varint,8,opt,name=start" json:"start,omitempty"` + // The end position of the range on the reference, 0-based exclusive. If + // specified, `referenceName` must also be specified. + End int64 `protobuf:"varint,9,opt,name=end" json:"end,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 256. The maximum value is 2048. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *SearchReadsRequest) Reset() { *m = SearchReadsRequest{} } +func (m *SearchReadsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchReadsRequest) ProtoMessage() {} +func (*SearchReadsRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{11} } + +func (m *SearchReadsRequest) GetReadGroupSetIds() []string { + if m != nil { + return m.ReadGroupSetIds + } + return nil +} + +func (m *SearchReadsRequest) GetReadGroupIds() []string { + if m != nil { + return m.ReadGroupIds + } + return nil +} + +func (m *SearchReadsRequest) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *SearchReadsRequest) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *SearchReadsRequest) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *SearchReadsRequest) GetPageToken() string { + if m != nil { + 
return m.PageToken + } + return "" +} + +func (m *SearchReadsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// The read search response. +type SearchReadsResponse struct { + // The list of matching alignments sorted by mapped genomic coordinate, + // if any, ascending in position within the same reference. Unmapped reads, + // which have no position, are returned contiguously and are sorted in + // ascending lexicographic order by fragment name. + Alignments []*Read `protobuf:"bytes,1,rep,name=alignments" json:"alignments,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchReadsResponse) Reset() { *m = SearchReadsResponse{} } +func (m *SearchReadsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchReadsResponse) ProtoMessage() {} +func (*SearchReadsResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{12} } + +func (m *SearchReadsResponse) GetAlignments() []*Read { + if m != nil { + return m.Alignments + } + return nil +} + +func (m *SearchReadsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The stream reads request. +type StreamReadsRequest struct { + // The Google Cloud project ID which will be billed + // for this access. The caller must have WRITE access to this project. + // Required. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The ID of the read group set from which to stream reads. 
+ ReadGroupSetId string `protobuf:"bytes,2,opt,name=read_group_set_id,json=readGroupSetId" json:"read_group_set_id,omitempty"` + // The reference sequence name, for example `chr1`, + // `1`, or `chrX`. If set to *, only unmapped reads are + // returned. + ReferenceName string `protobuf:"bytes,3,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The start position of the range on the reference, 0-based inclusive. If + // specified, `referenceName` must also be specified. + Start int64 `protobuf:"varint,4,opt,name=start" json:"start,omitempty"` + // The end position of the range on the reference, 0-based exclusive. If + // specified, `referenceName` must also be specified. + End int64 `protobuf:"varint,5,opt,name=end" json:"end,omitempty"` + // Restricts results to a shard containing approximately `1/totalShards` + // of the normal response payload for this query. Results from a sharded + // request are disjoint from those returned by all queries which differ only + // in their shard parameter. A shard may yield 0 results; this is especially + // likely for large values of `totalShards`. + // + // Valid values are `[0, totalShards)`. + Shard int32 `protobuf:"varint,6,opt,name=shard" json:"shard,omitempty"` + // Specifying `totalShards` causes a disjoint subset of the normal response + // payload to be returned for each query with a unique `shard` parameter + // specified. A best effort is made to yield equally sized shards. Sharding + // can be used to distribute processing amongst workers, where each worker is + // assigned a unique `shard` number and all workers specify the same + // `totalShards` number. The union of reads returned for all sharded queries + // `[0, totalShards)` is equal to those returned by a single unsharded query. + // + // Queries for different values of `totalShards` with common divisors will + // share shard boundaries. 
For example, streaming `shard` 2 of 5 + // `totalShards` yields the same results as streaming `shard`s 4 and 5 of 10 + // `totalShards`. This property can be leveraged for adaptive retries. + TotalShards int32 `protobuf:"varint,7,opt,name=total_shards,json=totalShards" json:"total_shards,omitempty"` +} + +func (m *StreamReadsRequest) Reset() { *m = StreamReadsRequest{} } +func (m *StreamReadsRequest) String() string { return proto.CompactTextString(m) } +func (*StreamReadsRequest) ProtoMessage() {} +func (*StreamReadsRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{13} } + +func (m *StreamReadsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *StreamReadsRequest) GetReadGroupSetId() string { + if m != nil { + return m.ReadGroupSetId + } + return "" +} + +func (m *StreamReadsRequest) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *StreamReadsRequest) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *StreamReadsRequest) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *StreamReadsRequest) GetShard() int32 { + if m != nil { + return m.Shard + } + return 0 +} + +func (m *StreamReadsRequest) GetTotalShards() int32 { + if m != nil { + return m.TotalShards + } + return 0 +} + +type StreamReadsResponse struct { + Alignments []*Read `protobuf:"bytes,1,rep,name=alignments" json:"alignments,omitempty"` +} + +func (m *StreamReadsResponse) Reset() { *m = StreamReadsResponse{} } +func (m *StreamReadsResponse) String() string { return proto.CompactTextString(m) } +func (*StreamReadsResponse) ProtoMessage() {} +func (*StreamReadsResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{14} } + +func (m *StreamReadsResponse) GetAlignments() []*Read { + if m != nil { + return m.Alignments + } + return nil +} + +func init() { + proto.RegisterType((*SearchReadGroupSetsRequest)(nil), 
"google.genomics.v1.SearchReadGroupSetsRequest") + proto.RegisterType((*SearchReadGroupSetsResponse)(nil), "google.genomics.v1.SearchReadGroupSetsResponse") + proto.RegisterType((*ImportReadGroupSetsRequest)(nil), "google.genomics.v1.ImportReadGroupSetsRequest") + proto.RegisterType((*ImportReadGroupSetsResponse)(nil), "google.genomics.v1.ImportReadGroupSetsResponse") + proto.RegisterType((*ExportReadGroupSetRequest)(nil), "google.genomics.v1.ExportReadGroupSetRequest") + proto.RegisterType((*UpdateReadGroupSetRequest)(nil), "google.genomics.v1.UpdateReadGroupSetRequest") + proto.RegisterType((*DeleteReadGroupSetRequest)(nil), "google.genomics.v1.DeleteReadGroupSetRequest") + proto.RegisterType((*GetReadGroupSetRequest)(nil), "google.genomics.v1.GetReadGroupSetRequest") + proto.RegisterType((*ListCoverageBucketsRequest)(nil), "google.genomics.v1.ListCoverageBucketsRequest") + proto.RegisterType((*CoverageBucket)(nil), "google.genomics.v1.CoverageBucket") + proto.RegisterType((*ListCoverageBucketsResponse)(nil), "google.genomics.v1.ListCoverageBucketsResponse") + proto.RegisterType((*SearchReadsRequest)(nil), "google.genomics.v1.SearchReadsRequest") + proto.RegisterType((*SearchReadsResponse)(nil), "google.genomics.v1.SearchReadsResponse") + proto.RegisterType((*StreamReadsRequest)(nil), "google.genomics.v1.StreamReadsRequest") + proto.RegisterType((*StreamReadsResponse)(nil), "google.genomics.v1.StreamReadsResponse") + proto.RegisterEnum("google.genomics.v1.ImportReadGroupSetsRequest_PartitionStrategy", ImportReadGroupSetsRequest_PartitionStrategy_name, ImportReadGroupSetsRequest_PartitionStrategy_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for StreamingReadService service + +type StreamingReadServiceClient interface { + // Returns a stream of all the reads matching the search request, ordered + // by reference name, position, and ID. + StreamReads(ctx context.Context, in *StreamReadsRequest, opts ...grpc.CallOption) (StreamingReadService_StreamReadsClient, error) +} + +type streamingReadServiceClient struct { + cc *grpc.ClientConn +} + +func NewStreamingReadServiceClient(cc *grpc.ClientConn) StreamingReadServiceClient { + return &streamingReadServiceClient{cc} +} + +func (c *streamingReadServiceClient) StreamReads(ctx context.Context, in *StreamReadsRequest, opts ...grpc.CallOption) (StreamingReadService_StreamReadsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_StreamingReadService_serviceDesc.Streams[0], c.cc, "/google.genomics.v1.StreamingReadService/StreamReads", opts...) + if err != nil { + return nil, err + } + x := &streamingReadServiceStreamReadsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type StreamingReadService_StreamReadsClient interface { + Recv() (*StreamReadsResponse, error) + grpc.ClientStream +} + +type streamingReadServiceStreamReadsClient struct { + grpc.ClientStream +} + +func (x *streamingReadServiceStreamReadsClient) Recv() (*StreamReadsResponse, error) { + m := new(StreamReadsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for StreamingReadService service + +type StreamingReadServiceServer interface { + // Returns a stream of all the reads matching the search request, ordered + // by reference name, position, and ID. 
+ StreamReads(*StreamReadsRequest, StreamingReadService_StreamReadsServer) error +} + +func RegisterStreamingReadServiceServer(s *grpc.Server, srv StreamingReadServiceServer) { + s.RegisterService(&_StreamingReadService_serviceDesc, srv) +} + +func _StreamingReadService_StreamReads_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamReadsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StreamingReadServiceServer).StreamReads(m, &streamingReadServiceStreamReadsServer{stream}) +} + +type StreamingReadService_StreamReadsServer interface { + Send(*StreamReadsResponse) error + grpc.ServerStream +} + +type streamingReadServiceStreamReadsServer struct { + grpc.ServerStream +} + +func (x *streamingReadServiceStreamReadsServer) Send(m *StreamReadsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _StreamingReadService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.genomics.v1.StreamingReadService", + HandlerType: (*StreamingReadServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamReads", + Handler: _StreamingReadService_StreamReads_Handler, + ServerStreams: true, + }, + }, + Metadata: "google/genomics/v1/reads.proto", +} + +// Client API for ReadServiceV1 service + +type ReadServiceV1Client interface { + // Creates read group sets by asynchronously importing the provided + // information. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // The caller must have WRITE permissions to the dataset. 
+ // + // ## Notes on [BAM](https://samtools.github.io/hts-specs/SAMv1.pdf) import + // + // - Tags will be converted to strings - tag types are not preserved + // - Comments (`@CO`) in the input file header will not be preserved + // - Original header order of references (`@SQ`) will not be preserved + // - Any reverse stranded unmapped reads will be reverse complemented, and + // their qualities (also the "BQ" and "OQ" tags, if any) will be reversed + // - Unmapped reads will be stripped of positional information (reference name + // and position) + ImportReadGroupSets(ctx context.Context, in *ImportReadGroupSetsRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Exports a read group set to a BAM file in Google Cloud Storage. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Note that currently there may be some differences between exported BAM + // files and the original BAM file at the time of import. See + // [ImportReadGroupSets][google.genomics.v1.ReadServiceV1.ImportReadGroupSets] + // for caveats. + ExportReadGroupSet(ctx context.Context, in *ExportReadGroupSetRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Searches for read group sets matching the criteria. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchReadGroupSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L135). + SearchReadGroupSets(ctx context.Context, in *SearchReadGroupSetsRequest, opts ...grpc.CallOption) (*SearchReadGroupSetsResponse, error) + // Updates a read group set. 
+ // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This method supports patch semantics. + UpdateReadGroupSet(ctx context.Context, in *UpdateReadGroupSetRequest, opts ...grpc.CallOption) (*ReadGroupSet, error) + // Deletes a read group set. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteReadGroupSet(ctx context.Context, in *DeleteReadGroupSetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Gets a read group set by ID. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetReadGroupSet(ctx context.Context, in *GetReadGroupSetRequest, opts ...grpc.CallOption) (*ReadGroupSet, error) + // Lists fixed width coverage buckets for a read group set, each of which + // correspond to a range of a reference sequence. Each bucket summarizes + // coverage information across its corresponding genomic range. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Coverage is defined as the number of reads which are aligned to a given + // base in the reference sequence. Coverage buckets are available at several + // precomputed bucket widths, enabling retrieval of various coverage 'zoom + // levels'. The caller must have READ permissions for the target read group + // set. + ListCoverageBuckets(ctx context.Context, in *ListCoverageBucketsRequest, opts ...grpc.CallOption) (*ListCoverageBucketsResponse, error) + // Gets a list of reads for one or more read group sets. 
+ // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Reads search operates over a genomic coordinate space of reference sequence + // & position defined over the reference sequences to which the requested + // read group sets are aligned. + // + // If a target positional range is specified, search returns all reads whose + // alignment to the reference genome overlap the range. A query which + // specifies only read group set IDs yields all reads in those read group + // sets, including unmapped reads. + // + // All reads returned (including reads on subsequent pages) are ordered by + // genomic coordinate (by reference sequence, then position). Reads with + // equivalent genomic coordinates are returned in an unspecified order. This + // order is consistent, such that two queries for the same content (regardless + // of page size) yield reads in the same order across their respective streams + // of paginated responses. + // + // Implements + // [GlobalAllianceApi.searchReads](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L85). + SearchReads(ctx context.Context, in *SearchReadsRequest, opts ...grpc.CallOption) (*SearchReadsResponse, error) +} + +type readServiceV1Client struct { + cc *grpc.ClientConn +} + +func NewReadServiceV1Client(cc *grpc.ClientConn) ReadServiceV1Client { + return &readServiceV1Client{cc} +} + +func (c *readServiceV1Client) ImportReadGroupSets(ctx context.Context, in *ImportReadGroupSetsRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReadServiceV1/ImportReadGroupSets", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *readServiceV1Client) ExportReadGroupSet(ctx context.Context, in *ExportReadGroupSetRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReadServiceV1/ExportReadGroupSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *readServiceV1Client) SearchReadGroupSets(ctx context.Context, in *SearchReadGroupSetsRequest, opts ...grpc.CallOption) (*SearchReadGroupSetsResponse, error) { + out := new(SearchReadGroupSetsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReadServiceV1/SearchReadGroupSets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *readServiceV1Client) UpdateReadGroupSet(ctx context.Context, in *UpdateReadGroupSetRequest, opts ...grpc.CallOption) (*ReadGroupSet, error) { + out := new(ReadGroupSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReadServiceV1/UpdateReadGroupSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *readServiceV1Client) DeleteReadGroupSet(ctx context.Context, in *DeleteReadGroupSetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReadServiceV1/DeleteReadGroupSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *readServiceV1Client) GetReadGroupSet(ctx context.Context, in *GetReadGroupSetRequest, opts ...grpc.CallOption) (*ReadGroupSet, error) { + out := new(ReadGroupSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReadServiceV1/GetReadGroupSet", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *readServiceV1Client) ListCoverageBuckets(ctx context.Context, in *ListCoverageBucketsRequest, opts ...grpc.CallOption) (*ListCoverageBucketsResponse, error) { + out := new(ListCoverageBucketsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReadServiceV1/ListCoverageBuckets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *readServiceV1Client) SearchReads(ctx context.Context, in *SearchReadsRequest, opts ...grpc.CallOption) (*SearchReadsResponse, error) { + out := new(SearchReadsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReadServiceV1/SearchReads", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ReadServiceV1 service + +type ReadServiceV1Server interface { + // Creates read group sets by asynchronously importing the provided + // information. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // The caller must have WRITE permissions to the dataset. + // + // ## Notes on [BAM](https://samtools.github.io/hts-specs/SAMv1.pdf) import + // + // - Tags will be converted to strings - tag types are not preserved + // - Comments (`@CO`) in the input file header will not be preserved + // - Original header order of references (`@SQ`) will not be preserved + // - Any reverse stranded unmapped reads will be reverse complemented, and + // their qualities (also the "BQ" and "OQ" tags, if any) will be reversed + // - Unmapped reads will be stripped of positional information (reference name + // and position) + ImportReadGroupSets(context.Context, *ImportReadGroupSetsRequest) (*google_longrunning.Operation, error) + // Exports a read group set to a BAM file in Google Cloud Storage. 
+ // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Note that currently there may be some differences between exported BAM + // files and the original BAM file at the time of import. See + // [ImportReadGroupSets][google.genomics.v1.ReadServiceV1.ImportReadGroupSets] + // for caveats. + ExportReadGroupSet(context.Context, *ExportReadGroupSetRequest) (*google_longrunning.Operation, error) + // Searches for read group sets matching the criteria. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchReadGroupSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L135). + SearchReadGroupSets(context.Context, *SearchReadGroupSetsRequest) (*SearchReadGroupSetsResponse, error) + // Updates a read group set. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This method supports patch semantics. + UpdateReadGroupSet(context.Context, *UpdateReadGroupSetRequest) (*ReadGroupSet, error) + // Deletes a read group set. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteReadGroupSet(context.Context, *DeleteReadGroupSetRequest) (*google_protobuf1.Empty, error) + // Gets a read group set by ID. 
+ // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetReadGroupSet(context.Context, *GetReadGroupSetRequest) (*ReadGroupSet, error) + // Lists fixed width coverage buckets for a read group set, each of which + // correspond to a range of a reference sequence. Each bucket summarizes + // coverage information across its corresponding genomic range. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Coverage is defined as the number of reads which are aligned to a given + // base in the reference sequence. Coverage buckets are available at several + // precomputed bucket widths, enabling retrieval of various coverage 'zoom + // levels'. The caller must have READ permissions for the target read group + // set. + ListCoverageBuckets(context.Context, *ListCoverageBucketsRequest) (*ListCoverageBucketsResponse, error) + // Gets a list of reads for one or more read group sets. + // + // For the definitions of read group sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Reads search operates over a genomic coordinate space of reference sequence + // & position defined over the reference sequences to which the requested + // read group sets are aligned. + // + // If a target positional range is specified, search returns all reads whose + // alignment to the reference genome overlap the range. A query which + // specifies only read group set IDs yields all reads in those read group + // sets, including unmapped reads. + // + // All reads returned (including reads on subsequent pages) are ordered by + // genomic coordinate (by reference sequence, then position). 
Reads with + // equivalent genomic coordinates are returned in an unspecified order. This + // order is consistent, such that two queries for the same content (regardless + // of page size) yield reads in the same order across their respective streams + // of paginated responses. + // + // Implements + // [GlobalAllianceApi.searchReads](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/readmethods.avdl#L85). + SearchReads(context.Context, *SearchReadsRequest) (*SearchReadsResponse, error) +} + +func RegisterReadServiceV1Server(s *grpc.Server, srv ReadServiceV1Server) { + s.RegisterService(&_ReadServiceV1_serviceDesc, srv) +} + +func _ReadServiceV1_ImportReadGroupSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportReadGroupSetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReadServiceV1Server).ImportReadGroupSets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReadServiceV1/ImportReadGroupSets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReadServiceV1Server).ImportReadGroupSets(ctx, req.(*ImportReadGroupSetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReadServiceV1_ExportReadGroupSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportReadGroupSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReadServiceV1Server).ExportReadGroupSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReadServiceV1/ExportReadGroupSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ReadServiceV1Server).ExportReadGroupSet(ctx, req.(*ExportReadGroupSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReadServiceV1_SearchReadGroupSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchReadGroupSetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReadServiceV1Server).SearchReadGroupSets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReadServiceV1/SearchReadGroupSets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReadServiceV1Server).SearchReadGroupSets(ctx, req.(*SearchReadGroupSetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReadServiceV1_UpdateReadGroupSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateReadGroupSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReadServiceV1Server).UpdateReadGroupSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReadServiceV1/UpdateReadGroupSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReadServiceV1Server).UpdateReadGroupSet(ctx, req.(*UpdateReadGroupSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReadServiceV1_DeleteReadGroupSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteReadGroupSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReadServiceV1Server).DeleteReadGroupSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: "/google.genomics.v1.ReadServiceV1/DeleteReadGroupSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReadServiceV1Server).DeleteReadGroupSet(ctx, req.(*DeleteReadGroupSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReadServiceV1_GetReadGroupSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetReadGroupSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReadServiceV1Server).GetReadGroupSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReadServiceV1/GetReadGroupSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReadServiceV1Server).GetReadGroupSet(ctx, req.(*GetReadGroupSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReadServiceV1_ListCoverageBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCoverageBucketsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReadServiceV1Server).ListCoverageBuckets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReadServiceV1/ListCoverageBuckets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReadServiceV1Server).ListCoverageBuckets(ctx, req.(*ListCoverageBucketsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReadServiceV1_SearchReads_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchReadsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil 
{ + return srv.(ReadServiceV1Server).SearchReads(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReadServiceV1/SearchReads", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReadServiceV1Server).SearchReads(ctx, req.(*SearchReadsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ReadServiceV1_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.genomics.v1.ReadServiceV1", + HandlerType: (*ReadServiceV1Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ImportReadGroupSets", + Handler: _ReadServiceV1_ImportReadGroupSets_Handler, + }, + { + MethodName: "ExportReadGroupSet", + Handler: _ReadServiceV1_ExportReadGroupSet_Handler, + }, + { + MethodName: "SearchReadGroupSets", + Handler: _ReadServiceV1_SearchReadGroupSets_Handler, + }, + { + MethodName: "UpdateReadGroupSet", + Handler: _ReadServiceV1_UpdateReadGroupSet_Handler, + }, + { + MethodName: "DeleteReadGroupSet", + Handler: _ReadServiceV1_DeleteReadGroupSet_Handler, + }, + { + MethodName: "GetReadGroupSet", + Handler: _ReadServiceV1_GetReadGroupSet_Handler, + }, + { + MethodName: "ListCoverageBuckets", + Handler: _ReadServiceV1_ListCoverageBuckets_Handler, + }, + { + MethodName: "SearchReads", + Handler: _ReadServiceV1_SearchReads_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/genomics/v1/reads.proto", +} + +func init() { proto.RegisterFile("google/genomics/v1/reads.proto", fileDescriptor9) } + +var fileDescriptor9 = []byte{ + // 1333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xff, 0x8e, 0x1d, 0xb7, 0xcd, 0x73, 0x93, 0x38, 0xe3, 0x7e, 0x8b, 0xe3, 0x90, 0x36, 0x6c, + 0x69, 0x1b, 0x02, 0xb5, 0x89, 0x11, 0x2a, 0x4a, 0x85, 0x44, 0xda, 0x3a, 0xc1, 0x28, 0x69, 0xad, + 0x75, 0x02, 0x82, 0xcb, 0x6a, 0x62, 0x4f, 0xb6, 0x4b, 0xec, 0xdd, 0x65, 0x66, 0x9c, 0xfe, 0x52, + 
0x2f, 0xbd, 0x81, 0x04, 0x1c, 0x10, 0x27, 0xae, 0x5c, 0x39, 0x22, 0xfe, 0x08, 0x4e, 0x88, 0x0b, + 0x7f, 0x00, 0xe2, 0x0f, 0xe0, 0xc4, 0x11, 0xcd, 0xec, 0x6e, 0xbc, 0xeb, 0x9d, 0x6d, 0x1c, 0x55, + 0xe2, 0xb6, 0xfb, 0xe6, 0xb3, 0x6f, 0x3e, 0xef, 0xf7, 0x5b, 0xb8, 0x64, 0x7b, 0x9e, 0xdd, 0xa7, + 0x75, 0x9b, 0xba, 0xde, 0xc0, 0xe9, 0xf2, 0xfa, 0xd1, 0x5a, 0x9d, 0x51, 0xd2, 0xe3, 0x35, 0x9f, + 0x79, 0xc2, 0xc3, 0x38, 0x38, 0xaf, 0x45, 0xe7, 0xb5, 0xa3, 0xb5, 0xea, 0xab, 0xe1, 0x37, 0xc4, + 0x77, 0xea, 0xc4, 0x75, 0x3d, 0x41, 0x84, 0xe3, 0xb9, 0xe1, 0x17, 0x55, 0xad, 0x46, 0xe2, 0xda, + 0x34, 0x3c, 0xbf, 0x96, 0x71, 0x23, 0xe9, 0x3b, 0xb6, 0x3b, 0xa0, 0xae, 0x08, 0x71, 0x57, 0x33, + 0x70, 0x36, 0xf3, 0x86, 0x3e, 0xa7, 0x11, 0xec, 0x4a, 0x08, 0xeb, 0x7b, 0xae, 0xcd, 0x86, 0xae, + 0xeb, 0xb8, 0x76, 0xdd, 0xf3, 0x29, 0x4b, 0x70, 0x5a, 0x0c, 0x41, 0xea, 0x6d, 0x7f, 0x78, 0x50, + 0xa7, 0x03, 0x5f, 0x3c, 0x0e, 0x0f, 0x97, 0xc7, 0x0f, 0x0f, 0x1c, 0xda, 0xef, 0x59, 0x03, 0xc2, + 0x0f, 0x03, 0x84, 0xf1, 0x35, 0x82, 0x6a, 0x87, 0x12, 0xd6, 0x7d, 0x60, 0x52, 0xd2, 0xdb, 0x92, + 0x04, 0x3a, 0x54, 0x70, 0x93, 0x7e, 0x31, 0xa4, 0x5c, 0xe0, 0xcb, 0x50, 0xec, 0x11, 0x41, 0x38, + 0x15, 0x96, 0xd3, 0xe3, 0x15, 0xb4, 0x9c, 0x5f, 0x99, 0x36, 0x21, 0x14, 0xb5, 0x7a, 0x1c, 0x63, + 0x98, 0x72, 0xc9, 0x80, 0x56, 0xf2, 0xcb, 0x68, 0x65, 0xda, 0x54, 0xcf, 0x78, 0x09, 0xc0, 0x27, + 0x36, 0xb5, 0x84, 0x77, 0x48, 0xdd, 0x4a, 0x4e, 0x9d, 0x4c, 0x4b, 0xc9, 0xae, 0x14, 0xe0, 0x45, + 0x50, 0x2f, 0x16, 0x77, 0x9e, 0xd0, 0xca, 0xd4, 0x32, 0x5a, 0x29, 0x98, 0xe7, 0xa4, 0xa0, 0xe3, + 0x3c, 0xa1, 0xc6, 0xb7, 0x08, 0x16, 0xb5, 0x7c, 0xb8, 0xef, 0xb9, 0x9c, 0xe2, 0x0f, 0x61, 0x4e, + 0x7a, 0xca, 0x52, 0xae, 0xb2, 0x38, 0x15, 0x01, 0xa9, 0x62, 0x63, 0xb9, 0x96, 0x0e, 0x67, 0x2d, + 0xae, 0xc3, 0x9c, 0x61, 0x71, 0x8d, 0xf8, 0x1a, 0xcc, 0xb9, 0xf4, 0x91, 0xb0, 0x52, 0x54, 0x67, + 0xa4, 0xb8, 0x1d, 0xd1, 0x35, 0xfe, 0xc8, 0x41, 0xb5, 0x35, 0xf0, 0x3d, 0x26, 0xb4, 0x1e, 0x5a, + 0x02, 0x18, 0x79, 0xa8, 0x82, 0x02, 
0x63, 0x8f, 0x1d, 0x84, 0x57, 0xa0, 0xc4, 0xe8, 0x01, 0x65, + 0xd4, 0xed, 0x52, 0x2b, 0x04, 0x4d, 0x29, 0xd0, 0xec, 0xb1, 0xbc, 0xa3, 0x90, 0x97, 0xa1, 0xc8, + 0xbd, 0x21, 0xeb, 0x52, 0x6b, 0xc8, 0x1c, 0x5e, 0xc9, 0x05, 0xae, 0x0e, 0x44, 0x7b, 0xcc, 0xe1, + 0xd8, 0x03, 0xec, 0x13, 0x26, 0x1c, 0x19, 0x7d, 0x8b, 0x0b, 0x46, 0x04, 0xb5, 0x1f, 0x57, 0x0a, + 0xcb, 0x68, 0x65, 0xb6, 0xf1, 0x81, 0xce, 0xfa, 0x6c, 0xd6, 0xb5, 0x76, 0xa4, 0xa8, 0x13, 0xea, + 0x31, 0xe7, 0xfd, 0x71, 0x91, 0x61, 0xc1, 0x7c, 0x0a, 0x87, 0x0d, 0xb8, 0xd4, 0xde, 0x30, 0x77, + 0x5b, 0xbb, 0xad, 0xfb, 0xf7, 0xac, 0xce, 0xae, 0xb9, 0xb1, 0xdb, 0xdc, 0xfa, 0xd4, 0xda, 0xbb, + 0xd7, 0x69, 0x37, 0xef, 0xb4, 0x36, 0x5b, 0xcd, 0xbb, 0xa5, 0xff, 0xe1, 0x57, 0xa0, 0xdc, 0x6e, + 0x9a, 0xd6, 0x66, 0x6b, 0xbb, 0x69, 0xc9, 0x87, 0xce, 0xc6, 0x4e, 0x7b, 0xbb, 0x59, 0x42, 0x78, + 0x06, 0xa6, 0x77, 0x9a, 0xe6, 0x56, 0xd3, 0xda, 0xd8, 0xde, 0x2e, 0xe5, 0x8c, 0x8f, 0x60, 0x51, + 0xcb, 0x31, 0x8c, 0xf5, 0x9b, 0x80, 0x93, 0xb1, 0x8e, 0xe5, 0xe0, 0x5c, 0x3c, 0x98, 0xad, 0x1e, + 0x37, 0x7e, 0x42, 0xb0, 0xd0, 0x7c, 0x34, 0xae, 0x2c, 0x16, 0x25, 0x9f, 0x79, 0x9f, 0xd3, 0x6e, + 0x3c, 0x4a, 0xa1, 0xa4, 0xd5, 0x93, 0xc7, 0x54, 0x7d, 0x2b, 0x7d, 0x1f, 0x65, 0x6c, 0x20, 0xd9, + 0x63, 0x0e, 0x7e, 0x03, 0xe6, 0x53, 0x44, 0xc2, 0x8c, 0x9f, 0x4d, 0xf2, 0xc0, 0xd7, 0x65, 0x7e, + 0x46, 0xf1, 0x96, 0xd5, 0xc0, 0x2b, 0x53, 0x8a, 0xf0, 0x28, 0xdc, 0xf7, 0xa4, 0xd4, 0xf8, 0x15, + 0xc1, 0xc2, 0x9e, 0xdf, 0x23, 0x82, 0xea, 0xf8, 0x6a, 0x6f, 0x44, 0xda, 0x1b, 0x37, 0x61, 0x36, + 0x09, 0x55, 0xfc, 0x27, 0x29, 0x88, 0xf3, 0x71, 0x4d, 0xf8, 0x16, 0x14, 0x87, 0x8a, 0x8f, 0x6a, + 0x0f, 0xca, 0xbc, 0x62, 0xa3, 0x1a, 0x29, 0x89, 0x3a, 0x48, 0x6d, 0x53, 0x76, 0x90, 0x1d, 0xc2, + 0x0f, 0x4d, 0x08, 0xe0, 0xf2, 0xd9, 0xd8, 0x84, 0x85, 0xbb, 0xb4, 0x4f, 0x5f, 0xd6, 0x18, 0xe3, + 0x0e, 0x5c, 0xdc, 0xa2, 0xe2, 0x25, 0x95, 0x3c, 0xcf, 0x41, 0x75, 0xdb, 0xe1, 0xe2, 0x8e, 0x77, + 0x44, 0x19, 0xb1, 0xe9, 0xed, 0x61, 0xf7, 0x30, 0x56, 0xb1, 0xa7, 0xf0, 
0xed, 0x55, 0x98, 0x4d, + 0x46, 0x33, 0x8c, 0xfa, 0x4c, 0x22, 0x98, 0xf8, 0x02, 0x14, 0xb8, 0x20, 0x4c, 0xa8, 0xca, 0xce, + 0x9b, 0xc1, 0x0b, 0x2e, 0x41, 0x9e, 0xba, 0x3d, 0x55, 0xa0, 0x79, 0x53, 0x3e, 0xe2, 0x1a, 0x94, + 0x05, 0x61, 0x36, 0x15, 0xd6, 0xbe, 0xa2, 0x64, 0x3d, 0x74, 0x7a, 0xe2, 0x41, 0xe5, 0x8c, 0x42, + 0xcc, 0x07, 0x47, 0x01, 0xd9, 0x4f, 0xe4, 0xc1, 0x58, 0x23, 0x3d, 0xfb, 0xc2, 0x46, 0x7a, 0x6e, + 0xac, 0x91, 0x1e, 0xc0, 0x6c, 0xd2, 0x7e, 0x5c, 0x87, 0x82, 0x1a, 0x56, 0xca, 0xd6, 0x62, 0x63, + 0x41, 0x9b, 0x1f, 0x12, 0x60, 0x06, 0x38, 0x7c, 0x05, 0x66, 0x06, 0x94, 0xb8, 0x56, 0x37, 0xd4, + 0xa3, 0x12, 0x2b, 0x67, 0x9e, 0x97, 0xc2, 0x48, 0xb7, 0xf1, 0x0b, 0x82, 0x45, 0xad, 0xb3, 0xc3, + 0x22, 0x7e, 0x0d, 0xce, 0x27, 0x8c, 0x45, 0xca, 0xd8, 0xe2, 0x7e, 0xcc, 0xcc, 0x1d, 0x28, 0x45, + 0x57, 0x84, 0x8e, 0x09, 0xda, 0x5f, 0xb1, 0x61, 0xe8, 0x38, 0x26, 0x6f, 0x32, 0xe7, 0xba, 0xc9, + 0x9b, 0x75, 0x8d, 0x3d, 0xaf, 0x6b, 0xec, 0x7f, 0x23, 0xc0, 0xa3, 0x51, 0x73, 0x9c, 0x1e, 0xa7, + 0xe9, 0x3a, 0xf8, 0xf5, 0x44, 0xf1, 0x49, 0x60, 0x41, 0x01, 0x47, 0xa5, 0x25, 0x51, 0xe9, 0x34, + 0x3a, 0xfb, 0xc2, 0x34, 0x3a, 0xa7, 0x49, 0xa3, 0xe9, 0x51, 0x1a, 0x25, 0xd3, 0x22, 0x7f, 0xaa, + 0xf9, 0xfa, 0x10, 0xca, 0x09, 0x9b, 0xc3, 0x28, 0xbd, 0x07, 0x70, 0xbc, 0xa4, 0x44, 0x13, 0xb5, + 0x92, 0xd5, 0x40, 0xcc, 0x18, 0x76, 0xe2, 0x31, 0xfa, 0x97, 0xf4, 0xb6, 0x60, 0x94, 0x0c, 0x12, + 0xde, 0x3e, 0xa1, 0x31, 0x6b, 0x6b, 0x35, 0xf7, 0x5f, 0xd4, 0xaa, 0xc4, 0x3d, 0x20, 0xac, 0xa7, + 0xaa, 0xb3, 0x60, 0x06, 0x2f, 0x32, 0x9b, 0x85, 0x27, 0x48, 0xdf, 0x52, 0xaf, 0x5c, 0xc5, 0xb1, + 0x60, 0x16, 0x95, 0xac, 0xa3, 0x44, 0xc6, 0x7d, 0x28, 0x27, 0xec, 0x7c, 0x59, 0x0f, 0x37, 0xbe, + 0x47, 0x70, 0x21, 0xd0, 0xe8, 0xb8, 0xb6, 0x3c, 0xed, 0x50, 0x76, 0xe4, 0x74, 0x29, 0x7e, 0x06, + 0xc5, 0xd8, 0x4d, 0xf8, 0x9a, 0x4e, 0x5b, 0xda, 0xe5, 0xd5, 0xeb, 0x27, 0xe2, 0x02, 0xca, 0xc6, + 0xe2, 0xf3, 0xdf, 0xff, 0xfc, 0x2e, 0xf7, 0x7f, 0xa3, 0x74, 0xbc, 0x39, 0xaf, 0x73, 0x05, 0x5b, + 0x47, 0xab, 
0x6f, 0xa3, 0xc6, 0x6f, 0xd3, 0x30, 0x13, 0xa3, 0xf3, 0xf1, 0x1a, 0xfe, 0x12, 0x41, + 0x59, 0x33, 0xd0, 0x71, 0xed, 0x74, 0xdb, 0x49, 0x75, 0x29, 0xc2, 0xc7, 0x36, 0xdf, 0xda, 0xfd, + 0x68, 0xf3, 0x35, 0xae, 0x28, 0x5e, 0x4b, 0x46, 0x65, 0x7c, 0x6f, 0xe6, 0xeb, 0x8e, 0x52, 0xba, + 0x8e, 0x56, 0xf1, 0x0f, 0x08, 0x70, 0x7a, 0x1f, 0xc0, 0x37, 0x74, 0x54, 0x32, 0xf7, 0x86, 0x93, + 0x98, 0xdc, 0x54, 0x4c, 0xd6, 0x8c, 0xb7, 0x52, 0x4c, 0xea, 0x4f, 0x53, 0x79, 0xfb, 0x6c, 0x3d, + 0xd8, 0x28, 0x42, 0x76, 0x65, 0xcd, 0x9a, 0xab, 0xf7, 0x54, 0xf6, 0x7e, 0x5e, 0xad, 0x4f, 0x8c, + 0x0f, 0x63, 0x9a, 0xed, 0xbb, 0x3a, 0x57, 0x9f, 0x49, 0x76, 0x3f, 0x22, 0xc0, 0xe9, 0xdd, 0x44, + 0xef, 0xbb, 0xcc, 0x1d, 0xa6, 0x7a, 0xe2, 0x02, 0x62, 0xbc, 0xaf, 0xc8, 0xdc, 0x6c, 0x5c, 0x9d, + 0xcc, 0x7d, 0x63, 0x7b, 0x0e, 0xfe, 0x0a, 0x01, 0x4e, 0x6f, 0x1d, 0x7a, 0x9a, 0x99, 0xdb, 0x49, + 0xf5, 0x62, 0x6a, 0xc5, 0x69, 0xca, 0x3f, 0x28, 0xe3, 0x86, 0x22, 0x77, 0x7d, 0x75, 0x32, 0x72, + 0xf8, 0x1b, 0x04, 0x73, 0x63, 0xab, 0x0b, 0x5e, 0xd5, 0x31, 0xd1, 0xef, 0x37, 0x13, 0x78, 0x2b, + 0x24, 0x84, 0x27, 0x24, 0xf4, 0x33, 0x82, 0xb2, 0x66, 0x30, 0xeb, 0x53, 0x2c, 0x7b, 0x5d, 0xd2, + 0xa7, 0xd8, 0x0b, 0x26, 0x7e, 0x14, 0x55, 0xfc, 0xee, 0x44, 0x3c, 0xeb, 0xd1, 0xf8, 0x0e, 0x27, + 0x3f, 0x7e, 0x0a, 0xc5, 0xd8, 0x84, 0xca, 0xe8, 0x6a, 0xa9, 0xb1, 0x9d, 0xd1, 0xd5, 0xd2, 0xa3, + 0x4e, 0xd3, 0xd5, 0x46, 0x99, 0x7f, 0x9b, 0xc2, 0xc5, 0xae, 0x37, 0xd0, 0xa8, 0xba, 0x0d, 0x4a, + 0x4b, 0x5b, 0xe6, 0x48, 0x1b, 0x7d, 0xb6, 0x1e, 0x21, 0xbc, 0x3e, 0x71, 0xed, 0x9a, 0xc7, 0x6c, + 0xf9, 0x33, 0xaf, 0x32, 0xa8, 0x1e, 0x1c, 0x11, 0xdf, 0xe1, 0xf1, 0x1f, 0xfc, 0x5b, 0xd1, 0xf3, + 0x3f, 0x08, 0xed, 0x9f, 0x51, 0xc8, 0x77, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x9b, 0xce, + 0x6e, 0xa3, 0x10, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1/references.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/references.pb.go new file mode 100644 index 000000000..ef5e0e38c --- 
/dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/references.pb.go @@ -0,0 +1,864 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/references.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A reference is a canonical assembled DNA sequence, intended to act as a +// reference coordinate space for other genomic annotations. A single reference +// might represent the human chromosome 1 or mitochandrial DNA, for instance. A +// reference belongs to one or more reference sets. +// +// For more genomics resource definitions, see [Fundamentals of Google +// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) +type Reference struct { + // The server-generated reference ID, unique across all references. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The length of this reference's sequence. + Length int64 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` + // MD5 of the upper-case sequence excluding all whitespace characters (this + // is equivalent to SQ:M5 in SAM). This value is represented in lower case + // hexadecimal format. + Md5Checksum string `protobuf:"bytes,3,opt,name=md5checksum" json:"md5checksum,omitempty"` + // The name of this reference, for example `22`. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // The URI from which the sequence was obtained. Typically specifies a FASTA + // format file. 
+ SourceUri string `protobuf:"bytes,5,opt,name=source_uri,json=sourceUri" json:"source_uri,omitempty"` + // All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally + // with a version number, for example `GCF_000001405.26`. + SourceAccessions []string `protobuf:"bytes,6,rep,name=source_accessions,json=sourceAccessions" json:"source_accessions,omitempty"` + // ID from http://www.ncbi.nlm.nih.gov/taxonomy. For example, 9606 for human. + NcbiTaxonId int32 `protobuf:"varint,7,opt,name=ncbi_taxon_id,json=ncbiTaxonId" json:"ncbi_taxon_id,omitempty"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} +func (*Reference) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} } + +func (m *Reference) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Reference) GetLength() int64 { + if m != nil { + return m.Length + } + return 0 +} + +func (m *Reference) GetMd5Checksum() string { + if m != nil { + return m.Md5Checksum + } + return "" +} + +func (m *Reference) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Reference) GetSourceUri() string { + if m != nil { + return m.SourceUri + } + return "" +} + +func (m *Reference) GetSourceAccessions() []string { + if m != nil { + return m.SourceAccessions + } + return nil +} + +func (m *Reference) GetNcbiTaxonId() int32 { + if m != nil { + return m.NcbiTaxonId + } + return 0 +} + +// A reference set is a set of references which typically comprise a reference +// assembly for a species, such as `GRCh38` which is representative +// of the human genome. A reference set defines a common coordinate space for +// comparing reference-aligned experimental data. A reference set contains 1 or +// more references. 
+// +// For more genomics resource definitions, see [Fundamentals of Google +// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) +type ReferenceSet struct { + // The server-generated reference set ID, unique across all reference sets. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The IDs of the reference objects that are part of this set. + // `Reference.md5checksum` must be unique within this set. + ReferenceIds []string `protobuf:"bytes,2,rep,name=reference_ids,json=referenceIds" json:"reference_ids,omitempty"` + // Order-independent MD5 checksum which identifies this reference set. The + // checksum is computed by sorting all lower case hexidecimal string + // `reference.md5checksum` (for all reference in this set) in + // ascending lexicographic order, concatenating, and taking the MD5 of that + // value. The resulting value is represented in lower case hexadecimal format. + Md5Checksum string `protobuf:"bytes,3,opt,name=md5checksum" json:"md5checksum,omitempty"` + // ID from http://www.ncbi.nlm.nih.gov/taxonomy (for example, 9606 for human) + // indicating the species which this reference set is intended to model. Note + // that contained references may specify a different `ncbiTaxonId`, as + // assemblies may contain reference sequences which do not belong to the + // modeled species, for example EBV in a human reference genome. + NcbiTaxonId int32 `protobuf:"varint,4,opt,name=ncbi_taxon_id,json=ncbiTaxonId" json:"ncbi_taxon_id,omitempty"` + // Free text description of this reference set. + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + // Public id of this reference set, such as `GRCh37`. + AssemblyId string `protobuf:"bytes,6,opt,name=assembly_id,json=assemblyId" json:"assembly_id,omitempty"` + // The URI from which the references were obtained. 
+ SourceUri string `protobuf:"bytes,7,opt,name=source_uri,json=sourceUri" json:"source_uri,omitempty"` + // All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally + // with a version number, for example `NC_000001.11`. + SourceAccessions []string `protobuf:"bytes,8,rep,name=source_accessions,json=sourceAccessions" json:"source_accessions,omitempty"` +} + +func (m *ReferenceSet) Reset() { *m = ReferenceSet{} } +func (m *ReferenceSet) String() string { return proto.CompactTextString(m) } +func (*ReferenceSet) ProtoMessage() {} +func (*ReferenceSet) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{1} } + +func (m *ReferenceSet) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ReferenceSet) GetReferenceIds() []string { + if m != nil { + return m.ReferenceIds + } + return nil +} + +func (m *ReferenceSet) GetMd5Checksum() string { + if m != nil { + return m.Md5Checksum + } + return "" +} + +func (m *ReferenceSet) GetNcbiTaxonId() int32 { + if m != nil { + return m.NcbiTaxonId + } + return 0 +} + +func (m *ReferenceSet) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ReferenceSet) GetAssemblyId() string { + if m != nil { + return m.AssemblyId + } + return "" +} + +func (m *ReferenceSet) GetSourceUri() string { + if m != nil { + return m.SourceUri + } + return "" +} + +func (m *ReferenceSet) GetSourceAccessions() []string { + if m != nil { + return m.SourceAccessions + } + return nil +} + +type SearchReferenceSetsRequest struct { + // If present, return reference sets for which the + // [md5checksum][google.genomics.v1.ReferenceSet.md5checksum] matches exactly. + Md5Checksums []string `protobuf:"bytes,1,rep,name=md5checksums" json:"md5checksums,omitempty"` + // If present, return reference sets for which a prefix of any of + // [sourceAccessions][google.genomics.v1.ReferenceSet.source_accessions] + // match any of these strings. 
Accession numbers typically have a main number + // and a version, for example `NC_000001.11`. + Accessions []string `protobuf:"bytes,2,rep,name=accessions" json:"accessions,omitempty"` + // If present, return reference sets for which a substring of their + // `assemblyId` matches this string (case insensitive). + AssemblyId string `protobuf:"bytes,3,opt,name=assembly_id,json=assemblyId" json:"assembly_id,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 1024. The maximum value is 4096. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *SearchReferenceSetsRequest) Reset() { *m = SearchReferenceSetsRequest{} } +func (m *SearchReferenceSetsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchReferenceSetsRequest) ProtoMessage() {} +func (*SearchReferenceSetsRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{2} } + +func (m *SearchReferenceSetsRequest) GetMd5Checksums() []string { + if m != nil { + return m.Md5Checksums + } + return nil +} + +func (m *SearchReferenceSetsRequest) GetAccessions() []string { + if m != nil { + return m.Accessions + } + return nil +} + +func (m *SearchReferenceSetsRequest) GetAssemblyId() string { + if m != nil { + return m.AssemblyId + } + return "" +} + +func (m *SearchReferenceSetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *SearchReferenceSetsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +type SearchReferenceSetsResponse struct { + // The matching references sets. 
+ ReferenceSets []*ReferenceSet `protobuf:"bytes,1,rep,name=reference_sets,json=referenceSets" json:"reference_sets,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchReferenceSetsResponse) Reset() { *m = SearchReferenceSetsResponse{} } +func (m *SearchReferenceSetsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchReferenceSetsResponse) ProtoMessage() {} +func (*SearchReferenceSetsResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{3} } + +func (m *SearchReferenceSetsResponse) GetReferenceSets() []*ReferenceSet { + if m != nil { + return m.ReferenceSets + } + return nil +} + +func (m *SearchReferenceSetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type GetReferenceSetRequest struct { + // The ID of the reference set. + ReferenceSetId string `protobuf:"bytes,1,opt,name=reference_set_id,json=referenceSetId" json:"reference_set_id,omitempty"` +} + +func (m *GetReferenceSetRequest) Reset() { *m = GetReferenceSetRequest{} } +func (m *GetReferenceSetRequest) String() string { return proto.CompactTextString(m) } +func (*GetReferenceSetRequest) ProtoMessage() {} +func (*GetReferenceSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{4} } + +func (m *GetReferenceSetRequest) GetReferenceSetId() string { + if m != nil { + return m.ReferenceSetId + } + return "" +} + +type SearchReferencesRequest struct { + // If present, return references for which the + // [md5checksum][google.genomics.v1.Reference.md5checksum] matches exactly. 
+ Md5Checksums []string `protobuf:"bytes,1,rep,name=md5checksums" json:"md5checksums,omitempty"` + // If present, return references for which a prefix of any of + // [sourceAccessions][google.genomics.v1.Reference.source_accessions] match + // any of these strings. Accession numbers typically have a main number and a + // version, for example `GCF_000001405.26`. + Accessions []string `protobuf:"bytes,2,rep,name=accessions" json:"accessions,omitempty"` + // If present, return only references which belong to this reference set. + ReferenceSetId string `protobuf:"bytes,3,opt,name=reference_set_id,json=referenceSetId" json:"reference_set_id,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 1024. The maximum value is 4096. 
+ PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *SearchReferencesRequest) Reset() { *m = SearchReferencesRequest{} } +func (m *SearchReferencesRequest) String() string { return proto.CompactTextString(m) } +func (*SearchReferencesRequest) ProtoMessage() {} +func (*SearchReferencesRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{5} } + +func (m *SearchReferencesRequest) GetMd5Checksums() []string { + if m != nil { + return m.Md5Checksums + } + return nil +} + +func (m *SearchReferencesRequest) GetAccessions() []string { + if m != nil { + return m.Accessions + } + return nil +} + +func (m *SearchReferencesRequest) GetReferenceSetId() string { + if m != nil { + return m.ReferenceSetId + } + return "" +} + +func (m *SearchReferencesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *SearchReferencesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +type SearchReferencesResponse struct { + // The matching references. + References []*Reference `protobuf:"bytes,1,rep,name=references" json:"references,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchReferencesResponse) Reset() { *m = SearchReferencesResponse{} } +func (m *SearchReferencesResponse) String() string { return proto.CompactTextString(m) } +func (*SearchReferencesResponse) ProtoMessage() {} +func (*SearchReferencesResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{6} } + +func (m *SearchReferencesResponse) GetReferences() []*Reference { + if m != nil { + return m.References + } + return nil +} + +func (m *SearchReferencesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type GetReferenceRequest struct { + // The ID of the reference. + ReferenceId string `protobuf:"bytes,1,opt,name=reference_id,json=referenceId" json:"reference_id,omitempty"` +} + +func (m *GetReferenceRequest) Reset() { *m = GetReferenceRequest{} } +func (m *GetReferenceRequest) String() string { return proto.CompactTextString(m) } +func (*GetReferenceRequest) ProtoMessage() {} +func (*GetReferenceRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{7} } + +func (m *GetReferenceRequest) GetReferenceId() string { + if m != nil { + return m.ReferenceId + } + return "" +} + +type ListBasesRequest struct { + // The ID of the reference. + ReferenceId string `protobuf:"bytes,1,opt,name=reference_id,json=referenceId" json:"reference_id,omitempty"` + // The start position (0-based) of this query. Defaults to 0. + Start int64 `protobuf:"varint,2,opt,name=start" json:"start,omitempty"` + // The end position (0-based, exclusive) of this query. Defaults to the length + // of this reference. + End int64 `protobuf:"varint,3,opt,name=end" json:"end,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of bases to return in a single page. If unspecified, + // defaults to 200Kbp (kilo base pairs). The maximum value is 10Mbp (mega base + // pairs). + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListBasesRequest) Reset() { *m = ListBasesRequest{} } +func (m *ListBasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListBasesRequest) ProtoMessage() {} +func (*ListBasesRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{8} } + +func (m *ListBasesRequest) GetReferenceId() string { + if m != nil { + return m.ReferenceId + } + return "" +} + +func (m *ListBasesRequest) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *ListBasesRequest) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *ListBasesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListBasesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +type ListBasesResponse struct { + // The offset position (0-based) of the given `sequence` from the + // start of this `Reference`. This value will differ for each page + // in a paginated request. + Offset int64 `protobuf:"varint,1,opt,name=offset" json:"offset,omitempty"` + // A substring of the bases that make up this reference. + Sequence string `protobuf:"bytes,2,opt,name=sequence" json:"sequence,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. 
+ NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListBasesResponse) Reset() { *m = ListBasesResponse{} } +func (m *ListBasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListBasesResponse) ProtoMessage() {} +func (*ListBasesResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{9} } + +func (m *ListBasesResponse) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *ListBasesResponse) GetSequence() string { + if m != nil { + return m.Sequence + } + return "" +} + +func (m *ListBasesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*Reference)(nil), "google.genomics.v1.Reference") + proto.RegisterType((*ReferenceSet)(nil), "google.genomics.v1.ReferenceSet") + proto.RegisterType((*SearchReferenceSetsRequest)(nil), "google.genomics.v1.SearchReferenceSetsRequest") + proto.RegisterType((*SearchReferenceSetsResponse)(nil), "google.genomics.v1.SearchReferenceSetsResponse") + proto.RegisterType((*GetReferenceSetRequest)(nil), "google.genomics.v1.GetReferenceSetRequest") + proto.RegisterType((*SearchReferencesRequest)(nil), "google.genomics.v1.SearchReferencesRequest") + proto.RegisterType((*SearchReferencesResponse)(nil), "google.genomics.v1.SearchReferencesResponse") + proto.RegisterType((*GetReferenceRequest)(nil), "google.genomics.v1.GetReferenceRequest") + proto.RegisterType((*ListBasesRequest)(nil), "google.genomics.v1.ListBasesRequest") + proto.RegisterType((*ListBasesResponse)(nil), "google.genomics.v1.ListBasesResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for ReferenceServiceV1 service + +type ReferenceServiceV1Client interface { + // Searches for reference sets which match the given criteria. + // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchReferenceSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L71) + SearchReferenceSets(ctx context.Context, in *SearchReferenceSetsRequest, opts ...grpc.CallOption) (*SearchReferenceSetsResponse, error) + // Gets a reference set. + // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.getReferenceSet](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L83). + GetReferenceSet(ctx context.Context, in *GetReferenceSetRequest, opts ...grpc.CallOption) (*ReferenceSet, error) + // Searches for references which match the given criteria. + // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchReferences](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L146). + SearchReferences(ctx context.Context, in *SearchReferencesRequest, opts ...grpc.CallOption) (*SearchReferencesResponse, error) + // Gets a reference. 
+ // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.getReference](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L158). + GetReference(ctx context.Context, in *GetReferenceRequest, opts ...grpc.CallOption) (*Reference, error) + // Lists the bases in a reference, optionally restricted to a range. + // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.getReferenceBases](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L221). + ListBases(ctx context.Context, in *ListBasesRequest, opts ...grpc.CallOption) (*ListBasesResponse, error) +} + +type referenceServiceV1Client struct { + cc *grpc.ClientConn +} + +func NewReferenceServiceV1Client(cc *grpc.ClientConn) ReferenceServiceV1Client { + return &referenceServiceV1Client{cc} +} + +func (c *referenceServiceV1Client) SearchReferenceSets(ctx context.Context, in *SearchReferenceSetsRequest, opts ...grpc.CallOption) (*SearchReferenceSetsResponse, error) { + out := new(SearchReferenceSetsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReferenceServiceV1/SearchReferenceSets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *referenceServiceV1Client) GetReferenceSet(ctx context.Context, in *GetReferenceSetRequest, opts ...grpc.CallOption) (*ReferenceSet, error) { + out := new(ReferenceSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReferenceServiceV1/GetReferenceSet", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *referenceServiceV1Client) SearchReferences(ctx context.Context, in *SearchReferencesRequest, opts ...grpc.CallOption) (*SearchReferencesResponse, error) { + out := new(SearchReferencesResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReferenceServiceV1/SearchReferences", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *referenceServiceV1Client) GetReference(ctx context.Context, in *GetReferenceRequest, opts ...grpc.CallOption) (*Reference, error) { + out := new(Reference) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReferenceServiceV1/GetReference", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *referenceServiceV1Client) ListBases(ctx context.Context, in *ListBasesRequest, opts ...grpc.CallOption) (*ListBasesResponse, error) { + out := new(ListBasesResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.ReferenceServiceV1/ListBases", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ReferenceServiceV1 service + +type ReferenceServiceV1Server interface { + // Searches for reference sets which match the given criteria. + // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchReferenceSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L71) + SearchReferenceSets(context.Context, *SearchReferenceSetsRequest) (*SearchReferenceSetsResponse, error) + // Gets a reference set. 
+ // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.getReferenceSet](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L83). + GetReferenceSet(context.Context, *GetReferenceSetRequest) (*ReferenceSet, error) + // Searches for references which match the given criteria. + // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchReferences](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L146). + SearchReferences(context.Context, *SearchReferencesRequest) (*SearchReferencesResponse, error) + // Gets a reference. + // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.getReference](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L158). + GetReference(context.Context, *GetReferenceRequest) (*Reference, error) + // Lists the bases in a reference, optionally restricted to a range. + // + // For the definitions of references and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.getReferenceBases](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/referencemethods.avdl#L221). 
+ ListBases(context.Context, *ListBasesRequest) (*ListBasesResponse, error) +} + +func RegisterReferenceServiceV1Server(s *grpc.Server, srv ReferenceServiceV1Server) { + s.RegisterService(&_ReferenceServiceV1_serviceDesc, srv) +} + +func _ReferenceServiceV1_SearchReferenceSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchReferenceSetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReferenceServiceV1Server).SearchReferenceSets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReferenceServiceV1/SearchReferenceSets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReferenceServiceV1Server).SearchReferenceSets(ctx, req.(*SearchReferenceSetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReferenceServiceV1_GetReferenceSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetReferenceSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReferenceServiceV1Server).GetReferenceSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReferenceServiceV1/GetReferenceSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReferenceServiceV1Server).GetReferenceSet(ctx, req.(*GetReferenceSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReferenceServiceV1_SearchReferences_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchReferencesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ReferenceServiceV1Server).SearchReferences(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReferenceServiceV1/SearchReferences", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReferenceServiceV1Server).SearchReferences(ctx, req.(*SearchReferencesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReferenceServiceV1_GetReference_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetReferenceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReferenceServiceV1Server).GetReference(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReferenceServiceV1/GetReference", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReferenceServiceV1Server).GetReference(ctx, req.(*GetReferenceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReferenceServiceV1_ListBases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReferenceServiceV1Server).ListBases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.ReferenceServiceV1/ListBases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReferenceServiceV1Server).ListBases(ctx, req.(*ListBasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ReferenceServiceV1_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.genomics.v1.ReferenceServiceV1", + HandlerType: (*ReferenceServiceV1Server)(nil), + Methods: []grpc.MethodDesc{ + { + 
MethodName: "SearchReferenceSets", + Handler: _ReferenceServiceV1_SearchReferenceSets_Handler, + }, + { + MethodName: "GetReferenceSet", + Handler: _ReferenceServiceV1_GetReferenceSet_Handler, + }, + { + MethodName: "SearchReferences", + Handler: _ReferenceServiceV1_SearchReferences_Handler, + }, + { + MethodName: "GetReference", + Handler: _ReferenceServiceV1_GetReference_Handler, + }, + { + MethodName: "ListBases", + Handler: _ReferenceServiceV1_ListBases_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/genomics/v1/references.proto", +} + +func init() { proto.RegisterFile("google/genomics/v1/references.proto", fileDescriptor10) } + +var fileDescriptor10 = []byte{ + // 851 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xd6, 0x78, 0x63, 0x37, 0x7e, 0x76, 0x12, 0xf7, 0x15, 0xc2, 0xca, 0x25, 0xd4, 0x6c, 0x9a, + 0x62, 0x35, 0x95, 0x57, 0x29, 0x42, 0x42, 0x45, 0x1c, 0xc8, 0xa5, 0x8a, 0xc4, 0x21, 0xda, 0x14, + 0x0e, 0x5c, 0x56, 0x9b, 0xdd, 0x89, 0x33, 0x34, 0xde, 0x31, 0x3b, 0x93, 0xa8, 0xb4, 0xca, 0x01, + 0x24, 0x8e, 0xc0, 0x81, 0x0b, 0x88, 0xdf, 0xc2, 0x89, 0x9f, 0xc0, 0x09, 0x71, 0xe5, 0x47, 0x70, + 0x44, 0x33, 0x3b, 0xbb, 0x1e, 0xaf, 0x97, 0xd8, 0x52, 0xb9, 0xed, 0x7c, 0xf3, 0xe6, 0xcd, 0xf7, + 0x7d, 0x6f, 0xde, 0xec, 0xc0, 0xee, 0x98, 0xf3, 0xf1, 0x05, 0xf5, 0xc7, 0x34, 0xe5, 0x13, 0x16, + 0x0b, 0xff, 0xea, 0xc0, 0xcf, 0xe8, 0x19, 0xcd, 0x68, 0x1a, 0x53, 0x31, 0x9a, 0x66, 0x5c, 0x72, + 0xc4, 0x3c, 0x68, 0x54, 0x04, 0x8d, 0xae, 0x0e, 0xfa, 0x6f, 0x9b, 0x85, 0xd1, 0x94, 0xf9, 0x51, + 0x9a, 0x72, 0x19, 0x49, 0xc6, 0x53, 0xb3, 0xc2, 0xfb, 0x93, 0x40, 0x3b, 0x28, 0xd2, 0xe0, 0x26, + 0x34, 0x58, 0xe2, 0x92, 0x01, 0x19, 0xb6, 0x83, 0x06, 0x4b, 0x70, 0x1b, 0x5a, 0x17, 0x34, 0x1d, + 0xcb, 0x73, 0xb7, 0x31, 0x20, 0x43, 0x27, 0x30, 0x23, 0x1c, 0x40, 0x67, 0x92, 0x7c, 0x10, 0x9f, + 0xd3, 0xf8, 0xb9, 0xb8, 0x9c, 0xb8, 0x8e, 0x5e, 0x60, 0x43, 0x88, 0xb0, 0x96, 
0x46, 0x13, 0xea, + 0xae, 0xe9, 0x29, 0xfd, 0x8d, 0x3b, 0x00, 0x82, 0x5f, 0x66, 0x31, 0x0d, 0x2f, 0x33, 0xe6, 0x36, + 0xf5, 0x4c, 0x3b, 0x47, 0x3e, 0xcb, 0x18, 0xee, 0xc3, 0x6d, 0x33, 0x1d, 0xc5, 0x31, 0x15, 0x42, + 0xb1, 0x74, 0x5b, 0x03, 0x67, 0xd8, 0x0e, 0x7a, 0xf9, 0xc4, 0x27, 0x25, 0x8e, 0x1e, 0x6c, 0xa4, + 0xf1, 0x29, 0x0b, 0x65, 0xf4, 0x82, 0xa7, 0x21, 0x4b, 0xdc, 0x5b, 0x03, 0x32, 0x6c, 0x06, 0x1d, + 0x05, 0x3e, 0x53, 0xd8, 0x51, 0xe2, 0xfd, 0xdc, 0x80, 0x6e, 0xa9, 0xed, 0x84, 0xca, 0x05, 0x79, + 0xbb, 0xb0, 0x51, 0x5a, 0x18, 0xb2, 0x44, 0xb8, 0x0d, 0xbd, 0x5b, 0xb7, 0x04, 0x8f, 0x12, 0xb1, + 0x82, 0xd6, 0x05, 0x2e, 0x6b, 0x0b, 0x5c, 0x54, 0x96, 0x84, 0x8a, 0x38, 0x63, 0x53, 0xe5, 0xbe, + 0x11, 0x6f, 0x43, 0x78, 0x0f, 0x3a, 0x91, 0x10, 0x74, 0x72, 0x7a, 0xf1, 0xb5, 0xca, 0xd1, 0xd2, + 0x11, 0x50, 0x40, 0x47, 0x49, 0xc5, 0xbe, 0x5b, 0x2b, 0xd9, 0xb7, 0x5e, 0x6f, 0x9f, 0xf7, 0x1b, + 0x81, 0xfe, 0x09, 0x8d, 0xb2, 0xf8, 0xdc, 0x36, 0x48, 0x04, 0xf4, 0xab, 0x4b, 0x2a, 0x24, 0x7a, + 0xd0, 0xb5, 0x04, 0x0a, 0x97, 0xe4, 0xbe, 0xd8, 0x18, 0xbe, 0x03, 0x60, 0x6d, 0x94, 0x3b, 0x67, + 0x21, 0x55, 0x3d, 0x4e, 0x9d, 0x9e, 0x69, 0x34, 0xa6, 0xa1, 0xe4, 0xcf, 0x69, 0x6a, 0x0e, 0x4a, + 0x5b, 0x21, 0xcf, 0x14, 0x80, 0x77, 0x41, 0x0f, 0x42, 0xc1, 0x5e, 0x52, 0xed, 0x57, 0x33, 0x58, + 0x57, 0xc0, 0x09, 0x7b, 0x49, 0xbd, 0x1f, 0x08, 0xdc, 0xad, 0xe5, 0x2f, 0xa6, 0x3c, 0x15, 0x14, + 0x9f, 0xc2, 0xe6, 0xac, 0xb2, 0x82, 0xca, 0x5c, 0x42, 0xe7, 0xf1, 0x60, 0xb4, 0xd8, 0x21, 0x23, + 0x3b, 0x45, 0x30, 0x3b, 0x11, 0x2a, 0x21, 0x3e, 0x80, 0xad, 0x94, 0xbe, 0x90, 0xa1, 0xc5, 0xb4, + 0xa1, 0x99, 0x6e, 0x28, 0xf8, 0xb8, 0x60, 0xeb, 0x1d, 0xc2, 0xf6, 0x53, 0x2a, 0xe7, 0x32, 0x19, + 0x2f, 0x87, 0xd0, 0x9b, 0xa3, 0x12, 0x96, 0x47, 0x70, 0xd3, 0xde, 0xea, 0x28, 0xf1, 0x7e, 0x27, + 0xf0, 0x56, 0x45, 0xd4, 0xff, 0x5a, 0x91, 0x3a, 0x26, 0x4e, 0x1d, 0x93, 0xd7, 0x2a, 0xcd, 0x37, + 0x04, 0xdc, 0x45, 0x15, 0xa6, 0x2e, 0x1f, 0x03, 0xcc, 0x2e, 0x2d, 0x53, 0x93, 0x9d, 0x1b, 0x6b, + 0x12, 0x58, 0x0b, 
0x56, 0xae, 0xc6, 0x87, 0x70, 0xc7, 0xae, 0x46, 0x61, 0xe2, 0xbb, 0xd0, 0xb5, + 0xfb, 0xdd, 0x94, 0xa1, 0x63, 0xb5, 0xbb, 0xf7, 0x0b, 0x81, 0xde, 0xa7, 0x4c, 0xc8, 0xc3, 0x48, + 0xcc, 0xcc, 0x5f, 0xbe, 0x0e, 0xdf, 0x80, 0xa6, 0x90, 0x51, 0x26, 0xcd, 0x45, 0x99, 0x0f, 0xb0, + 0x07, 0x0e, 0x4d, 0x73, 0x93, 0x9d, 0x40, 0x7d, 0xbe, 0x96, 0xb3, 0x1c, 0x6e, 0x5b, 0xd4, 0x8c, + 0xa3, 0xdb, 0xd0, 0xe2, 0x67, 0x67, 0x82, 0x4a, 0xcd, 0xca, 0x09, 0xcc, 0x08, 0xfb, 0xb0, 0x2e, + 0x14, 0xfd, 0x34, 0xa6, 0xc6, 0xa3, 0x72, 0x5c, 0x67, 0xa3, 0x53, 0x63, 0xe3, 0xe3, 0xbf, 0x9a, + 0x80, 0xd6, 0x91, 0xce, 0xae, 0x58, 0x4c, 0x3f, 0x3f, 0xc0, 0x5f, 0x09, 0xdc, 0xa9, 0x69, 0x3e, + 0x1c, 0xd5, 0x15, 0xf2, 0xbf, 0x6f, 0x99, 0xbe, 0xbf, 0x72, 0x7c, 0xae, 0xd5, 0xdb, 0xfd, 0xf6, + 0x8f, 0xbf, 0x7f, 0x6a, 0xec, 0x78, 0xee, 0xfc, 0xcf, 0x8f, 0x4a, 0xe1, 0x0b, 0xbd, 0xec, 0x09, + 0x79, 0x88, 0xdf, 0x13, 0xd8, 0xaa, 0xb4, 0x22, 0x3e, 0xac, 0xdb, 0xa9, 0xbe, 0x5f, 0xfb, 0x4b, + 0xaf, 0x08, 0xef, 0x91, 0xa6, 0xf1, 0x00, 0xef, 0x2f, 0xd2, 0x78, 0x55, 0x6d, 0xb0, 0x6b, 0xfc, + 0x91, 0x40, 0xaf, 0xda, 0x0f, 0xb8, 0xbf, 0x82, 0xf4, 0xd2, 0xa7, 0x47, 0xab, 0x05, 0x1b, 0x93, + 0x06, 0x9a, 0x5d, 0xdf, 0x7b, 0x73, 0x9e, 0x9d, 0xe5, 0xd0, 0x35, 0x74, 0x6d, 0xed, 0xf8, 0xde, + 0x32, 0x77, 0x0a, 0x22, 0x37, 0x77, 0xaa, 0xb7, 0xa7, 0x77, 0xbe, 0x87, 0x3b, 0x95, 0x9d, 0x5f, + 0xd9, 0xcd, 0x73, 0x8d, 0xdf, 0x11, 0x68, 0x97, 0xe7, 0x18, 0xef, 0xd7, 0xe5, 0xac, 0x76, 0x60, + 0x7f, 0x6f, 0x49, 0x94, 0xd1, 0xbe, 0xaf, 0x19, 0xec, 0xe1, 0xee, 0x8d, 0x0c, 0xfc, 0x53, 0xb5, + 0xe8, 0xf0, 0x4b, 0xd8, 0x8e, 0xf9, 0xa4, 0x26, 0xf1, 0xe1, 0xd6, 0xcc, 0xd6, 0x63, 0xf5, 0x4a, + 0x3a, 0x26, 0x5f, 0x3c, 0x29, 0xc2, 0xf8, 0x45, 0x94, 0x8e, 0x47, 0x3c, 0x1b, 0xab, 0x97, 0x98, + 0x7e, 0x43, 0xf9, 0xf9, 0x54, 0x34, 0x65, 0xc2, 0x7e, 0x9d, 0x7d, 0x54, 0x7c, 0xff, 0x43, 0xc8, + 0x69, 0x4b, 0x47, 0xbe, 0xff, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x9f, 0xb6, 0x11, 0xc6, + 0x09, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/genomics/v1/variants.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1/variants.pb.go new file mode 100644 index 000000000..0bac56bb5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/genomics/v1/variants.pb.go @@ -0,0 +1,2808 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1/variants.proto + +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Operations to be performed during import on Variant info fields. +// These operations are set for each info field in the info_merge_config +// map of ImportVariantsRequest, which is plumbed down to the +// MergeVariantRequests generated by the import job. +type InfoMergeOperation int32 + +const ( + InfoMergeOperation_INFO_MERGE_OPERATION_UNSPECIFIED InfoMergeOperation = 0 + // By default, Variant info fields are persisted if the Variant doesn't + // already exist in the variantset. If the Variant is equivalent to a + // Variant already in the variantset, the incoming Variant's info field + // is ignored in favor of that of the already persisted Variant. + InfoMergeOperation_IGNORE_NEW InfoMergeOperation = 1 + // This operation removes an info field from the incoming Variant + // and persists this info field in each of the incoming Variant's Calls. 
+ InfoMergeOperation_MOVE_TO_CALLS InfoMergeOperation = 2 +) + +var InfoMergeOperation_name = map[int32]string{ + 0: "INFO_MERGE_OPERATION_UNSPECIFIED", + 1: "IGNORE_NEW", + 2: "MOVE_TO_CALLS", +} +var InfoMergeOperation_value = map[string]int32{ + "INFO_MERGE_OPERATION_UNSPECIFIED": 0, + "IGNORE_NEW": 1, + "MOVE_TO_CALLS": 2, +} + +func (x InfoMergeOperation) String() string { + return proto.EnumName(InfoMergeOperation_name, int32(x)) +} +func (InfoMergeOperation) EnumDescriptor() ([]byte, []int) { return fileDescriptor11, []int{0} } + +type VariantSetMetadata_Type int32 + +const ( + VariantSetMetadata_TYPE_UNSPECIFIED VariantSetMetadata_Type = 0 + VariantSetMetadata_INTEGER VariantSetMetadata_Type = 1 + VariantSetMetadata_FLOAT VariantSetMetadata_Type = 2 + VariantSetMetadata_FLAG VariantSetMetadata_Type = 3 + VariantSetMetadata_CHARACTER VariantSetMetadata_Type = 4 + VariantSetMetadata_STRING VariantSetMetadata_Type = 5 +) + +var VariantSetMetadata_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "INTEGER", + 2: "FLOAT", + 3: "FLAG", + 4: "CHARACTER", + 5: "STRING", +} +var VariantSetMetadata_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "INTEGER": 1, + "FLOAT": 2, + "FLAG": 3, + "CHARACTER": 4, + "STRING": 5, +} + +func (x VariantSetMetadata_Type) String() string { + return proto.EnumName(VariantSetMetadata_Type_name, int32(x)) +} +func (VariantSetMetadata_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor11, []int{0, 0} } + +type ImportVariantsRequest_Format int32 + +const ( + ImportVariantsRequest_FORMAT_UNSPECIFIED ImportVariantsRequest_Format = 0 + // VCF (Variant Call Format). The VCF files may be gzip compressed. gVCF is + // also supported. + ImportVariantsRequest_FORMAT_VCF ImportVariantsRequest_Format = 1 + // Complete Genomics masterVarBeta format. The masterVarBeta files may + // be bzip2 compressed. 
+ ImportVariantsRequest_FORMAT_COMPLETE_GENOMICS ImportVariantsRequest_Format = 2 +) + +var ImportVariantsRequest_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "FORMAT_VCF", + 2: "FORMAT_COMPLETE_GENOMICS", +} +var ImportVariantsRequest_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "FORMAT_VCF": 1, + "FORMAT_COMPLETE_GENOMICS": 2, +} + +func (x ImportVariantsRequest_Format) String() string { + return proto.EnumName(ImportVariantsRequest_Format_name, int32(x)) +} +func (ImportVariantsRequest_Format) EnumDescriptor() ([]byte, []int) { + return fileDescriptor11, []int{6, 0} +} + +type ExportVariantSetRequest_Format int32 + +const ( + ExportVariantSetRequest_FORMAT_UNSPECIFIED ExportVariantSetRequest_Format = 0 + // Export the data to Google BigQuery. + ExportVariantSetRequest_FORMAT_BIGQUERY ExportVariantSetRequest_Format = 1 +) + +var ExportVariantSetRequest_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "FORMAT_BIGQUERY", +} +var ExportVariantSetRequest_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "FORMAT_BIGQUERY": 1, +} + +func (x ExportVariantSetRequest_Format) String() string { + return proto.EnumName(ExportVariantSetRequest_Format_name, int32(x)) +} +func (ExportVariantSetRequest_Format) EnumDescriptor() ([]byte, []int) { + return fileDescriptor11, []int{9, 0} +} + +// Metadata describes a single piece of variant call metadata. +// These data include a top level key and either a single value string (value) +// or a list of key-value pairs (info.) +// Value and info are mutually exclusive. +type VariantSetMetadata struct { + // The top-level key. + Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + // The value field for simple metadata + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + // User-provided ID field, not enforced by this API. + // Two or more pieces of structured metadata with identical + // id and key fields are considered equivalent. 
+ Id string `protobuf:"bytes,4,opt,name=id" json:"id,omitempty"` + // The type of data. Possible types include: Integer, Float, + // Flag, Character, and String. + Type VariantSetMetadata_Type `protobuf:"varint,5,opt,name=type,enum=google.genomics.v1.VariantSetMetadata_Type" json:"type,omitempty"` + // The number of values that can be included in a field described by this + // metadata. + Number string `protobuf:"bytes,8,opt,name=number" json:"number,omitempty"` + // A textual description of this metadata. + Description string `protobuf:"bytes,7,opt,name=description" json:"description,omitempty"` + // Remaining structured metadata key-value pairs. This must be of the form + // map (string key mapping to a list of string values). + Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,3,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *VariantSetMetadata) Reset() { *m = VariantSetMetadata{} } +func (m *VariantSetMetadata) String() string { return proto.CompactTextString(m) } +func (*VariantSetMetadata) ProtoMessage() {} +func (*VariantSetMetadata) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0} } + +func (m *VariantSetMetadata) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *VariantSetMetadata) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *VariantSetMetadata) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *VariantSetMetadata) GetType() VariantSetMetadata_Type { + if m != nil { + return m.Type + } + return VariantSetMetadata_TYPE_UNSPECIFIED +} + +func (m *VariantSetMetadata) GetNumber() string { + if m != nil { + return m.Number + } + return "" +} + +func (m *VariantSetMetadata) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *VariantSetMetadata) GetInfo() map[string]*google_protobuf3.ListValue { + if m != nil { + return 
m.Info + } + return nil +} + +// A variant set is a collection of call sets and variants. It contains summary +// statistics of those contents. A variant set belongs to a dataset. +// +// For more genomics resource definitions, see [Fundamentals of Google +// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) +type VariantSet struct { + // The dataset to which this variant set belongs. + DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // The server-generated variant set ID, unique across all variant sets. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // The reference set to which the variant set is mapped. The reference set + // describes the alignment provenance of the variant set, while the + // `referenceBounds` describe the shape of the actual variant data. The + // reference set's reference names are a superset of those found in the + // `referenceBounds`. + // + // For example, given a variant set that is mapped to the GRCh38 reference set + // and contains a single variant on reference 'X', `referenceBounds` would + // contain only an entry for 'X', while the associated reference set + // enumerates all possible references: '1', '2', 'X', 'Y', 'MT', etc. + ReferenceSetId string `protobuf:"bytes,6,opt,name=reference_set_id,json=referenceSetId" json:"reference_set_id,omitempty"` + // A list of all references used by the variants in a variant set + // with associated coordinate upper bounds for each one. + ReferenceBounds []*ReferenceBound `protobuf:"bytes,5,rep,name=reference_bounds,json=referenceBounds" json:"reference_bounds,omitempty"` + // The metadata associated with this variant set. + Metadata []*VariantSetMetadata `protobuf:"bytes,4,rep,name=metadata" json:"metadata,omitempty"` + // User-specified, mutable name. + Name string `protobuf:"bytes,7,opt,name=name" json:"name,omitempty"` + // A textual description of this variant set. 
+ Description string `protobuf:"bytes,8,opt,name=description" json:"description,omitempty"` +} + +func (m *VariantSet) Reset() { *m = VariantSet{} } +func (m *VariantSet) String() string { return proto.CompactTextString(m) } +func (*VariantSet) ProtoMessage() {} +func (*VariantSet) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{1} } + +func (m *VariantSet) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *VariantSet) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *VariantSet) GetReferenceSetId() string { + if m != nil { + return m.ReferenceSetId + } + return "" +} + +func (m *VariantSet) GetReferenceBounds() []*ReferenceBound { + if m != nil { + return m.ReferenceBounds + } + return nil +} + +func (m *VariantSet) GetMetadata() []*VariantSetMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *VariantSet) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VariantSet) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// A variant represents a change in DNA sequence relative to a reference +// sequence. For example, a variant could represent a SNP or an insertion. +// Variants belong to a variant set. +// +// For more genomics resource definitions, see [Fundamentals of Google +// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) +// +// Each of the calls on a variant represent a determination of genotype with +// respect to that variant. For example, a call might assign probability of 0.32 +// to the occurrence of a SNP named rs1234 in a sample named NA12345. A call +// belongs to a call set, which contains related calls typically from one +// sample. +type Variant struct { + // The ID of the variant set this variant belongs to. 
+ VariantSetId string `protobuf:"bytes,15,opt,name=variant_set_id,json=variantSetId" json:"variant_set_id,omitempty"` + // The server-generated variant ID, unique across all variants. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Names for the variant, for example a RefSNP ID. + Names []string `protobuf:"bytes,3,rep,name=names" json:"names,omitempty"` + // The date this variant was created, in milliseconds from the epoch. + Created int64 `protobuf:"varint,12,opt,name=created" json:"created,omitempty"` + // The reference on which this variant occurs. + // (such as `chr20` or `X`) + ReferenceName string `protobuf:"bytes,14,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The position at which this variant occurs (0-based). + // This corresponds to the first base of the string of reference bases. + Start int64 `protobuf:"varint,16,opt,name=start" json:"start,omitempty"` + // The end position (0-based) of this variant. This corresponds to the first + // base after the last base in the reference allele. So, the length of + // the reference allele is (end - start). This is useful for variants + // that don't explicitly give alternate bases, for example large deletions. + End int64 `protobuf:"varint,13,opt,name=end" json:"end,omitempty"` + // The reference bases for this variant. They start at the given + // position. + ReferenceBases string `protobuf:"bytes,6,opt,name=reference_bases,json=referenceBases" json:"reference_bases,omitempty"` + // The bases that appear instead of the reference bases. + AlternateBases []string `protobuf:"bytes,7,rep,name=alternate_bases,json=alternateBases" json:"alternate_bases,omitempty"` + // A measure of how likely this variant is to be real. + // A higher value is better. + Quality float64 `protobuf:"fixed64,8,opt,name=quality" json:"quality,omitempty"` + // A list of filters (normally quality filters) this variant has failed. 
+ // `PASS` indicates this variant has passed all filters. + Filter []string `protobuf:"bytes,9,rep,name=filter" json:"filter,omitempty"` + // A map of additional variant information. This must be of the form + // map (string key mapping to a list of string values). + Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,10,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The variant calls for this particular variant. Each one represents the + // determination of genotype with respect to this variant. + Calls []*VariantCall `protobuf:"bytes,11,rep,name=calls" json:"calls,omitempty"` +} + +func (m *Variant) Reset() { *m = Variant{} } +func (m *Variant) String() string { return proto.CompactTextString(m) } +func (*Variant) ProtoMessage() {} +func (*Variant) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{2} } + +func (m *Variant) GetVariantSetId() string { + if m != nil { + return m.VariantSetId + } + return "" +} + +func (m *Variant) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Variant) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + +func (m *Variant) GetCreated() int64 { + if m != nil { + return m.Created + } + return 0 +} + +func (m *Variant) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *Variant) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Variant) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *Variant) GetReferenceBases() string { + if m != nil { + return m.ReferenceBases + } + return "" +} + +func (m *Variant) GetAlternateBases() []string { + if m != nil { + return m.AlternateBases + } + return nil +} + +func (m *Variant) GetQuality() float64 { + if m != nil { + return m.Quality + } + return 0 +} + +func (m *Variant) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} 
+ +func (m *Variant) GetInfo() map[string]*google_protobuf3.ListValue { + if m != nil { + return m.Info + } + return nil +} + +func (m *Variant) GetCalls() []*VariantCall { + if m != nil { + return m.Calls + } + return nil +} + +// A call represents the determination of genotype with respect to a particular +// variant. It may include associated information such as quality and phasing. +// For example, a call might assign a probability of 0.32 to the occurrence of +// a SNP named rs1234 in a call set with the name NA12345. +type VariantCall struct { + // The ID of the call set this variant call belongs to. + CallSetId string `protobuf:"bytes,8,opt,name=call_set_id,json=callSetId" json:"call_set_id,omitempty"` + // The name of the call set this variant call belongs to. + CallSetName string `protobuf:"bytes,9,opt,name=call_set_name,json=callSetName" json:"call_set_name,omitempty"` + // The genotype of this variant call. Each value represents either the value + // of the `referenceBases` field or a 1-based index into + // `alternateBases`. If a variant had a `referenceBases` + // value of `T` and an `alternateBases` + // value of `["A", "C"]`, and the `genotype` was + // `[2, 1]`, that would mean the call + // represented the heterozygous value `CA` for this variant. + // If the `genotype` was instead `[0, 1]`, the + // represented value would be `TA`. Ordering of the + // genotype values is important if the `phaseset` is present. + // If a genotype is not called (that is, a `.` is present in the + // GT string) -1 is returned. + Genotype []int32 `protobuf:"varint,7,rep,packed,name=genotype" json:"genotype,omitempty"` + // If this field is present, this variant call's genotype ordering implies + // the phase of the bases and is consistent with any other variant calls in + // the same reference sequence which have the same phaseset value. 
+ // When importing data from VCF, if the genotype data was phased but no + // phase set was specified this field will be set to `*`. + Phaseset string `protobuf:"bytes,5,opt,name=phaseset" json:"phaseset,omitempty"` + // The genotype likelihoods for this variant call. Each array entry + // represents how likely a specific genotype is for this call. The value + // ordering is defined by the GL tag in the VCF spec. + // If Phred-scaled genotype likelihood scores (PL) are available and + // log10(P) genotype likelihood scores (GL) are not, PL scores are converted + // to GL scores. If both are available, PL scores are stored in `info`. + GenotypeLikelihood []float64 `protobuf:"fixed64,6,rep,packed,name=genotype_likelihood,json=genotypeLikelihood" json:"genotype_likelihood,omitempty"` + // A map of additional variant call information. This must be of the form + // map (string key mapping to a list of string values). + Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,2,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *VariantCall) Reset() { *m = VariantCall{} } +func (m *VariantCall) String() string { return proto.CompactTextString(m) } +func (*VariantCall) ProtoMessage() {} +func (*VariantCall) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{3} } + +func (m *VariantCall) GetCallSetId() string { + if m != nil { + return m.CallSetId + } + return "" +} + +func (m *VariantCall) GetCallSetName() string { + if m != nil { + return m.CallSetName + } + return "" +} + +func (m *VariantCall) GetGenotype() []int32 { + if m != nil { + return m.Genotype + } + return nil +} + +func (m *VariantCall) GetPhaseset() string { + if m != nil { + return m.Phaseset + } + return "" +} + +func (m *VariantCall) GetGenotypeLikelihood() []float64 { + if m != nil { + return m.GenotypeLikelihood + } + return nil +} + +func (m *VariantCall) GetInfo() map[string]*google_protobuf3.ListValue { + if m 
!= nil { + return m.Info + } + return nil +} + +// A call set is a collection of variant calls, typically for one sample. It +// belongs to a variant set. +// +// For more genomics resource definitions, see [Fundamentals of Google +// Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) +type CallSet struct { + // The server-generated call set ID, unique across all call sets. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The call set name. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // The sample ID this call set corresponds to. + SampleId string `protobuf:"bytes,7,opt,name=sample_id,json=sampleId" json:"sample_id,omitempty"` + // The IDs of the variant sets this call set belongs to. This field must + // have exactly length one, as a call set belongs to a single variant set. + // This field is repeated for compatibility with the + // [GA4GH 0.5.1 + // API](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variants.avdl#L76). + VariantSetIds []string `protobuf:"bytes,6,rep,name=variant_set_ids,json=variantSetIds" json:"variant_set_ids,omitempty"` + // The date this call set was created in milliseconds from the epoch. + Created int64 `protobuf:"varint,5,opt,name=created" json:"created,omitempty"` + // A map of additional call set information. This must be of the form + // map (string key mapping to a list of string values). 
+ Info map[string]*google_protobuf3.ListValue `protobuf:"bytes,4,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *CallSet) Reset() { *m = CallSet{} } +func (m *CallSet) String() string { return proto.CompactTextString(m) } +func (*CallSet) ProtoMessage() {} +func (*CallSet) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{4} } + +func (m *CallSet) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *CallSet) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CallSet) GetSampleId() string { + if m != nil { + return m.SampleId + } + return "" +} + +func (m *CallSet) GetVariantSetIds() []string { + if m != nil { + return m.VariantSetIds + } + return nil +} + +func (m *CallSet) GetCreated() int64 { + if m != nil { + return m.Created + } + return 0 +} + +func (m *CallSet) GetInfo() map[string]*google_protobuf3.ListValue { + if m != nil { + return m.Info + } + return nil +} + +// ReferenceBound records an upper bound for the starting coordinate of +// variants in a particular reference. +type ReferenceBound struct { + // The name of the reference associated with this reference bound. + ReferenceName string `protobuf:"bytes,1,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // An upper bound (inclusive) on the starting coordinate of any + // variant in the reference sequence. 
+ UpperBound int64 `protobuf:"varint,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` +} + +func (m *ReferenceBound) Reset() { *m = ReferenceBound{} } +func (m *ReferenceBound) String() string { return proto.CompactTextString(m) } +func (*ReferenceBound) ProtoMessage() {} +func (*ReferenceBound) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{5} } + +func (m *ReferenceBound) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *ReferenceBound) GetUpperBound() int64 { + if m != nil { + return m.UpperBound + } + return 0 +} + +// The variant data import request. +type ImportVariantsRequest struct { + // Required. The variant set to which variant data should be imported. + VariantSetId string `protobuf:"bytes,1,opt,name=variant_set_id,json=variantSetId" json:"variant_set_id,omitempty"` + // A list of URIs referencing variant files in Google Cloud Storage. URIs can + // include wildcards [as described + // here](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames). + // Note that recursive wildcards ('**') are not supported. + SourceUris []string `protobuf:"bytes,2,rep,name=source_uris,json=sourceUris" json:"source_uris,omitempty"` + // The format of the variant data being imported. If unspecified, defaults to + // to `VCF`. + Format ImportVariantsRequest_Format `protobuf:"varint,3,opt,name=format,enum=google.genomics.v1.ImportVariantsRequest_Format" json:"format,omitempty"` + // Convert reference names to the canonical representation. + // hg19 haploytypes (those reference names containing "_hap") + // are not modified in any way. + // All other reference names are modified according to the following rules: + // The reference name is capitalized. + // The "chr" prefix is dropped for all autosomes and sex chromsomes. + // For example "chr17" becomes "17" and "chrX" becomes "X". + // All mitochondrial chromosomes ("chrM", "chrMT", etc) become "MT". 
+ NormalizeReferenceNames bool `protobuf:"varint,5,opt,name=normalize_reference_names,json=normalizeReferenceNames" json:"normalize_reference_names,omitempty"` + // A mapping between info field keys and the InfoMergeOperations to + // be performed on them. This is plumbed down to the MergeVariantRequests + // generated by the resulting import job. + InfoMergeConfig map[string]InfoMergeOperation `protobuf:"bytes,6,rep,name=info_merge_config,json=infoMergeConfig" json:"info_merge_config,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=google.genomics.v1.InfoMergeOperation"` +} + +func (m *ImportVariantsRequest) Reset() { *m = ImportVariantsRequest{} } +func (m *ImportVariantsRequest) String() string { return proto.CompactTextString(m) } +func (*ImportVariantsRequest) ProtoMessage() {} +func (*ImportVariantsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{6} } + +func (m *ImportVariantsRequest) GetVariantSetId() string { + if m != nil { + return m.VariantSetId + } + return "" +} + +func (m *ImportVariantsRequest) GetSourceUris() []string { + if m != nil { + return m.SourceUris + } + return nil +} + +func (m *ImportVariantsRequest) GetFormat() ImportVariantsRequest_Format { + if m != nil { + return m.Format + } + return ImportVariantsRequest_FORMAT_UNSPECIFIED +} + +func (m *ImportVariantsRequest) GetNormalizeReferenceNames() bool { + if m != nil { + return m.NormalizeReferenceNames + } + return false +} + +func (m *ImportVariantsRequest) GetInfoMergeConfig() map[string]InfoMergeOperation { + if m != nil { + return m.InfoMergeConfig + } + return nil +} + +// The variant data import response. +type ImportVariantsResponse struct { + // IDs of the call sets created during the import. 
+ CallSetIds []string `protobuf:"bytes,1,rep,name=call_set_ids,json=callSetIds" json:"call_set_ids,omitempty"` +} + +func (m *ImportVariantsResponse) Reset() { *m = ImportVariantsResponse{} } +func (m *ImportVariantsResponse) String() string { return proto.CompactTextString(m) } +func (*ImportVariantsResponse) ProtoMessage() {} +func (*ImportVariantsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{7} } + +func (m *ImportVariantsResponse) GetCallSetIds() []string { + if m != nil { + return m.CallSetIds + } + return nil +} + +// The CreateVariantSet request +type CreateVariantSetRequest struct { + // Required. The variant set to be created. Must have a valid `datasetId`. + VariantSet *VariantSet `protobuf:"bytes,1,opt,name=variant_set,json=variantSet" json:"variant_set,omitempty"` +} + +func (m *CreateVariantSetRequest) Reset() { *m = CreateVariantSetRequest{} } +func (m *CreateVariantSetRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVariantSetRequest) ProtoMessage() {} +func (*CreateVariantSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{8} } + +func (m *CreateVariantSetRequest) GetVariantSet() *VariantSet { + if m != nil { + return m.VariantSet + } + return nil +} + +// The variant data export request. +type ExportVariantSetRequest struct { + // Required. The ID of the variant set that contains variant data which + // should be exported. The caller must have READ access to this variant set. + VariantSetId string `protobuf:"bytes,1,opt,name=variant_set_id,json=variantSetId" json:"variant_set_id,omitempty"` + // If provided, only variant call information from the specified call sets + // will be exported. By default all variant calls are exported. + CallSetIds []string `protobuf:"bytes,2,rep,name=call_set_ids,json=callSetIds" json:"call_set_ids,omitempty"` + // Required. The Google Cloud project ID that owns the destination + // BigQuery dataset. 
The caller must have WRITE access to this project. This + // project will also own the resulting export job. + ProjectId string `protobuf:"bytes,3,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The format for the exported data. + Format ExportVariantSetRequest_Format `protobuf:"varint,4,opt,name=format,enum=google.genomics.v1.ExportVariantSetRequest_Format" json:"format,omitempty"` + // Required. The BigQuery dataset to export data to. This dataset must already + // exist. Note that this is distinct from the Genomics concept of "dataset". + BigqueryDataset string `protobuf:"bytes,5,opt,name=bigquery_dataset,json=bigqueryDataset" json:"bigquery_dataset,omitempty"` + // Required. The BigQuery table to export data to. + // If the table doesn't exist, it will be created. If it already exists, it + // will be overwritten. + BigqueryTable string `protobuf:"bytes,6,opt,name=bigquery_table,json=bigqueryTable" json:"bigquery_table,omitempty"` +} + +func (m *ExportVariantSetRequest) Reset() { *m = ExportVariantSetRequest{} } +func (m *ExportVariantSetRequest) String() string { return proto.CompactTextString(m) } +func (*ExportVariantSetRequest) ProtoMessage() {} +func (*ExportVariantSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{9} } + +func (m *ExportVariantSetRequest) GetVariantSetId() string { + if m != nil { + return m.VariantSetId + } + return "" +} + +func (m *ExportVariantSetRequest) GetCallSetIds() []string { + if m != nil { + return m.CallSetIds + } + return nil +} + +func (m *ExportVariantSetRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ExportVariantSetRequest) GetFormat() ExportVariantSetRequest_Format { + if m != nil { + return m.Format + } + return ExportVariantSetRequest_FORMAT_UNSPECIFIED +} + +func (m *ExportVariantSetRequest) GetBigqueryDataset() string { + if m != nil { + return m.BigqueryDataset + } + return "" +} + +func (m *ExportVariantSetRequest) 
GetBigqueryTable() string { + if m != nil { + return m.BigqueryTable + } + return "" +} + +// The variant set request. +type GetVariantSetRequest struct { + // Required. The ID of the variant set. + VariantSetId string `protobuf:"bytes,1,opt,name=variant_set_id,json=variantSetId" json:"variant_set_id,omitempty"` +} + +func (m *GetVariantSetRequest) Reset() { *m = GetVariantSetRequest{} } +func (m *GetVariantSetRequest) String() string { return proto.CompactTextString(m) } +func (*GetVariantSetRequest) ProtoMessage() {} +func (*GetVariantSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{10} } + +func (m *GetVariantSetRequest) GetVariantSetId() string { + if m != nil { + return m.VariantSetId + } + return "" +} + +// The search variant sets request. +type SearchVariantSetsRequest struct { + // Exactly one dataset ID must be provided here. Only variant sets which + // belong to this dataset will be returned. + DatasetIds []string `protobuf:"bytes,1,rep,name=dataset_ids,json=datasetIds" json:"dataset_ids,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 1024. 
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *SearchVariantSetsRequest) Reset() { *m = SearchVariantSetsRequest{} } +func (m *SearchVariantSetsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchVariantSetsRequest) ProtoMessage() {} +func (*SearchVariantSetsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{11} } + +func (m *SearchVariantSetsRequest) GetDatasetIds() []string { + if m != nil { + return m.DatasetIds + } + return nil +} + +func (m *SearchVariantSetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *SearchVariantSetsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// The search variant sets response. +type SearchVariantSetsResponse struct { + // The variant sets belonging to the requested dataset. + VariantSets []*VariantSet `protobuf:"bytes,1,rep,name=variant_sets,json=variantSets" json:"variant_sets,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchVariantSetsResponse) Reset() { *m = SearchVariantSetsResponse{} } +func (m *SearchVariantSetsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchVariantSetsResponse) ProtoMessage() {} +func (*SearchVariantSetsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{12} } + +func (m *SearchVariantSetsResponse) GetVariantSets() []*VariantSet { + if m != nil { + return m.VariantSets + } + return nil +} + +func (m *SearchVariantSetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The delete variant set request. +type DeleteVariantSetRequest struct { + // The ID of the variant set to be deleted. + VariantSetId string `protobuf:"bytes,1,opt,name=variant_set_id,json=variantSetId" json:"variant_set_id,omitempty"` +} + +func (m *DeleteVariantSetRequest) Reset() { *m = DeleteVariantSetRequest{} } +func (m *DeleteVariantSetRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVariantSetRequest) ProtoMessage() {} +func (*DeleteVariantSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{13} } + +func (m *DeleteVariantSetRequest) GetVariantSetId() string { + if m != nil { + return m.VariantSetId + } + return "" +} + +type UpdateVariantSetRequest struct { + // The ID of the variant to be updated (must already exist). + VariantSetId string `protobuf:"bytes,1,opt,name=variant_set_id,json=variantSetId" json:"variant_set_id,omitempty"` + // The new variant data. Only the variant_set.metadata will be considered + // for update. + VariantSet *VariantSet `protobuf:"bytes,2,opt,name=variant_set,json=variantSet" json:"variant_set,omitempty"` + // An optional mask specifying which fields to update. Supported fields: + // + // * [metadata][google.genomics.v1.VariantSet.metadata]. 
+ // * [name][google.genomics.v1.VariantSet.name]. + // * [description][google.genomics.v1.VariantSet.description]. + // + // Leaving `updateMask` unset is equivalent to specifying all mutable + // fields. + UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateVariantSetRequest) Reset() { *m = UpdateVariantSetRequest{} } +func (m *UpdateVariantSetRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateVariantSetRequest) ProtoMessage() {} +func (*UpdateVariantSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{14} } + +func (m *UpdateVariantSetRequest) GetVariantSetId() string { + if m != nil { + return m.VariantSetId + } + return "" +} + +func (m *UpdateVariantSetRequest) GetVariantSet() *VariantSet { + if m != nil { + return m.VariantSet + } + return nil +} + +func (m *UpdateVariantSetRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// The variant search request. +type SearchVariantsRequest struct { + // At most one variant set ID must be provided. Only variants from this + // variant set will be returned. If omitted, a call set id must be included in + // the request. + VariantSetIds []string `protobuf:"bytes,1,rep,name=variant_set_ids,json=variantSetIds" json:"variant_set_ids,omitempty"` + // Only return variants which have exactly this name. + VariantName string `protobuf:"bytes,2,opt,name=variant_name,json=variantName" json:"variant_name,omitempty"` + // Only return variant calls which belong to call sets with these ids. + // Leaving this blank returns all variant calls. If a variant has no + // calls belonging to any of these call sets, it won't be returned at all. + CallSetIds []string `protobuf:"bytes,3,rep,name=call_set_ids,json=callSetIds" json:"call_set_ids,omitempty"` + // Required. Only return variants in this reference sequence. 
+ ReferenceName string `protobuf:"bytes,4,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The beginning of the window (0-based, inclusive) for which + // overlapping variants should be returned. If unspecified, defaults to 0. + Start int64 `protobuf:"varint,5,opt,name=start" json:"start,omitempty"` + // The end of the window, 0-based exclusive. If unspecified or 0, defaults to + // the length of the reference. + End int64 `protobuf:"varint,6,opt,name=end" json:"end,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of variants to return in a single page. If unspecified, + // defaults to 5000. The maximum value is 10000. + PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The maximum number of calls to return in a single page. Note that this + // limit may be exceeded in the event that a matching variant contains more + // calls than the requested maximum. If unspecified, defaults to 5000. The + // maximum value is 10000. 
+ MaxCalls int32 `protobuf:"varint,9,opt,name=max_calls,json=maxCalls" json:"max_calls,omitempty"` +} + +func (m *SearchVariantsRequest) Reset() { *m = SearchVariantsRequest{} } +func (m *SearchVariantsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchVariantsRequest) ProtoMessage() {} +func (*SearchVariantsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{15} } + +func (m *SearchVariantsRequest) GetVariantSetIds() []string { + if m != nil { + return m.VariantSetIds + } + return nil +} + +func (m *SearchVariantsRequest) GetVariantName() string { + if m != nil { + return m.VariantName + } + return "" +} + +func (m *SearchVariantsRequest) GetCallSetIds() []string { + if m != nil { + return m.CallSetIds + } + return nil +} + +func (m *SearchVariantsRequest) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *SearchVariantsRequest) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *SearchVariantsRequest) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +func (m *SearchVariantsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *SearchVariantsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *SearchVariantsRequest) GetMaxCalls() int32 { + if m != nil { + return m.MaxCalls + } + return 0 +} + +// The variant search response. +type SearchVariantsResponse struct { + // The list of matching Variants. + Variants []*Variant `protobuf:"bytes,1,rep,name=variants" json:"variants,omitempty"` + // The continuation token, which is used to page through large result sets. + // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchVariantsResponse) Reset() { *m = SearchVariantsResponse{} } +func (m *SearchVariantsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchVariantsResponse) ProtoMessage() {} +func (*SearchVariantsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{16} } + +func (m *SearchVariantsResponse) GetVariants() []*Variant { + if m != nil { + return m.Variants + } + return nil +} + +func (m *SearchVariantsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateVariantRequest struct { + // The variant to be created. + Variant *Variant `protobuf:"bytes,1,opt,name=variant" json:"variant,omitempty"` +} + +func (m *CreateVariantRequest) Reset() { *m = CreateVariantRequest{} } +func (m *CreateVariantRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVariantRequest) ProtoMessage() {} +func (*CreateVariantRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{17} } + +func (m *CreateVariantRequest) GetVariant() *Variant { + if m != nil { + return m.Variant + } + return nil +} + +type UpdateVariantRequest struct { + // The ID of the variant to be updated. + VariantId string `protobuf:"bytes,1,opt,name=variant_id,json=variantId" json:"variant_id,omitempty"` + // The new variant data. + Variant *Variant `protobuf:"bytes,2,opt,name=variant" json:"variant,omitempty"` + // An optional mask specifying which fields to update. At this time, mutable + // fields are [names][google.genomics.v1.Variant.names] and + // [info][google.genomics.v1.Variant.info]. Acceptable values are "names" and + // "info". If unspecified, all mutable fields will be updated. 
+ UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateVariantRequest) Reset() { *m = UpdateVariantRequest{} } +func (m *UpdateVariantRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateVariantRequest) ProtoMessage() {} +func (*UpdateVariantRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{18} } + +func (m *UpdateVariantRequest) GetVariantId() string { + if m != nil { + return m.VariantId + } + return "" +} + +func (m *UpdateVariantRequest) GetVariant() *Variant { + if m != nil { + return m.Variant + } + return nil +} + +func (m *UpdateVariantRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +type DeleteVariantRequest struct { + // The ID of the variant to be deleted. + VariantId string `protobuf:"bytes,1,opt,name=variant_id,json=variantId" json:"variant_id,omitempty"` +} + +func (m *DeleteVariantRequest) Reset() { *m = DeleteVariantRequest{} } +func (m *DeleteVariantRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVariantRequest) ProtoMessage() {} +func (*DeleteVariantRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{19} } + +func (m *DeleteVariantRequest) GetVariantId() string { + if m != nil { + return m.VariantId + } + return "" +} + +type GetVariantRequest struct { + // The ID of the variant. 
+ VariantId string `protobuf:"bytes,1,opt,name=variant_id,json=variantId" json:"variant_id,omitempty"` +} + +func (m *GetVariantRequest) Reset() { *m = GetVariantRequest{} } +func (m *GetVariantRequest) String() string { return proto.CompactTextString(m) } +func (*GetVariantRequest) ProtoMessage() {} +func (*GetVariantRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{20} } + +func (m *GetVariantRequest) GetVariantId() string { + if m != nil { + return m.VariantId + } + return "" +} + +type MergeVariantsRequest struct { + // The destination variant set. + VariantSetId string `protobuf:"bytes,1,opt,name=variant_set_id,json=variantSetId" json:"variant_set_id,omitempty"` + // The variants to be merged with existing variants. + Variants []*Variant `protobuf:"bytes,2,rep,name=variants" json:"variants,omitempty"` + // A mapping between info field keys and the InfoMergeOperations to + // be performed on them. + InfoMergeConfig map[string]InfoMergeOperation `protobuf:"bytes,3,rep,name=info_merge_config,json=infoMergeConfig" json:"info_merge_config,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=google.genomics.v1.InfoMergeOperation"` +} + +func (m *MergeVariantsRequest) Reset() { *m = MergeVariantsRequest{} } +func (m *MergeVariantsRequest) String() string { return proto.CompactTextString(m) } +func (*MergeVariantsRequest) ProtoMessage() {} +func (*MergeVariantsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{21} } + +func (m *MergeVariantsRequest) GetVariantSetId() string { + if m != nil { + return m.VariantSetId + } + return "" +} + +func (m *MergeVariantsRequest) GetVariants() []*Variant { + if m != nil { + return m.Variants + } + return nil +} + +func (m *MergeVariantsRequest) GetInfoMergeConfig() map[string]InfoMergeOperation { + if m != nil { + return m.InfoMergeConfig + } + return nil +} + +// The call set search request. 
+type SearchCallSetsRequest struct { + // Restrict the query to call sets within the given variant sets. At least one + // ID must be provided. + VariantSetIds []string `protobuf:"bytes,1,rep,name=variant_set_ids,json=variantSetIds" json:"variant_set_ids,omitempty"` + // Only return call sets for which a substring of the name matches this + // string. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // The continuation token, which is used to page through large result sets. + // To get the next page of results, set this parameter to the value of + // `nextPageToken` from the previous response. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The maximum number of results to return in a single page. If unspecified, + // defaults to 1024. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *SearchCallSetsRequest) Reset() { *m = SearchCallSetsRequest{} } +func (m *SearchCallSetsRequest) String() string { return proto.CompactTextString(m) } +func (*SearchCallSetsRequest) ProtoMessage() {} +func (*SearchCallSetsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{22} } + +func (m *SearchCallSetsRequest) GetVariantSetIds() []string { + if m != nil { + return m.VariantSetIds + } + return nil +} + +func (m *SearchCallSetsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SearchCallSetsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *SearchCallSetsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// The call set search response. +type SearchCallSetsResponse struct { + // The list of matching call sets. + CallSets []*CallSet `protobuf:"bytes,1,rep,name=call_sets,json=callSets" json:"call_sets,omitempty"` + // The continuation token, which is used to page through large result sets. 
+ // Provide this value in a subsequent request to return the next page of + // results. This field will be empty if there aren't any additional results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *SearchCallSetsResponse) Reset() { *m = SearchCallSetsResponse{} } +func (m *SearchCallSetsResponse) String() string { return proto.CompactTextString(m) } +func (*SearchCallSetsResponse) ProtoMessage() {} +func (*SearchCallSetsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{23} } + +func (m *SearchCallSetsResponse) GetCallSets() []*CallSet { + if m != nil { + return m.CallSets + } + return nil +} + +func (m *SearchCallSetsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +type CreateCallSetRequest struct { + // The call set to be created. + CallSet *CallSet `protobuf:"bytes,1,opt,name=call_set,json=callSet" json:"call_set,omitempty"` +} + +func (m *CreateCallSetRequest) Reset() { *m = CreateCallSetRequest{} } +func (m *CreateCallSetRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCallSetRequest) ProtoMessage() {} +func (*CreateCallSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{24} } + +func (m *CreateCallSetRequest) GetCallSet() *CallSet { + if m != nil { + return m.CallSet + } + return nil +} + +type UpdateCallSetRequest struct { + // The ID of the call set to be updated. + CallSetId string `protobuf:"bytes,1,opt,name=call_set_id,json=callSetId" json:"call_set_id,omitempty"` + // The new call set data. + CallSet *CallSet `protobuf:"bytes,2,opt,name=call_set,json=callSet" json:"call_set,omitempty"` + // An optional mask specifying which fields to update. At this time, the only + // mutable field is [name][google.genomics.v1.CallSet.name]. The only + // acceptable value is "name". If unspecified, all mutable fields will be + // updated. 
+ UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateCallSetRequest) Reset() { *m = UpdateCallSetRequest{} } +func (m *UpdateCallSetRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateCallSetRequest) ProtoMessage() {} +func (*UpdateCallSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{25} } + +func (m *UpdateCallSetRequest) GetCallSetId() string { + if m != nil { + return m.CallSetId + } + return "" +} + +func (m *UpdateCallSetRequest) GetCallSet() *CallSet { + if m != nil { + return m.CallSet + } + return nil +} + +func (m *UpdateCallSetRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +type DeleteCallSetRequest struct { + // The ID of the call set to be deleted. + CallSetId string `protobuf:"bytes,1,opt,name=call_set_id,json=callSetId" json:"call_set_id,omitempty"` +} + +func (m *DeleteCallSetRequest) Reset() { *m = DeleteCallSetRequest{} } +func (m *DeleteCallSetRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteCallSetRequest) ProtoMessage() {} +func (*DeleteCallSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{26} } + +func (m *DeleteCallSetRequest) GetCallSetId() string { + if m != nil { + return m.CallSetId + } + return "" +} + +type GetCallSetRequest struct { + // The ID of the call set. 
+ CallSetId string `protobuf:"bytes,1,opt,name=call_set_id,json=callSetId" json:"call_set_id,omitempty"` +} + +func (m *GetCallSetRequest) Reset() { *m = GetCallSetRequest{} } +func (m *GetCallSetRequest) String() string { return proto.CompactTextString(m) } +func (*GetCallSetRequest) ProtoMessage() {} +func (*GetCallSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{27} } + +func (m *GetCallSetRequest) GetCallSetId() string { + if m != nil { + return m.CallSetId + } + return "" +} + +// The stream variants request. +type StreamVariantsRequest struct { + // The Google Cloud project ID which will be billed + // for this access. The caller must have WRITE access to this project. + // Required. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The variant set ID from which to stream variants. + VariantSetId string `protobuf:"bytes,2,opt,name=variant_set_id,json=variantSetId" json:"variant_set_id,omitempty"` + // Only return variant calls which belong to call sets with these IDs. + // Leaving this blank returns all variant calls. + CallSetIds []string `protobuf:"bytes,3,rep,name=call_set_ids,json=callSetIds" json:"call_set_ids,omitempty"` + // Required. Only return variants in this reference sequence. + ReferenceName string `protobuf:"bytes,4,opt,name=reference_name,json=referenceName" json:"reference_name,omitempty"` + // The beginning of the window (0-based, inclusive) for which + // overlapping variants should be returned. + Start int64 `protobuf:"varint,5,opt,name=start" json:"start,omitempty"` + // The end of the window (0-based, exclusive) for which overlapping + // variants should be returned. 
+ End int64 `protobuf:"varint,6,opt,name=end" json:"end,omitempty"` +} + +func (m *StreamVariantsRequest) Reset() { *m = StreamVariantsRequest{} } +func (m *StreamVariantsRequest) String() string { return proto.CompactTextString(m) } +func (*StreamVariantsRequest) ProtoMessage() {} +func (*StreamVariantsRequest) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{28} } + +func (m *StreamVariantsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *StreamVariantsRequest) GetVariantSetId() string { + if m != nil { + return m.VariantSetId + } + return "" +} + +func (m *StreamVariantsRequest) GetCallSetIds() []string { + if m != nil { + return m.CallSetIds + } + return nil +} + +func (m *StreamVariantsRequest) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *StreamVariantsRequest) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *StreamVariantsRequest) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +type StreamVariantsResponse struct { + Variants []*Variant `protobuf:"bytes,1,rep,name=variants" json:"variants,omitempty"` +} + +func (m *StreamVariantsResponse) Reset() { *m = StreamVariantsResponse{} } +func (m *StreamVariantsResponse) String() string { return proto.CompactTextString(m) } +func (*StreamVariantsResponse) ProtoMessage() {} +func (*StreamVariantsResponse) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{29} } + +func (m *StreamVariantsResponse) GetVariants() []*Variant { + if m != nil { + return m.Variants + } + return nil +} + +func init() { + proto.RegisterType((*VariantSetMetadata)(nil), "google.genomics.v1.VariantSetMetadata") + proto.RegisterType((*VariantSet)(nil), "google.genomics.v1.VariantSet") + proto.RegisterType((*Variant)(nil), "google.genomics.v1.Variant") + proto.RegisterType((*VariantCall)(nil), "google.genomics.v1.VariantCall") + proto.RegisterType((*CallSet)(nil), 
"google.genomics.v1.CallSet") + proto.RegisterType((*ReferenceBound)(nil), "google.genomics.v1.ReferenceBound") + proto.RegisterType((*ImportVariantsRequest)(nil), "google.genomics.v1.ImportVariantsRequest") + proto.RegisterType((*ImportVariantsResponse)(nil), "google.genomics.v1.ImportVariantsResponse") + proto.RegisterType((*CreateVariantSetRequest)(nil), "google.genomics.v1.CreateVariantSetRequest") + proto.RegisterType((*ExportVariantSetRequest)(nil), "google.genomics.v1.ExportVariantSetRequest") + proto.RegisterType((*GetVariantSetRequest)(nil), "google.genomics.v1.GetVariantSetRequest") + proto.RegisterType((*SearchVariantSetsRequest)(nil), "google.genomics.v1.SearchVariantSetsRequest") + proto.RegisterType((*SearchVariantSetsResponse)(nil), "google.genomics.v1.SearchVariantSetsResponse") + proto.RegisterType((*DeleteVariantSetRequest)(nil), "google.genomics.v1.DeleteVariantSetRequest") + proto.RegisterType((*UpdateVariantSetRequest)(nil), "google.genomics.v1.UpdateVariantSetRequest") + proto.RegisterType((*SearchVariantsRequest)(nil), "google.genomics.v1.SearchVariantsRequest") + proto.RegisterType((*SearchVariantsResponse)(nil), "google.genomics.v1.SearchVariantsResponse") + proto.RegisterType((*CreateVariantRequest)(nil), "google.genomics.v1.CreateVariantRequest") + proto.RegisterType((*UpdateVariantRequest)(nil), "google.genomics.v1.UpdateVariantRequest") + proto.RegisterType((*DeleteVariantRequest)(nil), "google.genomics.v1.DeleteVariantRequest") + proto.RegisterType((*GetVariantRequest)(nil), "google.genomics.v1.GetVariantRequest") + proto.RegisterType((*MergeVariantsRequest)(nil), "google.genomics.v1.MergeVariantsRequest") + proto.RegisterType((*SearchCallSetsRequest)(nil), "google.genomics.v1.SearchCallSetsRequest") + proto.RegisterType((*SearchCallSetsResponse)(nil), "google.genomics.v1.SearchCallSetsResponse") + proto.RegisterType((*CreateCallSetRequest)(nil), "google.genomics.v1.CreateCallSetRequest") + 
proto.RegisterType((*UpdateCallSetRequest)(nil), "google.genomics.v1.UpdateCallSetRequest") + proto.RegisterType((*DeleteCallSetRequest)(nil), "google.genomics.v1.DeleteCallSetRequest") + proto.RegisterType((*GetCallSetRequest)(nil), "google.genomics.v1.GetCallSetRequest") + proto.RegisterType((*StreamVariantsRequest)(nil), "google.genomics.v1.StreamVariantsRequest") + proto.RegisterType((*StreamVariantsResponse)(nil), "google.genomics.v1.StreamVariantsResponse") + proto.RegisterEnum("google.genomics.v1.InfoMergeOperation", InfoMergeOperation_name, InfoMergeOperation_value) + proto.RegisterEnum("google.genomics.v1.VariantSetMetadata_Type", VariantSetMetadata_Type_name, VariantSetMetadata_Type_value) + proto.RegisterEnum("google.genomics.v1.ImportVariantsRequest_Format", ImportVariantsRequest_Format_name, ImportVariantsRequest_Format_value) + proto.RegisterEnum("google.genomics.v1.ExportVariantSetRequest_Format", ExportVariantSetRequest_Format_name, ExportVariantSetRequest_Format_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for StreamingVariantService service + +type StreamingVariantServiceClient interface { + // Returns a stream of all the variants matching the search request, ordered + // by reference name, position, and ID. 
+ StreamVariants(ctx context.Context, in *StreamVariantsRequest, opts ...grpc.CallOption) (StreamingVariantService_StreamVariantsClient, error) +} + +type streamingVariantServiceClient struct { + cc *grpc.ClientConn +} + +func NewStreamingVariantServiceClient(cc *grpc.ClientConn) StreamingVariantServiceClient { + return &streamingVariantServiceClient{cc} +} + +func (c *streamingVariantServiceClient) StreamVariants(ctx context.Context, in *StreamVariantsRequest, opts ...grpc.CallOption) (StreamingVariantService_StreamVariantsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_StreamingVariantService_serviceDesc.Streams[0], c.cc, "/google.genomics.v1.StreamingVariantService/StreamVariants", opts...) + if err != nil { + return nil, err + } + x := &streamingVariantServiceStreamVariantsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type StreamingVariantService_StreamVariantsClient interface { + Recv() (*StreamVariantsResponse, error) + grpc.ClientStream +} + +type streamingVariantServiceStreamVariantsClient struct { + grpc.ClientStream +} + +func (x *streamingVariantServiceStreamVariantsClient) Recv() (*StreamVariantsResponse, error) { + m := new(StreamVariantsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for StreamingVariantService service + +type StreamingVariantServiceServer interface { + // Returns a stream of all the variants matching the search request, ordered + // by reference name, position, and ID. 
+ StreamVariants(*StreamVariantsRequest, StreamingVariantService_StreamVariantsServer) error +} + +func RegisterStreamingVariantServiceServer(s *grpc.Server, srv StreamingVariantServiceServer) { + s.RegisterService(&_StreamingVariantService_serviceDesc, srv) +} + +func _StreamingVariantService_StreamVariants_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamVariantsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StreamingVariantServiceServer).StreamVariants(m, &streamingVariantServiceStreamVariantsServer{stream}) +} + +type StreamingVariantService_StreamVariantsServer interface { + Send(*StreamVariantsResponse) error + grpc.ServerStream +} + +type streamingVariantServiceStreamVariantsServer struct { + grpc.ServerStream +} + +func (x *streamingVariantServiceStreamVariantsServer) Send(m *StreamVariantsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _StreamingVariantService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.genomics.v1.StreamingVariantService", + HandlerType: (*StreamingVariantServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamVariants", + Handler: _StreamingVariantService_StreamVariants_Handler, + ServerStreams: true, + }, + }, + Metadata: "google/genomics/v1/variants.proto", +} + +// Client API for VariantServiceV1 service + +type VariantServiceV1Client interface { + // Creates variant data by asynchronously importing the provided information. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // The variants for import will be merged with any existing variant that + // matches its reference sequence, start, end, reference bases, and + // alternative bases. If no such variant exists, a new one will be created. 
+ // + // When variants are merged, the call information from the new variant + // is added to the existing variant, and Variant info fields are merged + // as specified in + // [infoMergeConfig][google.genomics.v1.ImportVariantsRequest.info_merge_config]. + // As a special case, for single-sample VCF files, QUAL and FILTER fields will + // be moved to the call level; these are sometimes interpreted in a + // call-specific context. + // Imported VCF headers are appended to the metadata already in a variant set. + ImportVariants(ctx context.Context, in *ImportVariantsRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Creates a new variant set. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // The provided variant set must have a valid `datasetId` set - all other + // fields are optional. Note that the `id` field will be ignored, as this is + // assigned by the server. + CreateVariantSet(ctx context.Context, in *CreateVariantSetRequest, opts ...grpc.CallOption) (*VariantSet, error) + // Exports variant set data to an external destination. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + ExportVariantSet(ctx context.Context, in *ExportVariantSetRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets a variant set by ID. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetVariantSet(ctx context.Context, in *GetVariantSetRequest, opts ...grpc.CallOption) (*VariantSet, error) + // Returns a list of all variant sets matching search criteria. 
+ // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchVariantSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L49). + SearchVariantSets(ctx context.Context, in *SearchVariantSetsRequest, opts ...grpc.CallOption) (*SearchVariantSetsResponse, error) + // Deletes a variant set including all variants, call sets, and calls within. + // This is not reversible. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteVariantSet(ctx context.Context, in *DeleteVariantSetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Updates a variant set using patch semantics. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + UpdateVariantSet(ctx context.Context, in *UpdateVariantSetRequest, opts ...grpc.CallOption) (*VariantSet, error) + // Gets a list of variants matching the criteria. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchVariants](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L126). + SearchVariants(ctx context.Context, in *SearchVariantsRequest, opts ...grpc.CallOption) (*SearchVariantsResponse, error) + // Creates a new variant. 
+ // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + CreateVariant(ctx context.Context, in *CreateVariantRequest, opts ...grpc.CallOption) (*Variant, error) + // Updates a variant. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This method supports patch semantics. Returns the modified variant without + // its calls. + UpdateVariant(ctx context.Context, in *UpdateVariantRequest, opts ...grpc.CallOption) (*Variant, error) + // Deletes a variant. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteVariant(ctx context.Context, in *DeleteVariantRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Gets a variant by ID. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetVariant(ctx context.Context, in *GetVariantRequest, opts ...grpc.CallOption) (*Variant, error) + // Merges the given variants with existing variants. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Each variant will be + // merged with an existing variant that matches its reference sequence, + // start, end, reference bases, and alternative bases. If no such variant + // exists, a new one will be created. + // + // When variants are merged, the call information from the new variant + // is added to the existing variant. 
Variant info fields are merged as + // specified in the + // [infoMergeConfig][google.genomics.v1.MergeVariantsRequest.info_merge_config] + // field of the MergeVariantsRequest. + // + // Please exercise caution when using this method! It is easy to introduce + // mistakes in existing variants and difficult to back out of them. For + // example, + // suppose you were trying to merge a new variant with an existing one and + // both + // variants contain calls that belong to callsets with the same callset ID. + // + // // Existing variant - irrelevant fields trimmed for clarity + // { + // "variantSetId": "10473108253681171589", + // "referenceName": "1", + // "start": "10582", + // "referenceBases": "G", + // "alternateBases": [ + // "A" + // ], + // "calls": [ + // { + // "callSetId": "10473108253681171589-0", + // "callSetName": "CALLSET0", + // "genotype": [ + // 0, + // 1 + // ], + // } + // ] + // } + // + // // New variant with conflicting call information + // { + // "variantSetId": "10473108253681171589", + // "referenceName": "1", + // "start": "10582", + // "referenceBases": "G", + // "alternateBases": [ + // "A" + // ], + // "calls": [ + // { + // "callSetId": "10473108253681171589-0", + // "callSetName": "CALLSET0", + // "genotype": [ + // 1, + // 1 + // ], + // } + // ] + // } + // + // The resulting merged variant would overwrite the existing calls with those + // from the new variant: + // + // { + // "variantSetId": "10473108253681171589", + // "referenceName": "1", + // "start": "10582", + // "referenceBases": "G", + // "alternateBases": [ + // "A" + // ], + // "calls": [ + // { + // "callSetId": "10473108253681171589-0", + // "callSetName": "CALLSET0", + // "genotype": [ + // 1, + // 1 + // ], + // } + // ] + // } + // + // This may be the desired outcome, but it is up to the user to determine if + // if that is indeed the case. 
+ MergeVariants(ctx context.Context, in *MergeVariantsRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Gets a list of call sets matching the criteria. + // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchCallSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L178). + SearchCallSets(ctx context.Context, in *SearchCallSetsRequest, opts ...grpc.CallOption) (*SearchCallSetsResponse, error) + // Creates a new call set. + // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + CreateCallSet(ctx context.Context, in *CreateCallSetRequest, opts ...grpc.CallOption) (*CallSet, error) + // Updates a call set. + // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This method supports patch semantics. + UpdateCallSet(ctx context.Context, in *UpdateCallSetRequest, opts ...grpc.CallOption) (*CallSet, error) + // Deletes a call set. + // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteCallSet(ctx context.Context, in *DeleteCallSetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Gets a call set by ID. 
+ // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetCallSet(ctx context.Context, in *GetCallSetRequest, opts ...grpc.CallOption) (*CallSet, error) +} + +type variantServiceV1Client struct { + cc *grpc.ClientConn +} + +func NewVariantServiceV1Client(cc *grpc.ClientConn) VariantServiceV1Client { + return &variantServiceV1Client{cc} +} + +func (c *variantServiceV1Client) ImportVariants(ctx context.Context, in *ImportVariantsRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/ImportVariants", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) CreateVariantSet(ctx context.Context, in *CreateVariantSetRequest, opts ...grpc.CallOption) (*VariantSet, error) { + out := new(VariantSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/CreateVariantSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) ExportVariantSet(ctx context.Context, in *ExportVariantSetRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/ExportVariantSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) GetVariantSet(ctx context.Context, in *GetVariantSetRequest, opts ...grpc.CallOption) (*VariantSet, error) { + out := new(VariantSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/GetVariantSet", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) SearchVariantSets(ctx context.Context, in *SearchVariantSetsRequest, opts ...grpc.CallOption) (*SearchVariantSetsResponse, error) { + out := new(SearchVariantSetsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/SearchVariantSets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) DeleteVariantSet(ctx context.Context, in *DeleteVariantSetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/DeleteVariantSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) UpdateVariantSet(ctx context.Context, in *UpdateVariantSetRequest, opts ...grpc.CallOption) (*VariantSet, error) { + out := new(VariantSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/UpdateVariantSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) SearchVariants(ctx context.Context, in *SearchVariantsRequest, opts ...grpc.CallOption) (*SearchVariantsResponse, error) { + out := new(SearchVariantsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/SearchVariants", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) CreateVariant(ctx context.Context, in *CreateVariantRequest, opts ...grpc.CallOption) (*Variant, error) { + out := new(Variant) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/CreateVariant", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) UpdateVariant(ctx context.Context, in *UpdateVariantRequest, opts ...grpc.CallOption) (*Variant, error) { + out := new(Variant) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/UpdateVariant", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) DeleteVariant(ctx context.Context, in *DeleteVariantRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/DeleteVariant", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) GetVariant(ctx context.Context, in *GetVariantRequest, opts ...grpc.CallOption) (*Variant, error) { + out := new(Variant) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/GetVariant", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) MergeVariants(ctx context.Context, in *MergeVariantsRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/MergeVariants", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) SearchCallSets(ctx context.Context, in *SearchCallSetsRequest, opts ...grpc.CallOption) (*SearchCallSetsResponse, error) { + out := new(SearchCallSetsResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/SearchCallSets", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) CreateCallSet(ctx context.Context, in *CreateCallSetRequest, opts ...grpc.CallOption) (*CallSet, error) { + out := new(CallSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/CreateCallSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) UpdateCallSet(ctx context.Context, in *UpdateCallSetRequest, opts ...grpc.CallOption) (*CallSet, error) { + out := new(CallSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/UpdateCallSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) DeleteCallSet(ctx context.Context, in *DeleteCallSetRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/DeleteCallSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *variantServiceV1Client) GetCallSet(ctx context.Context, in *GetCallSetRequest, opts ...grpc.CallOption) (*CallSet, error) { + out := new(CallSet) + err := grpc.Invoke(ctx, "/google.genomics.v1.VariantServiceV1/GetCallSet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for VariantServiceV1 service + +type VariantServiceV1Server interface { + // Creates variant data by asynchronously importing the provided information. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // The variants for import will be merged with any existing variant that + // matches its reference sequence, start, end, reference bases, and + // alternative bases. If no such variant exists, a new one will be created. 
+ // + // When variants are merged, the call information from the new variant + // is added to the existing variant, and Variant info fields are merged + // as specified in + // [infoMergeConfig][google.genomics.v1.ImportVariantsRequest.info_merge_config]. + // As a special case, for single-sample VCF files, QUAL and FILTER fields will + // be moved to the call level; these are sometimes interpreted in a + // call-specific context. + // Imported VCF headers are appended to the metadata already in a variant set. + ImportVariants(context.Context, *ImportVariantsRequest) (*google_longrunning.Operation, error) + // Creates a new variant set. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // The provided variant set must have a valid `datasetId` set - all other + // fields are optional. Note that the `id` field will be ignored, as this is + // assigned by the server. + CreateVariantSet(context.Context, *CreateVariantSetRequest) (*VariantSet, error) + // Exports variant set data to an external destination. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + ExportVariantSet(context.Context, *ExportVariantSetRequest) (*google_longrunning.Operation, error) + // Gets a variant set by ID. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetVariantSet(context.Context, *GetVariantSetRequest) (*VariantSet, error) + // Returns a list of all variant sets matching search criteria. 
+ // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchVariantSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L49). + SearchVariantSets(context.Context, *SearchVariantSetsRequest) (*SearchVariantSetsResponse, error) + // Deletes a variant set including all variants, call sets, and calls within. + // This is not reversible. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteVariantSet(context.Context, *DeleteVariantSetRequest) (*google_protobuf1.Empty, error) + // Updates a variant set using patch semantics. + // + // For the definitions of variant sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + UpdateVariantSet(context.Context, *UpdateVariantSetRequest) (*VariantSet, error) + // Gets a list of variants matching the criteria. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchVariants](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L126). + SearchVariants(context.Context, *SearchVariantsRequest) (*SearchVariantsResponse, error) + // Creates a new variant. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + CreateVariant(context.Context, *CreateVariantRequest) (*Variant, error) + // Updates a variant. 
+ // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This method supports patch semantics. Returns the modified variant without + // its calls. + UpdateVariant(context.Context, *UpdateVariantRequest) (*Variant, error) + // Deletes a variant. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteVariant(context.Context, *DeleteVariantRequest) (*google_protobuf1.Empty, error) + // Gets a variant by ID. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetVariant(context.Context, *GetVariantRequest) (*Variant, error) + // Merges the given variants with existing variants. + // + // For the definitions of variants and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Each variant will be + // merged with an existing variant that matches its reference sequence, + // start, end, reference bases, and alternative bases. If no such variant + // exists, a new one will be created. + // + // When variants are merged, the call information from the new variant + // is added to the existing variant. Variant info fields are merged as + // specified in the + // [infoMergeConfig][google.genomics.v1.MergeVariantsRequest.info_merge_config] + // field of the MergeVariantsRequest. + // + // Please exercise caution when using this method! It is easy to introduce + // mistakes in existing variants and difficult to back out of them. 
For + // example, + // suppose you were trying to merge a new variant with an existing one and + // both + // variants contain calls that belong to callsets with the same callset ID. + // + // // Existing variant - irrelevant fields trimmed for clarity + // { + // "variantSetId": "10473108253681171589", + // "referenceName": "1", + // "start": "10582", + // "referenceBases": "G", + // "alternateBases": [ + // "A" + // ], + // "calls": [ + // { + // "callSetId": "10473108253681171589-0", + // "callSetName": "CALLSET0", + // "genotype": [ + // 0, + // 1 + // ], + // } + // ] + // } + // + // // New variant with conflicting call information + // { + // "variantSetId": "10473108253681171589", + // "referenceName": "1", + // "start": "10582", + // "referenceBases": "G", + // "alternateBases": [ + // "A" + // ], + // "calls": [ + // { + // "callSetId": "10473108253681171589-0", + // "callSetName": "CALLSET0", + // "genotype": [ + // 1, + // 1 + // ], + // } + // ] + // } + // + // The resulting merged variant would overwrite the existing calls with those + // from the new variant: + // + // { + // "variantSetId": "10473108253681171589", + // "referenceName": "1", + // "start": "10582", + // "referenceBases": "G", + // "alternateBases": [ + // "A" + // ], + // "calls": [ + // { + // "callSetId": "10473108253681171589-0", + // "callSetName": "CALLSET0", + // "genotype": [ + // 1, + // 1 + // ], + // } + // ] + // } + // + // This may be the desired outcome, but it is up to the user to determine if + // if that is indeed the case. + MergeVariants(context.Context, *MergeVariantsRequest) (*google_protobuf1.Empty, error) + // Gets a list of call sets matching the criteria. 
+ // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // Implements + // [GlobalAllianceApi.searchCallSets](https://github.com/ga4gh/schemas/blob/v0.5.1/src/main/resources/avro/variantmethods.avdl#L178). + SearchCallSets(context.Context, *SearchCallSetsRequest) (*SearchCallSetsResponse, error) + // Creates a new call set. + // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + CreateCallSet(context.Context, *CreateCallSetRequest) (*CallSet, error) + // Updates a call set. + // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + // + // This method supports patch semantics. + UpdateCallSet(context.Context, *UpdateCallSetRequest) (*CallSet, error) + // Deletes a call set. + // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + DeleteCallSet(context.Context, *DeleteCallSetRequest) (*google_protobuf1.Empty, error) + // Gets a call set by ID. 
+ // + // For the definitions of call sets and other genomics resources, see + // [Fundamentals of Google + // Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics) + GetCallSet(context.Context, *GetCallSetRequest) (*CallSet, error) +} + +func RegisterVariantServiceV1Server(s *grpc.Server, srv VariantServiceV1Server) { + s.RegisterService(&_VariantServiceV1_serviceDesc, srv) +} + +func _VariantServiceV1_ImportVariants_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportVariantsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).ImportVariants(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/ImportVariants", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).ImportVariants(ctx, req.(*ImportVariantsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_CreateVariantSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVariantSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).CreateVariantSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/CreateVariantSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).CreateVariantSet(ctx, req.(*CreateVariantSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_ExportVariantSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(ExportVariantSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).ExportVariantSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/ExportVariantSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).ExportVariantSet(ctx, req.(*ExportVariantSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_GetVariantSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVariantSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).GetVariantSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/GetVariantSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).GetVariantSet(ctx, req.(*GetVariantSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_SearchVariantSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchVariantSetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).SearchVariantSets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/SearchVariantSets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).SearchVariantSets(ctx, req.(*SearchVariantSetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_VariantServiceV1_DeleteVariantSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVariantSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).DeleteVariantSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/DeleteVariantSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).DeleteVariantSet(ctx, req.(*DeleteVariantSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_UpdateVariantSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateVariantSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).UpdateVariantSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/UpdateVariantSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).UpdateVariantSet(ctx, req.(*UpdateVariantSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_SearchVariants_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchVariantsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).SearchVariants(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/SearchVariants", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(VariantServiceV1Server).SearchVariants(ctx, req.(*SearchVariantsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_CreateVariant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVariantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).CreateVariant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/CreateVariant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).CreateVariant(ctx, req.(*CreateVariantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_UpdateVariant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateVariantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).UpdateVariant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/UpdateVariant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).UpdateVariant(ctx, req.(*UpdateVariantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_DeleteVariant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVariantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).DeleteVariant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.genomics.v1.VariantServiceV1/DeleteVariant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).DeleteVariant(ctx, req.(*DeleteVariantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_GetVariant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVariantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).GetVariant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/GetVariant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).GetVariant(ctx, req.(*GetVariantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_MergeVariants_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MergeVariantsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).MergeVariants(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/MergeVariants", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).MergeVariants(ctx, req.(*MergeVariantsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_SearchCallSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchCallSetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(VariantServiceV1Server).SearchCallSets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/SearchCallSets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).SearchCallSets(ctx, req.(*SearchCallSetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_CreateCallSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCallSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).CreateCallSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/CreateCallSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).CreateCallSet(ctx, req.(*CreateCallSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_UpdateCallSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCallSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).UpdateCallSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/UpdateCallSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).UpdateCallSet(ctx, req.(*UpdateCallSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_DeleteCallSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(DeleteCallSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).DeleteCallSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/DeleteCallSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).DeleteCallSet(ctx, req.(*DeleteCallSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VariantServiceV1_GetCallSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCallSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VariantServiceV1Server).GetCallSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1.VariantServiceV1/GetCallSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VariantServiceV1Server).GetCallSet(ctx, req.(*GetCallSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _VariantServiceV1_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.genomics.v1.VariantServiceV1", + HandlerType: (*VariantServiceV1Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ImportVariants", + Handler: _VariantServiceV1_ImportVariants_Handler, + }, + { + MethodName: "CreateVariantSet", + Handler: _VariantServiceV1_CreateVariantSet_Handler, + }, + { + MethodName: "ExportVariantSet", + Handler: _VariantServiceV1_ExportVariantSet_Handler, + }, + { + MethodName: "GetVariantSet", + Handler: _VariantServiceV1_GetVariantSet_Handler, + }, + { + MethodName: "SearchVariantSets", + Handler: _VariantServiceV1_SearchVariantSets_Handler, + }, + { + MethodName: "DeleteVariantSet", + Handler: _VariantServiceV1_DeleteVariantSet_Handler, + }, + { + MethodName: "UpdateVariantSet", + 
Handler: _VariantServiceV1_UpdateVariantSet_Handler, + }, + { + MethodName: "SearchVariants", + Handler: _VariantServiceV1_SearchVariants_Handler, + }, + { + MethodName: "CreateVariant", + Handler: _VariantServiceV1_CreateVariant_Handler, + }, + { + MethodName: "UpdateVariant", + Handler: _VariantServiceV1_UpdateVariant_Handler, + }, + { + MethodName: "DeleteVariant", + Handler: _VariantServiceV1_DeleteVariant_Handler, + }, + { + MethodName: "GetVariant", + Handler: _VariantServiceV1_GetVariant_Handler, + }, + { + MethodName: "MergeVariants", + Handler: _VariantServiceV1_MergeVariants_Handler, + }, + { + MethodName: "SearchCallSets", + Handler: _VariantServiceV1_SearchCallSets_Handler, + }, + { + MethodName: "CreateCallSet", + Handler: _VariantServiceV1_CreateCallSet_Handler, + }, + { + MethodName: "UpdateCallSet", + Handler: _VariantServiceV1_UpdateCallSet_Handler, + }, + { + MethodName: "DeleteCallSet", + Handler: _VariantServiceV1_DeleteCallSet_Handler, + }, + { + MethodName: "GetCallSet", + Handler: _VariantServiceV1_GetCallSet_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/genomics/v1/variants.proto", +} + +func init() { proto.RegisterFile("google/genomics/v1/variants.proto", fileDescriptor11) } + +var fileDescriptor11 = []byte{ + // 2348 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xdd, 0x6e, 0x1b, 0xc7, + 0xf5, 0xff, 0xef, 0x92, 0x94, 0xc8, 0x43, 0x91, 0x5a, 0x4f, 0x14, 0x69, 0x43, 0x7f, 0xc9, 0xfb, + 0xb7, 0x1d, 0x45, 0x75, 0x45, 0x9b, 0x81, 0xd3, 0x54, 0x49, 0x6a, 0x48, 0x34, 0xa5, 0xb0, 0x90, + 0x48, 0x65, 0x45, 0xbb, 0x75, 0x80, 0x82, 0x58, 0x91, 0x23, 0x7a, 0x6d, 0x72, 0x97, 0xde, 0x5d, + 0xaa, 0x96, 0x0d, 0x5f, 0x34, 0xfd, 0x42, 0x80, 0x02, 0x05, 0x1a, 0xa0, 0x57, 0xbd, 0xed, 0x45, + 0xd1, 0xa2, 0x6f, 0xe0, 0x37, 0x68, 0x7b, 0x53, 0xf4, 0x0d, 0xfa, 0x10, 0xbd, 0x2c, 0x66, 0x76, + 0x66, 0xb9, 0xbb, 0x1c, 0xae, 0x28, 0x07, 0x09, 0x7a, 0xc7, 0x39, 0x73, 0x66, 
0xce, 0xd7, 0xef, + 0x9c, 0x39, 0x67, 0x25, 0xb8, 0xd6, 0xb3, 0xed, 0x5e, 0x1f, 0x97, 0x7b, 0xd8, 0xb2, 0x07, 0x66, + 0xc7, 0x2d, 0x9f, 0xdc, 0x29, 0x9f, 0x18, 0x8e, 0x69, 0x58, 0x9e, 0xbb, 0x31, 0x74, 0x6c, 0xcf, + 0x46, 0xc8, 0x67, 0xd9, 0xe0, 0x2c, 0x1b, 0x27, 0x77, 0x4a, 0x97, 0xd8, 0x31, 0x63, 0x68, 0x96, + 0x0d, 0xcb, 0xb2, 0x3d, 0xc3, 0x33, 0x6d, 0x8b, 0x9d, 0x28, 0xfd, 0x3f, 0xdb, 0xed, 0xdb, 0x56, + 0xcf, 0x19, 0x59, 0x96, 0x69, 0xf5, 0xca, 0xf6, 0x10, 0x3b, 0x11, 0xa6, 0x8b, 0x8c, 0x89, 0xae, + 0x8e, 0x46, 0xc7, 0x65, 0x3c, 0x18, 0x7a, 0xa7, 0x6c, 0x73, 0x35, 0xbe, 0x79, 0x6c, 0xe2, 0x7e, + 0xb7, 0x3d, 0x30, 0xdc, 0xa7, 0x8c, 0xe3, 0x52, 0x9c, 0xc3, 0xf5, 0x9c, 0x51, 0xc7, 0xf3, 0x77, + 0xb5, 0xd7, 0x29, 0x40, 0x0f, 0x7d, 0x33, 0x0e, 0xb1, 0xb7, 0x8f, 0x3d, 0xa3, 0x6b, 0x78, 0x06, + 0x52, 0x20, 0xf5, 0x14, 0x9f, 0xaa, 0xd2, 0xaa, 0xb4, 0x96, 0xd3, 0xc9, 0x4f, 0xb4, 0x04, 0x99, + 0x13, 0xa3, 0x3f, 0xc2, 0xaa, 0x4c, 0x69, 0xfe, 0x02, 0x15, 0x41, 0x36, 0xbb, 0x6a, 0x9a, 0x92, + 0x64, 0xb3, 0x8b, 0xee, 0x41, 0xda, 0x3b, 0x1d, 0x62, 0x35, 0xb3, 0x2a, 0xad, 0x15, 0x2b, 0xdf, + 0xd9, 0x98, 0xf4, 0xc8, 0xc6, 0xa4, 0xb4, 0x8d, 0xd6, 0xe9, 0x10, 0xeb, 0xf4, 0x20, 0x5a, 0x86, + 0x39, 0x6b, 0x34, 0x38, 0xc2, 0x8e, 0x9a, 0xa5, 0x97, 0xb2, 0x15, 0x5a, 0x85, 0x7c, 0x17, 0xbb, + 0x1d, 0xc7, 0x1c, 0x12, 0xd7, 0xa8, 0xf3, 0x74, 0x33, 0x4c, 0x42, 0xf7, 0x21, 0x6d, 0x5a, 0xc7, + 0xb6, 0x9a, 0x5a, 0x4d, 0xad, 0xe5, 0x2b, 0xb7, 0x67, 0x14, 0x5d, 0xb7, 0x8e, 0xed, 0x9a, 0xe5, + 0x39, 0xa7, 0x3a, 0x3d, 0x5d, 0x3a, 0x84, 0x5c, 0x40, 0x12, 0x78, 0xe1, 0x76, 0xd8, 0x0b, 0xf9, + 0x4a, 0x89, 0x4b, 0xe1, 0xce, 0xdd, 0xd8, 0x33, 0x5d, 0xef, 0x21, 0xe1, 0x60, 0x1e, 0xda, 0x94, + 0x3f, 0x94, 0xb4, 0x47, 0x90, 0x26, 0x26, 0xa2, 0x25, 0x50, 0x5a, 0x8f, 0x0e, 0x6a, 0xed, 0x07, + 0x8d, 0xc3, 0x83, 0x5a, 0xb5, 0xbe, 0x53, 0xaf, 0xdd, 0x57, 0xfe, 0x0f, 0xe5, 0x61, 0xbe, 0xde, + 0x68, 0xd5, 0x76, 0x6b, 0xba, 0x22, 0xa1, 0x1c, 0x64, 0x76, 0xf6, 0x9a, 0x5b, 0x2d, 0x45, 0x46, + 0x59, 0x48, 0xef, 
0xec, 0x6d, 0xed, 0x2a, 0x29, 0x54, 0x80, 0x5c, 0xf5, 0xd3, 0x2d, 0x7d, 0xab, + 0xda, 0xaa, 0xe9, 0x4a, 0x1a, 0x01, 0xcc, 0x1d, 0xb6, 0xf4, 0x7a, 0x63, 0x57, 0xc9, 0x68, 0x7f, + 0x95, 0x01, 0xc6, 0x66, 0xa1, 0xcb, 0x00, 0xc4, 0x2c, 0x17, 0x7b, 0x6d, 0xb3, 0xcb, 0x14, 0xcf, + 0x31, 0x4a, 0xbd, 0xcb, 0xc2, 0x25, 0x07, 0xe1, 0x5a, 0x03, 0xc5, 0xc1, 0xc7, 0xd8, 0xc1, 0x56, + 0x07, 0xb7, 0xd9, 0xa1, 0x39, 0xba, 0x5b, 0x0c, 0xe8, 0x87, 0xf4, 0xe4, 0x7e, 0x98, 0xf3, 0xc8, + 0x1e, 0x59, 0x5d, 0x57, 0xcd, 0x50, 0x4f, 0x6b, 0x22, 0x4f, 0xeb, 0x9c, 0x77, 0x9b, 0xb0, 0xea, + 0x8b, 0x4e, 0x64, 0xed, 0xa2, 0x6d, 0xc8, 0x0e, 0x58, 0x08, 0xd4, 0x34, 0xbd, 0xe6, 0xe6, 0x6c, + 0x01, 0xd3, 0x83, 0x73, 0x08, 0x41, 0xda, 0x32, 0x06, 0x98, 0x61, 0x81, 0xfe, 0x8e, 0xc3, 0x24, + 0x3b, 0x01, 0x13, 0xed, 0xcb, 0x34, 0xcc, 0xb3, 0x6b, 0xd1, 0x75, 0x28, 0xb2, 0x14, 0xe6, 0xc6, + 0x2f, 0xd2, 0x03, 0x0b, 0x27, 0x81, 0x5c, 0x81, 0xd3, 0x96, 0x20, 0x43, 0x64, 0xb9, 0x14, 0x69, + 0x39, 0xdd, 0x5f, 0x20, 0x15, 0xe6, 0x3b, 0x0e, 0x36, 0x3c, 0xdc, 0x55, 0x17, 0x56, 0xa5, 0xb5, + 0x94, 0xce, 0x97, 0xe8, 0x06, 0x8c, 0x9d, 0xd9, 0xa6, 0x1a, 0x17, 0xe9, 0x5d, 0x85, 0x80, 0xda, + 0x20, 0xaa, 0x2f, 0x41, 0xc6, 0xf5, 0x0c, 0xc7, 0x53, 0x15, 0x7a, 0xdc, 0x5f, 0x10, 0x08, 0x62, + 0xab, 0xab, 0x16, 0x28, 0x8d, 0xfc, 0x44, 0xef, 0xc2, 0x62, 0x28, 0x12, 0x86, 0x8b, 0xdd, 0x89, + 0x90, 0x6d, 0x13, 0x2a, 0x61, 0x34, 0xfa, 0x1e, 0x76, 0x2c, 0xc3, 0xe3, 0x8c, 0xf3, 0x54, 0xe3, + 0x62, 0x40, 0xf6, 0x19, 0x55, 0x98, 0x7f, 0x36, 0x32, 0xfa, 0xa6, 0x77, 0x4a, 0x1d, 0x26, 0xe9, + 0x7c, 0x49, 0xb2, 0xf1, 0xd8, 0x24, 0xcc, 0x6a, 0x8e, 0x9e, 0x64, 0x2b, 0xf4, 0x7d, 0x96, 0x6b, + 0x40, 0x43, 0x77, 0x23, 0x21, 0x74, 0xf1, 0x04, 0x43, 0x77, 0x21, 0xd3, 0x31, 0xfa, 0x7d, 0x57, + 0xcd, 0xd3, 0xb3, 0x57, 0x13, 0xce, 0x56, 0x8d, 0x7e, 0x5f, 0xf7, 0xb9, 0xbf, 0x99, 0xbc, 0xfc, + 0x87, 0x0c, 0xf9, 0x90, 0x2c, 0x74, 0x05, 0xf2, 0x44, 0x1a, 0x07, 0x83, 0x8f, 0x9e, 0x1c, 0x21, + 0xf9, 0x48, 0xd0, 0xa0, 0x10, 0xec, 0xd3, 0x40, 0xe6, 
0x7c, 0x7c, 0x31, 0x0e, 0x1a, 0xc6, 0x12, + 0x64, 0x89, 0x29, 0xb4, 0x0a, 0x12, 0x77, 0x67, 0xf4, 0x60, 0x4d, 0xf6, 0x86, 0x8f, 0x89, 0xcb, + 0xb1, 0x47, 0x2b, 0x64, 0x4e, 0x0f, 0xd6, 0xa8, 0x0c, 0x6f, 0x71, 0xbe, 0x76, 0xdf, 0x7c, 0x8a, + 0xfb, 0xe6, 0x63, 0xdb, 0x26, 0xd9, 0x98, 0x5a, 0x93, 0x74, 0xc4, 0xb7, 0xf6, 0x82, 0x1d, 0xf4, + 0x09, 0x8b, 0x81, 0x4c, 0xfd, 0xf8, 0xde, 0x19, 0x7e, 0xfc, 0x76, 0x0a, 0xdd, 0x1f, 0x64, 0x98, + 0xaf, 0xfa, 0xce, 0x60, 0x69, 0x23, 0x05, 0x69, 0xc3, 0xd3, 0x55, 0x0e, 0xa5, 0xeb, 0x45, 0xc8, + 0xb9, 0xc6, 0x60, 0xd8, 0xc7, 0xc4, 0xdd, 0x7e, 0x1e, 0x67, 0x7d, 0x42, 0xbd, 0x8b, 0x6e, 0xc2, + 0x62, 0x34, 0x3b, 0x5d, 0xea, 0x8d, 0x9c, 0x5e, 0x08, 0xa7, 0x67, 0x24, 0xf3, 0x32, 0xd1, 0xcc, + 0xe3, 0x30, 0x4d, 0x4f, 0x87, 0x29, 0xd3, 0xf6, 0xdb, 0x71, 0xcf, 0x8f, 0xa1, 0x18, 0x2d, 0x8c, + 0x82, 0xda, 0x20, 0x89, 0x6a, 0xc3, 0x55, 0xc8, 0x8f, 0x86, 0x43, 0xec, 0xf8, 0x95, 0x97, 0x0a, + 0x4d, 0xe9, 0x40, 0x49, 0xf4, 0x1e, 0xed, 0x37, 0x69, 0x78, 0xbb, 0x3e, 0x18, 0xda, 0x8e, 0xc7, + 0x62, 0xee, 0xea, 0xf8, 0xd9, 0x08, 0xbb, 0xa2, 0x1a, 0x27, 0x09, 0x6a, 0xdc, 0x55, 0xc8, 0xbb, + 0xf6, 0xc8, 0xe9, 0xe0, 0xf6, 0xc8, 0x31, 0x5d, 0x8a, 0xa9, 0x9c, 0x0e, 0x3e, 0xe9, 0x81, 0x63, + 0xba, 0xe8, 0x53, 0x98, 0x3b, 0xb6, 0x9d, 0x81, 0xe1, 0xa9, 0x29, 0xfa, 0xb4, 0x0b, 0xdf, 0x57, + 0xa1, 0x06, 0x1b, 0x3b, 0xf4, 0x9c, 0xce, 0xce, 0xa3, 0x4d, 0x78, 0xc7, 0x22, 0xbf, 0xfa, 0xe6, + 0x0b, 0xdc, 0x8e, 0x1a, 0xef, 0xd2, 0x00, 0x66, 0xf5, 0x95, 0x80, 0x41, 0x0f, 0xbb, 0xc1, 0x45, + 0x4f, 0xe0, 0x02, 0x89, 0x4e, 0x7b, 0x80, 0x9d, 0x1e, 0x6e, 0x77, 0x6c, 0xeb, 0xd8, 0xec, 0x51, + 0x50, 0xe4, 0x2b, 0x3f, 0x98, 0x5d, 0x21, 0x12, 0xd8, 0x7d, 0x72, 0x43, 0x95, 0x5e, 0xe0, 0x87, + 0x7d, 0xd1, 0x8c, 0x52, 0x4b, 0x4f, 0x60, 0x49, 0xc4, 0x28, 0x00, 0xc3, 0xc7, 0x61, 0x30, 0x14, + 0xc5, 0x2f, 0x59, 0x70, 0x55, 0x93, 0xb7, 0x77, 0x61, 0x60, 0x34, 0x60, 0xce, 0xf7, 0x12, 0x5a, + 0x06, 0xb4, 0xd3, 0xd4, 0xf7, 0xb7, 0x5a, 0xb1, 0x26, 0xa1, 0x08, 0xc0, 0xe8, 0x0f, 0xab, 
0x3b, + 0x8a, 0x84, 0x2e, 0x81, 0xca, 0xd6, 0xd5, 0xe6, 0xfe, 0xc1, 0x5e, 0xad, 0x55, 0x6b, 0xef, 0xd6, + 0x1a, 0xcd, 0xfd, 0x7a, 0xf5, 0x50, 0x91, 0xb5, 0x4d, 0x58, 0x8e, 0x9b, 0xee, 0x0e, 0x6d, 0xcb, + 0x25, 0x0f, 0xe4, 0x42, 0xa8, 0xc4, 0xb9, 0xaa, 0xe4, 0x47, 0x3a, 0xa8, 0x71, 0xae, 0xf6, 0x39, + 0xac, 0x54, 0x69, 0xfe, 0x8c, 0x1f, 0x5f, 0x8e, 0xa5, 0x7b, 0x90, 0x0f, 0x61, 0x89, 0xba, 0x20, + 0x5f, 0xb9, 0x92, 0xfc, 0x70, 0xeb, 0x30, 0x06, 0x9a, 0xf6, 0x2f, 0x19, 0x56, 0x6a, 0xcf, 0x43, + 0x8a, 0x85, 0x2e, 0x9f, 0x0d, 0xa8, 0x71, 0xfd, 0xe5, 0xb8, 0xfe, 0xa4, 0x05, 0x1a, 0x3a, 0xf6, + 0x13, 0xdc, 0xa1, 0x77, 0xa4, 0xfc, 0x1a, 0xce, 0x28, 0xf5, 0x2e, 0xfa, 0x61, 0x00, 0xe4, 0x34, + 0x8d, 0x56, 0x45, 0xa4, 0xfe, 0x14, 0x1d, 0xe3, 0x50, 0x7e, 0x0f, 0x94, 0x23, 0xb3, 0xf7, 0x6c, + 0x84, 0x9d, 0xd3, 0x36, 0x6b, 0xb2, 0x58, 0x5d, 0x5f, 0xe4, 0xf4, 0xfb, 0x3e, 0x99, 0x24, 0x7a, + 0xc0, 0xea, 0x19, 0x47, 0x7d, 0xcc, 0x1e, 0xed, 0x02, 0xa7, 0xb6, 0x08, 0x51, 0xbb, 0x7b, 0x26, + 0x10, 0xde, 0x82, 0x45, 0x46, 0xdf, 0xae, 0xef, 0x7e, 0xf6, 0xa0, 0xa6, 0x3f, 0x52, 0x24, 0xed, + 0x63, 0x58, 0xda, 0xc5, 0x6f, 0xea, 0x53, 0xed, 0xa7, 0xa0, 0x1e, 0x62, 0xc3, 0xe9, 0x3c, 0x1e, + 0x5f, 0x10, 0x94, 0x8f, 0xab, 0x90, 0x1f, 0x37, 0x94, 0x01, 0x5c, 0x82, 0x8e, 0xd2, 0x77, 0xb7, + 0xd1, 0xc3, 0x6d, 0xcf, 0x7e, 0x8a, 0x2d, 0x56, 0xdc, 0x73, 0x84, 0xd2, 0x22, 0x04, 0x52, 0xe1, + 0xe9, 0xb6, 0x6b, 0xbe, 0xc0, 0x34, 0x18, 0x19, 0x3d, 0x4b, 0x08, 0x87, 0xe6, 0x0b, 0xac, 0xfd, + 0x4a, 0x82, 0x77, 0x04, 0x92, 0x19, 0x54, 0xb7, 0x60, 0x21, 0xa4, 0xbc, 0x2f, 0xfb, 0x6c, 0xb8, + 0xe5, 0xc7, 0xa6, 0xb9, 0xe4, 0x09, 0xb1, 0xf0, 0x73, 0xaf, 0x3d, 0xa1, 0x61, 0x81, 0x90, 0x0f, + 0xb8, 0x96, 0xda, 0x3d, 0x58, 0xb9, 0x8f, 0xfb, 0x58, 0x84, 0xf9, 0xd9, 0x5c, 0xf8, 0x5a, 0x82, + 0x95, 0x07, 0xc3, 0xae, 0xf1, 0xc6, 0x37, 0xc4, 0x73, 0x4b, 0x3e, 0x6f, 0x6e, 0xa1, 0x8f, 0xc8, + 0x1b, 0x41, 0x34, 0xa0, 0xc3, 0x1f, 0xc5, 0xa1, 0xe8, 0x61, 0xda, 0x21, 0xf3, 0xe1, 0xbe, 0xe1, + 0x3e, 0x25, 0xef, 0x07, 0x61, 
0x27, 0xbf, 0xb5, 0xbf, 0xc8, 0xf0, 0x76, 0x24, 0x12, 0x01, 0x00, + 0x04, 0xaf, 0xb0, 0x24, 0x7a, 0x85, 0xaf, 0x8d, 0xa3, 0x15, 0x7a, 0xe6, 0xb9, 0x4d, 0x0d, 0xbf, + 0x39, 0x8f, 0xe6, 0x6e, 0x6a, 0x22, 0x77, 0x27, 0x9f, 0xc3, 0x74, 0x62, 0xab, 0x9c, 0x11, 0xb4, + 0xca, 0x73, 0xe3, 0x56, 0x39, 0x8a, 0xcd, 0xf9, 0x44, 0x6c, 0x66, 0xa3, 0xd8, 0x24, 0x9b, 0x03, + 0xe3, 0x79, 0xdb, 0xef, 0x55, 0x73, 0xfe, 0xe6, 0xc0, 0x78, 0x4e, 0xfa, 0x05, 0x57, 0x3b, 0x85, + 0xe5, 0xb8, 0xb7, 0x18, 0x68, 0xbf, 0x07, 0x59, 0xfe, 0x55, 0x80, 0x01, 0xf6, 0x62, 0x42, 0x0c, + 0xf5, 0x80, 0x79, 0x66, 0xa8, 0xee, 0xc3, 0x52, 0xa4, 0x3c, 0xf3, 0x38, 0xdd, 0x85, 0x79, 0x76, + 0x17, 0xab, 0xcb, 0x89, 0x72, 0x39, 0xaf, 0xf6, 0x27, 0x09, 0x96, 0x22, 0xc0, 0xe5, 0xf7, 0x5d, + 0x06, 0x0e, 0xae, 0xd0, 0x24, 0xc9, 0x28, 0xf5, 0x6e, 0x58, 0x9c, 0x3c, 0xbb, 0xb8, 0x38, 0x48, + 0x53, 0xe7, 0x02, 0xe9, 0x5d, 0x58, 0x8a, 0x64, 0xe9, 0x6c, 0xaa, 0x6a, 0x15, 0xb8, 0x30, 0x2e, + 0x8e, 0x33, 0x9e, 0xf9, 0x9b, 0x0c, 0x4b, 0xf4, 0xb9, 0x7e, 0xb3, 0x76, 0x2a, 0x8c, 0x02, 0xf9, + 0x3c, 0x28, 0x30, 0x45, 0x0d, 0x8e, 0xff, 0x45, 0xe3, 0x13, 0xd1, 0x0d, 0x22, 0x1d, 0xff, 0x07, + 0xfb, 0x9b, 0xdf, 0x4a, 0xbc, 0xbc, 0xb0, 0x7e, 0xfb, 0xdc, 0xe5, 0x45, 0x34, 0x3d, 0x44, 0xd3, + 0x3b, 0x95, 0x98, 0xde, 0xe9, 0xd8, 0xd3, 0xf3, 0x82, 0x67, 0xf0, 0x58, 0x21, 0x96, 0xc1, 0x1f, + 0x42, 0x8e, 0x57, 0xa9, 0xc4, 0x14, 0x66, 0x07, 0xf5, 0x2c, 0xab, 0x5f, 0xb3, 0xa7, 0x70, 0x83, + 0xa7, 0x30, 0xbf, 0x82, 0xf9, 0xe2, 0x03, 0xc8, 0x72, 0xc9, 0x49, 0x39, 0xcc, 0x4f, 0xcd, 0x33, + 0xc1, 0xda, 0x9f, 0x83, 0x1c, 0x8e, 0x5d, 0x18, 0x9b, 0x67, 0xa5, 0xf8, 0x3c, 0x1b, 0x16, 0x28, + 0xcf, 0x2e, 0xf0, 0xeb, 0x65, 0xf1, 0x07, 0x3c, 0x8b, 0xcf, 0xa7, 0xac, 0xf6, 0x3e, 0x4d, 0xe3, + 0x73, 0x1e, 0xfa, 0x3b, 0x01, 0x9e, 0xe7, 0x60, 0x63, 0x10, 0x4f, 0xe4, 0x68, 0x9b, 0x28, 0xc5, + 0xdb, 0xc4, 0xc9, 0x3c, 0x97, 0x67, 0xe8, 0x46, 0xbf, 0xe9, 0x17, 0x4d, 0xfb, 0x0c, 0x96, 0xe3, + 0xe6, 0x7c, 0xcd, 0x87, 0x67, 0xfd, 0x27, 0x80, 0x26, 0x93, 0x17, 
0x5d, 0x87, 0xd5, 0x7a, 0x63, + 0xa7, 0xd9, 0xde, 0xaf, 0xe9, 0xbb, 0xb5, 0x76, 0xf3, 0xa0, 0xa6, 0x6f, 0xb5, 0xea, 0xcd, 0xc6, + 0xe4, 0x54, 0x52, 0xdf, 0x6d, 0x34, 0xf5, 0x5a, 0xbb, 0x51, 0xfb, 0x91, 0x22, 0xa1, 0x0b, 0x50, + 0xd8, 0x6f, 0x3e, 0xac, 0xb5, 0x5b, 0xcd, 0x76, 0x75, 0x6b, 0x6f, 0xef, 0x50, 0x91, 0x2b, 0x7f, + 0x94, 0x60, 0xc5, 0x57, 0xd9, 0xb4, 0x7a, 0x41, 0xeb, 0xe2, 0x9c, 0x98, 0x1d, 0x8c, 0xbe, 0x94, + 0xa0, 0x18, 0x35, 0x07, 0x09, 0xbf, 0x63, 0x08, 0x23, 0x58, 0x5a, 0x9f, 0x85, 0xd5, 0xf7, 0x8e, + 0x76, 0xe5, 0x8b, 0x7f, 0xfe, 0xfb, 0x2b, 0x59, 0xd5, 0xde, 0x0a, 0x7f, 0xb6, 0xdf, 0x74, 0x29, + 0xf3, 0xa6, 0xb4, 0x7e, 0x5b, 0xaa, 0xbc, 0x46, 0xa0, 0x44, 0xd5, 0x7b, 0x78, 0x07, 0xbd, 0x80, + 0x62, 0x74, 0x8e, 0x12, 0xeb, 0x27, 0x1c, 0x33, 0x4b, 0x97, 0x39, 0x6b, 0xe8, 0xeb, 0xfe, 0x46, + 0xe0, 0xe1, 0x29, 0x2a, 0x99, 0xf4, 0xaa, 0x4d, 0x69, 0x1d, 0xfd, 0x52, 0x02, 0x25, 0x3e, 0x88, + 0x21, 0xe1, 0x17, 0xf5, 0x29, 0xe3, 0x5a, 0xe9, 0x8c, 0xee, 0x51, 0xbb, 0x4e, 0x35, 0xb8, 0xa2, + 0x2d, 0x86, 0x35, 0xc0, 0x9e, 0xbb, 0x19, 0xee, 0x44, 0xd1, 0xef, 0x24, 0x50, 0xe2, 0xf3, 0x90, + 0x58, 0x8f, 0x29, 0x53, 0xd3, 0x59, 0x8e, 0xa8, 0x50, 0x35, 0x6e, 0x69, 0xef, 0xc6, 0xd4, 0x28, + 0xbf, 0x8c, 0x66, 0xe0, 0xab, 0x4d, 0xfc, 0x9c, 0x3b, 0xe7, 0xe7, 0x12, 0x14, 0x22, 0x13, 0x0f, + 0x5a, 0x13, 0x69, 0x24, 0x1a, 0x8a, 0xce, 0x74, 0xcb, 0x1a, 0xd5, 0x47, 0x43, 0xab, 0x67, 0xe9, + 0x83, 0xbe, 0x92, 0xe0, 0xc2, 0xc4, 0xfc, 0x82, 0x6e, 0x09, 0x71, 0x39, 0x65, 0xc0, 0x2a, 0x7d, + 0x77, 0x46, 0x6e, 0x06, 0xe4, 0x6b, 0x54, 0xb9, 0x8b, 0xda, 0x72, 0x5c, 0x39, 0x97, 0x1e, 0x21, + 0xbe, 0xf9, 0x99, 0x04, 0x4a, 0x7c, 0x9a, 0x11, 0x07, 0x6c, 0xca, 0xcc, 0x53, 0x5a, 0x9e, 0x28, + 0xe5, 0xb5, 0xc1, 0xd0, 0x3b, 0xe5, 0x9e, 0x59, 0x3f, 0xdb, 0x33, 0xbf, 0x97, 0x40, 0x89, 0xcf, + 0x43, 0x62, 0x1d, 0xa6, 0x4c, 0x4d, 0x67, 0x46, 0xe9, 0x2e, 0xd5, 0xa5, 0x5c, 0x39, 0x53, 0x97, + 0x28, 0x9a, 0x7f, 0x4d, 0x4a, 0x4e, 0xa4, 0x75, 0x9f, 0x52, 0x72, 0x44, 0xc3, 0xd0, 0x94, 0x92, + 0x23, 
0x9c, 0x04, 0xc4, 0xf9, 0x1d, 0x0a, 0xd3, 0x08, 0x0a, 0x91, 0xc4, 0x15, 0x23, 0x58, 0xd4, + 0xeb, 0x97, 0x92, 0x2a, 0xbb, 0x76, 0x99, 0xca, 0x5d, 0xd1, 0x16, 0x22, 0x75, 0x25, 0xe8, 0xc0, + 0xbf, 0x90, 0xa0, 0x10, 0xf1, 0xb9, 0x58, 0xae, 0x68, 0x26, 0x48, 0x96, 0xbb, 0x4e, 0xe5, 0x5e, + 0xaf, 0xbc, 0x13, 0xb1, 0xf7, 0xe5, 0xb8, 0xcb, 0x7e, 0x35, 0x56, 0xc2, 0x83, 0x42, 0x04, 0x7b, + 0x62, 0x1d, 0x44, 0xcd, 0xfe, 0x54, 0x6c, 0xb2, 0xc4, 0x58, 0x9f, 0x2e, 0x1e, 0xb9, 0x00, 0xe3, + 0x82, 0x80, 0x6e, 0x24, 0x17, 0x8c, 0x99, 0x6c, 0x66, 0x42, 0x51, 0x82, 0xd0, 0x21, 0x14, 0x22, + 0x4d, 0xba, 0xd8, 0x54, 0x51, 0x1f, 0x3f, 0xd5, 0x54, 0x1e, 0x61, 0x14, 0x89, 0x30, 0x1d, 0x19, + 0x08, 0xb0, 0xc6, 0x10, 0xe7, 0xbd, 0x6d, 0x12, 0xc4, 0x63, 0x0d, 0x79, 0x12, 0xc4, 0xe3, 0xad, + 0x72, 0x14, 0xe2, 0x74, 0x52, 0x8e, 0x56, 0xa2, 0x13, 0x0e, 0x71, 0xfe, 0x37, 0x81, 0x04, 0x88, + 0x47, 0x1b, 0xbb, 0x52, 0x52, 0x23, 0x1a, 0xc8, 0x5d, 0x08, 0xcb, 0xdd, 0x0c, 0x7a, 0x59, 0xf4, + 0x8b, 0x00, 0xe3, 0x89, 0x82, 0x45, 0x3d, 0x73, 0xb2, 0xe0, 0x5b, 0x54, 0xf0, 0xcd, 0x4a, 0x29, + 0x62, 0xf0, 0xcb, 0x50, 0x0f, 0xf8, 0x2a, 0xa4, 0xc6, 0x88, 0xa3, 0x3c, 0x51, 0x0b, 0x51, 0x33, + 0x3c, 0x35, 0xf4, 0x1a, 0x55, 0xe0, 0xd2, 0x7a, 0x82, 0x02, 0xc8, 0xa3, 0x30, 0xe7, 0x32, 0xa7, + 0xc1, 0xfc, 0x3c, 0x66, 0x33, 0xa9, 0x28, 0x41, 0xea, 0xf6, 0x63, 0x58, 0xee, 0xd8, 0x03, 0xc1, + 0x2d, 0xdb, 0x05, 0x8e, 0xeb, 0x03, 0x62, 0xcb, 0x81, 0xf4, 0xf9, 0x26, 0x67, 0xb2, 0xfb, 0x86, + 0xd5, 0xdb, 0xb0, 0x9d, 0x5e, 0xb9, 0x87, 0x2d, 0x6a, 0x69, 0xd9, 0xdf, 0x32, 0x86, 0xa6, 0x1b, + 0xfe, 0x4f, 0x8b, 0x8f, 0xf8, 0xef, 0xff, 0x48, 0xd2, 0xd1, 0x1c, 0xe5, 0x7c, 0xff, 0xbf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x7e, 0x5e, 0x37, 0xc0, 0x92, 0x21, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/genomics/v1alpha2/pipelines.pb.go b/vendor/google.golang.org/genproto/googleapis/genomics/v1alpha2/pipelines.pb.go new file mode 100644 index 000000000..5e79261b6 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/genomics/v1alpha2/pipelines.pb.go @@ -0,0 +1,1838 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/genomics/v1alpha2/pipelines.proto + +/* +Package genomics is a generated protocol buffer package. + +It is generated from these files: + google/genomics/v1alpha2/pipelines.proto + +It has these top-level messages: + ComputeEngine + RuntimeMetadata + Pipeline + CreatePipelineRequest + RunPipelineArgs + RunPipelineRequest + GetPipelineRequest + ListPipelinesRequest + ListPipelinesResponse + DeletePipelineRequest + GetControllerConfigRequest + ControllerConfig + TimestampEvent + SetOperationStatusRequest + ServiceAccount + LoggingOptions + PipelineResources + PipelineParameter + DockerExecutor +*/ +package genomics + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc1 "google.golang.org/genproto/googleapis/rpc/code" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The types of disks that may be attached to VMs. +type PipelineResources_Disk_Type int32 + +const ( + // Default disk type. 
Use one of the other options below. + PipelineResources_Disk_TYPE_UNSPECIFIED PipelineResources_Disk_Type = 0 + // Specifies a Google Compute Engine persistent hard disk. See + // https://cloud.google.com/compute/docs/disks/#pdspecs for details. + PipelineResources_Disk_PERSISTENT_HDD PipelineResources_Disk_Type = 1 + // Specifies a Google Compute Engine persistent solid-state disk. See + // https://cloud.google.com/compute/docs/disks/#pdspecs for details. + PipelineResources_Disk_PERSISTENT_SSD PipelineResources_Disk_Type = 2 + // Specifies a Google Compute Engine local SSD. + // See https://cloud.google.com/compute/docs/disks/local-ssd for details. + PipelineResources_Disk_LOCAL_SSD PipelineResources_Disk_Type = 3 +) + +var PipelineResources_Disk_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "PERSISTENT_HDD", + 2: "PERSISTENT_SSD", + 3: "LOCAL_SSD", +} +var PipelineResources_Disk_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "PERSISTENT_HDD": 1, + "PERSISTENT_SSD": 2, + "LOCAL_SSD": 3, +} + +func (x PipelineResources_Disk_Type) String() string { + return proto.EnumName(PipelineResources_Disk_Type_name, int32(x)) +} +func (PipelineResources_Disk_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{16, 0, 0} +} + +// Describes a Compute Engine resource that is being managed by a running +// [pipeline][google.genomics.v1alpha2.Pipeline]. +type ComputeEngine struct { + // The instance on which the operation is running. + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName" json:"instance_name,omitempty"` + // The availability zone in which the instance resides. + Zone string `protobuf:"bytes,2,opt,name=zone" json:"zone,omitempty"` + // The machine type of the instance. + MachineType string `protobuf:"bytes,3,opt,name=machine_type,json=machineType" json:"machine_type,omitempty"` + // The names of the disks that were created for this pipeline. 
+ DiskNames []string `protobuf:"bytes,4,rep,name=disk_names,json=diskNames" json:"disk_names,omitempty"` +} + +func (m *ComputeEngine) Reset() { *m = ComputeEngine{} } +func (m *ComputeEngine) String() string { return proto.CompactTextString(m) } +func (*ComputeEngine) ProtoMessage() {} +func (*ComputeEngine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ComputeEngine) GetInstanceName() string { + if m != nil { + return m.InstanceName + } + return "" +} + +func (m *ComputeEngine) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *ComputeEngine) GetMachineType() string { + if m != nil { + return m.MachineType + } + return "" +} + +func (m *ComputeEngine) GetDiskNames() []string { + if m != nil { + return m.DiskNames + } + return nil +} + +// Runtime metadata that will be populated in the +// [runtimeMetadata][google.genomics.v1.OperationMetadata.runtime_metadata] +// field of the Operation associated with a RunPipeline execution. +type RuntimeMetadata struct { + // Execution information specific to Google Compute Engine. + ComputeEngine *ComputeEngine `protobuf:"bytes,1,opt,name=compute_engine,json=computeEngine" json:"compute_engine,omitempty"` +} + +func (m *RuntimeMetadata) Reset() { *m = RuntimeMetadata{} } +func (m *RuntimeMetadata) String() string { return proto.CompactTextString(m) } +func (*RuntimeMetadata) ProtoMessage() {} +func (*RuntimeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *RuntimeMetadata) GetComputeEngine() *ComputeEngine { + if m != nil { + return m.ComputeEngine + } + return nil +} + +// The pipeline object. Represents a transformation from a set of input +// parameters to a set of output parameters. The transformation is defined +// as a docker image and command to run within that image. Each pipeline +// is run on a Google Compute Engine VM. 
A pipeline can be created with the +// `create` method and then later run with the `run` method, or a pipeline can +// be defined and run all at once with the `run` method. +type Pipeline struct { + // Required. The project in which to create the pipeline. The caller must have + // WRITE access. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. A user specified pipeline name that does not have to be unique. + // This name can be used for filtering Pipelines in ListPipelines. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // User-specified description. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // Input parameters of the pipeline. + InputParameters []*PipelineParameter `protobuf:"bytes,8,rep,name=input_parameters,json=inputParameters" json:"input_parameters,omitempty"` + // Output parameters of the pipeline. + OutputParameters []*PipelineParameter `protobuf:"bytes,9,rep,name=output_parameters,json=outputParameters" json:"output_parameters,omitempty"` + // Required. The executor indicates in which environment the pipeline runs. + // + // Types that are valid to be assigned to Executor: + // *Pipeline_Docker + Executor isPipeline_Executor `protobuf_oneof:"executor"` + // Required. Specifies resource requirements for the pipeline run. + // Required fields: + // + // * + // [minimumCpuCores][google.genomics.v1alpha2.PipelineResources.minimum_cpu_cores] + // + // * + // [minimumRamGb][google.genomics.v1alpha2.PipelineResources.minimum_ram_gb] + Resources *PipelineResources `protobuf:"bytes,6,opt,name=resources" json:"resources,omitempty"` + // Unique pipeline id that is generated by the service when CreatePipeline + // is called. Cannot be specified in the Pipeline used in the + // CreatePipelineRequest, and will be populated in the response to + // CreatePipeline and all subsequent Get and List calls. 
Indicates that the + // service has registered this pipeline. + PipelineId string `protobuf:"bytes,7,opt,name=pipeline_id,json=pipelineId" json:"pipeline_id,omitempty"` +} + +func (m *Pipeline) Reset() { *m = Pipeline{} } +func (m *Pipeline) String() string { return proto.CompactTextString(m) } +func (*Pipeline) ProtoMessage() {} +func (*Pipeline) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isPipeline_Executor interface { + isPipeline_Executor() +} + +type Pipeline_Docker struct { + Docker *DockerExecutor `protobuf:"bytes,5,opt,name=docker,oneof"` +} + +func (*Pipeline_Docker) isPipeline_Executor() {} + +func (m *Pipeline) GetExecutor() isPipeline_Executor { + if m != nil { + return m.Executor + } + return nil +} + +func (m *Pipeline) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *Pipeline) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Pipeline) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Pipeline) GetInputParameters() []*PipelineParameter { + if m != nil { + return m.InputParameters + } + return nil +} + +func (m *Pipeline) GetOutputParameters() []*PipelineParameter { + if m != nil { + return m.OutputParameters + } + return nil +} + +func (m *Pipeline) GetDocker() *DockerExecutor { + if x, ok := m.GetExecutor().(*Pipeline_Docker); ok { + return x.Docker + } + return nil +} + +func (m *Pipeline) GetResources() *PipelineResources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Pipeline) GetPipelineId() string { + if m != nil { + return m.PipelineId + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Pipeline) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Pipeline_OneofMarshaler, _Pipeline_OneofUnmarshaler, _Pipeline_OneofSizer, []interface{}{ + (*Pipeline_Docker)(nil), + } +} + +func _Pipeline_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Pipeline) + // executor + switch x := m.Executor.(type) { + case *Pipeline_Docker: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Docker); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Pipeline.Executor has unexpected type %T", x) + } + return nil +} + +func _Pipeline_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Pipeline) + switch tag { + case 5: // executor.docker + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DockerExecutor) + err := b.DecodeMessage(msg) + m.Executor = &Pipeline_Docker{msg} + return true, err + default: + return false, nil + } +} + +func _Pipeline_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Pipeline) + // executor + switch x := m.Executor.(type) { + case *Pipeline_Docker: + s := proto.Size(x.Docker) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The request to create a pipeline. The pipeline field here should not have +// `pipelineId` populated, as that will be populated by the server. +type CreatePipelineRequest struct { + // The pipeline to create. Should not have `pipelineId` populated. 
+ Pipeline *Pipeline `protobuf:"bytes,1,opt,name=pipeline" json:"pipeline,omitempty"` +} + +func (m *CreatePipelineRequest) Reset() { *m = CreatePipelineRequest{} } +func (m *CreatePipelineRequest) String() string { return proto.CompactTextString(m) } +func (*CreatePipelineRequest) ProtoMessage() {} +func (*CreatePipelineRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *CreatePipelineRequest) GetPipeline() *Pipeline { + if m != nil { + return m.Pipeline + } + return nil +} + +// The pipeline run arguments. +type RunPipelineArgs struct { + // Required. The project in which to run the pipeline. The caller must have + // WRITER access to all Google Cloud services and resources (e.g. Google + // Compute Engine) will be used. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Pipeline input arguments; keys are defined in the pipeline documentation. + // All input parameters that do not have default values must be specified. + // If parameters with defaults are specified here, the defaults will be + // overridden. + Inputs map[string]string `protobuf:"bytes,2,rep,name=inputs" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Pipeline output arguments; keys are defined in the pipeline + // documentation. All output parameters of without default values + // must be specified. If parameters with defaults are specified + // here, the defaults will be overridden. + Outputs map[string]string `protobuf:"bytes,3,rep,name=outputs" json:"outputs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The Google Cloud Service Account that will be used to access data and + // services. By default, the compute service account associated with + // `projectId` is used. 
+ ServiceAccount *ServiceAccount `protobuf:"bytes,4,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"` + // This field is deprecated. Use `labels` instead. Client-specified pipeline + // operation identifier. + ClientId string `protobuf:"bytes,5,opt,name=client_id,json=clientId" json:"client_id,omitempty"` + // Specifies resource requirements/overrides for the pipeline run. + Resources *PipelineResources `protobuf:"bytes,6,opt,name=resources" json:"resources,omitempty"` + // Required. Logging options. Used by the service to communicate results + // to the user. + Logging *LoggingOptions `protobuf:"bytes,7,opt,name=logging" json:"logging,omitempty"` + // How long to keep the VM up after a failure (for example docker command + // failed, copying input or output files failed, etc). While the VM is up, one + // can ssh into the VM to debug. Default is 0; maximum allowed value is 1 day. + KeepVmAliveOnFailureDuration *google_protobuf3.Duration `protobuf:"bytes,8,opt,name=keep_vm_alive_on_failure_duration,json=keepVmAliveOnFailureDuration" json:"keep_vm_alive_on_failure_duration,omitempty"` + // Labels to apply to this pipeline run. Labels will also be applied to + // compute resources (VM, disks) created by this pipeline run. When listing + // operations, operations can [filtered by labels] + // [google.longrunning.ListOperationsRequest.filter]. + // Label keys may not be empty; label values may be empty. Non-empty labels + // must be 1-63 characters long, and comply with [RFC1035] + // (https://www.ietf.org/rfc/rfc1035.txt). + // Specifically, the name must be 1-63 characters long and match the regular + // expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + // character must be a lowercase letter, and all following characters must be + // a dash, lowercase letter, or digit, except the last character, which cannot + // be a dash. 
+ Labels map[string]string `protobuf:"bytes,9,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *RunPipelineArgs) Reset() { *m = RunPipelineArgs{} } +func (m *RunPipelineArgs) String() string { return proto.CompactTextString(m) } +func (*RunPipelineArgs) ProtoMessage() {} +func (*RunPipelineArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *RunPipelineArgs) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *RunPipelineArgs) GetInputs() map[string]string { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *RunPipelineArgs) GetOutputs() map[string]string { + if m != nil { + return m.Outputs + } + return nil +} + +func (m *RunPipelineArgs) GetServiceAccount() *ServiceAccount { + if m != nil { + return m.ServiceAccount + } + return nil +} + +func (m *RunPipelineArgs) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *RunPipelineArgs) GetResources() *PipelineResources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *RunPipelineArgs) GetLogging() *LoggingOptions { + if m != nil { + return m.Logging + } + return nil +} + +func (m *RunPipelineArgs) GetKeepVmAliveOnFailureDuration() *google_protobuf3.Duration { + if m != nil { + return m.KeepVmAliveOnFailureDuration + } + return nil +} + +func (m *RunPipelineArgs) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// The request to run a pipeline. If `pipelineId` is specified, it +// refers to a saved pipeline created with CreatePipeline and set as +// the `pipelineId` of the returned Pipeline object. If +// `ephemeralPipeline` is specified, that pipeline is run once +// with the given args and not saved. It is an error to specify both +// `pipelineId` and `ephemeralPipeline`. `pipelineArgs` +// must be specified. 
+type RunPipelineRequest struct { + // Types that are valid to be assigned to Pipeline: + // *RunPipelineRequest_PipelineId + // *RunPipelineRequest_EphemeralPipeline + Pipeline isRunPipelineRequest_Pipeline `protobuf_oneof:"pipeline"` + // The arguments to use when running this pipeline. + PipelineArgs *RunPipelineArgs `protobuf:"bytes,3,opt,name=pipeline_args,json=pipelineArgs" json:"pipeline_args,omitempty"` +} + +func (m *RunPipelineRequest) Reset() { *m = RunPipelineRequest{} } +func (m *RunPipelineRequest) String() string { return proto.CompactTextString(m) } +func (*RunPipelineRequest) ProtoMessage() {} +func (*RunPipelineRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type isRunPipelineRequest_Pipeline interface { + isRunPipelineRequest_Pipeline() +} + +type RunPipelineRequest_PipelineId struct { + PipelineId string `protobuf:"bytes,1,opt,name=pipeline_id,json=pipelineId,oneof"` +} +type RunPipelineRequest_EphemeralPipeline struct { + EphemeralPipeline *Pipeline `protobuf:"bytes,2,opt,name=ephemeral_pipeline,json=ephemeralPipeline,oneof"` +} + +func (*RunPipelineRequest_PipelineId) isRunPipelineRequest_Pipeline() {} +func (*RunPipelineRequest_EphemeralPipeline) isRunPipelineRequest_Pipeline() {} + +func (m *RunPipelineRequest) GetPipeline() isRunPipelineRequest_Pipeline { + if m != nil { + return m.Pipeline + } + return nil +} + +func (m *RunPipelineRequest) GetPipelineId() string { + if x, ok := m.GetPipeline().(*RunPipelineRequest_PipelineId); ok { + return x.PipelineId + } + return "" +} + +func (m *RunPipelineRequest) GetEphemeralPipeline() *Pipeline { + if x, ok := m.GetPipeline().(*RunPipelineRequest_EphemeralPipeline); ok { + return x.EphemeralPipeline + } + return nil +} + +func (m *RunPipelineRequest) GetPipelineArgs() *RunPipelineArgs { + if m != nil { + return m.PipelineArgs + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RunPipelineRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RunPipelineRequest_OneofMarshaler, _RunPipelineRequest_OneofUnmarshaler, _RunPipelineRequest_OneofSizer, []interface{}{ + (*RunPipelineRequest_PipelineId)(nil), + (*RunPipelineRequest_EphemeralPipeline)(nil), + } +} + +func _RunPipelineRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RunPipelineRequest) + // pipeline + switch x := m.Pipeline.(type) { + case *RunPipelineRequest_PipelineId: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.PipelineId) + case *RunPipelineRequest_EphemeralPipeline: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EphemeralPipeline); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RunPipelineRequest.Pipeline has unexpected type %T", x) + } + return nil +} + +func _RunPipelineRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RunPipelineRequest) + switch tag { + case 1: // pipeline.pipeline_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pipeline = &RunPipelineRequest_PipelineId{x} + return true, err + case 2: // pipeline.ephemeral_pipeline + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Pipeline) + err := b.DecodeMessage(msg) + m.Pipeline = &RunPipelineRequest_EphemeralPipeline{msg} + return true, err + default: + return false, nil + } +} + +func _RunPipelineRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RunPipelineRequest) + // pipeline + switch x := m.Pipeline.(type) { + case *RunPipelineRequest_PipelineId: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.PipelineId))) + n += len(x.PipelineId) + case 
*RunPipelineRequest_EphemeralPipeline: + s := proto.Size(x.EphemeralPipeline) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A request to get a saved pipeline by id. +type GetPipelineRequest struct { + // Caller must have READ access to the project in which this pipeline + // is defined. + PipelineId string `protobuf:"bytes,1,opt,name=pipeline_id,json=pipelineId" json:"pipeline_id,omitempty"` +} + +func (m *GetPipelineRequest) Reset() { *m = GetPipelineRequest{} } +func (m *GetPipelineRequest) String() string { return proto.CompactTextString(m) } +func (*GetPipelineRequest) ProtoMessage() {} +func (*GetPipelineRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *GetPipelineRequest) GetPipelineId() string { + if m != nil { + return m.PipelineId + } + return "" +} + +// A request to list pipelines in a given project. Pipelines can be +// filtered by name using `namePrefix`: all pipelines with names that +// begin with `namePrefix` will be returned. Uses standard pagination: +// `pageSize` indicates how many pipelines to return, and +// `pageToken` comes from a previous ListPipelinesResponse to +// indicate offset. +type ListPipelinesRequest struct { + // Required. The name of the project to search for pipelines. Caller + // must have READ access to this project. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Pipelines with names that match this prefix should be + // returned. If unspecified, all pipelines in the project, up to + // `pageSize`, will be returned. + NamePrefix string `protobuf:"bytes,2,opt,name=name_prefix,json=namePrefix" json:"name_prefix,omitempty"` + // Number of pipelines to return at once. Defaults to 256, and max + // is 2048. 
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Token to use to indicate where to start getting results. + // If unspecified, returns the first page of results. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListPipelinesRequest) Reset() { *m = ListPipelinesRequest{} } +func (m *ListPipelinesRequest) String() string { return proto.CompactTextString(m) } +func (*ListPipelinesRequest) ProtoMessage() {} +func (*ListPipelinesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListPipelinesRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListPipelinesRequest) GetNamePrefix() string { + if m != nil { + return m.NamePrefix + } + return "" +} + +func (m *ListPipelinesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListPipelinesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response of ListPipelines. Contains at most `pageSize` +// pipelines. If it contains `pageSize` pipelines, and more pipelines +// exist, then `nextPageToken` will be populated and should be +// used as the `pageToken` argument to a subsequent ListPipelines +// request. +type ListPipelinesResponse struct { + // The matched pipelines. + Pipelines []*Pipeline `protobuf:"bytes,1,rep,name=pipelines" json:"pipelines,omitempty"` + // The token to use to get the next page of results. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListPipelinesResponse) Reset() { *m = ListPipelinesResponse{} } +func (m *ListPipelinesResponse) String() string { return proto.CompactTextString(m) } +func (*ListPipelinesResponse) ProtoMessage() {} +func (*ListPipelinesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ListPipelinesResponse) GetPipelines() []*Pipeline { + if m != nil { + return m.Pipelines + } + return nil +} + +func (m *ListPipelinesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request to delete a saved pipeline by ID. +type DeletePipelineRequest struct { + // Caller must have WRITE access to the project in which this pipeline + // is defined. + PipelineId string `protobuf:"bytes,1,opt,name=pipeline_id,json=pipelineId" json:"pipeline_id,omitempty"` +} + +func (m *DeletePipelineRequest) Reset() { *m = DeletePipelineRequest{} } +func (m *DeletePipelineRequest) String() string { return proto.CompactTextString(m) } +func (*DeletePipelineRequest) ProtoMessage() {} +func (*DeletePipelineRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *DeletePipelineRequest) GetPipelineId() string { + if m != nil { + return m.PipelineId + } + return "" +} + +// Request to get controller configuation. Should only be used +// by VMs created by the Pipelines Service and not by end users. +type GetControllerConfigRequest struct { + // The operation to retrieve controller configuration for. 
+ OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + ValidationToken uint64 `protobuf:"varint,2,opt,name=validation_token,json=validationToken" json:"validation_token,omitempty"` +} + +func (m *GetControllerConfigRequest) Reset() { *m = GetControllerConfigRequest{} } +func (m *GetControllerConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetControllerConfigRequest) ProtoMessage() {} +func (*GetControllerConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *GetControllerConfigRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *GetControllerConfigRequest) GetValidationToken() uint64 { + if m != nil { + return m.ValidationToken + } + return 0 +} + +// Stores the information that the controller will fetch from the +// server in order to run. Should only be used by VMs created by the +// Pipelines Service and not by end users. 
+type ControllerConfig struct { + Image string `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` + Cmd string `protobuf:"bytes,2,opt,name=cmd" json:"cmd,omitempty"` + GcsLogPath string `protobuf:"bytes,3,opt,name=gcs_log_path,json=gcsLogPath" json:"gcs_log_path,omitempty"` + MachineType string `protobuf:"bytes,4,opt,name=machine_type,json=machineType" json:"machine_type,omitempty"` + Vars map[string]string `protobuf:"bytes,5,rep,name=vars" json:"vars,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Disks map[string]string `protobuf:"bytes,6,rep,name=disks" json:"disks,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + GcsSources map[string]*ControllerConfig_RepeatedString `protobuf:"bytes,7,rep,name=gcs_sources,json=gcsSources" json:"gcs_sources,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + GcsSinks map[string]*ControllerConfig_RepeatedString `protobuf:"bytes,8,rep,name=gcs_sinks,json=gcsSinks" json:"gcs_sinks,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ControllerConfig) Reset() { *m = ControllerConfig{} } +func (m *ControllerConfig) String() string { return proto.CompactTextString(m) } +func (*ControllerConfig) ProtoMessage() {} +func (*ControllerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ControllerConfig) GetImage() string { + if m != nil { + return m.Image + } + return "" +} + +func (m *ControllerConfig) GetCmd() string { + if m != nil { + return m.Cmd + } + return "" +} + +func (m *ControllerConfig) GetGcsLogPath() string { + if m != nil { + return m.GcsLogPath + } + return "" +} + +func (m *ControllerConfig) GetMachineType() string { + if m != nil { + return m.MachineType + } + return "" +} + +func (m *ControllerConfig) GetVars() map[string]string { + if m != nil { + return m.Vars + } + return nil +} + +func (m *ControllerConfig) 
GetDisks() map[string]string { + if m != nil { + return m.Disks + } + return nil +} + +func (m *ControllerConfig) GetGcsSources() map[string]*ControllerConfig_RepeatedString { + if m != nil { + return m.GcsSources + } + return nil +} + +func (m *ControllerConfig) GetGcsSinks() map[string]*ControllerConfig_RepeatedString { + if m != nil { + return m.GcsSinks + } + return nil +} + +type ControllerConfig_RepeatedString struct { + Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ControllerConfig_RepeatedString) Reset() { *m = ControllerConfig_RepeatedString{} } +func (m *ControllerConfig_RepeatedString) String() string { return proto.CompactTextString(m) } +func (*ControllerConfig_RepeatedString) ProtoMessage() {} +func (*ControllerConfig_RepeatedString) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{11, 0} +} + +func (m *ControllerConfig_RepeatedString) GetValues() []string { + if m != nil { + return m.Values + } + return nil +} + +// Stores the list of events and times they occured for major events in job +// execution. +type TimestampEvent struct { + // String indicating the type of event + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The time this event occured. + Timestamp *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *TimestampEvent) Reset() { *m = TimestampEvent{} } +func (m *TimestampEvent) String() string { return proto.CompactTextString(m) } +func (*TimestampEvent) ProtoMessage() {} +func (*TimestampEvent) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *TimestampEvent) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *TimestampEvent) GetTimestamp() *google_protobuf4.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +// Request to set operation status. 
Should only be used by VMs +// created by the Pipelines Service and not by end users. +type SetOperationStatusRequest struct { + OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + TimestampEvents []*TimestampEvent `protobuf:"bytes,2,rep,name=timestamp_events,json=timestampEvents" json:"timestamp_events,omitempty"` + ErrorCode google_rpc1.Code `protobuf:"varint,3,opt,name=error_code,json=errorCode,enum=google.rpc.Code" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage" json:"error_message,omitempty"` + ValidationToken uint64 `protobuf:"varint,5,opt,name=validation_token,json=validationToken" json:"validation_token,omitempty"` +} + +func (m *SetOperationStatusRequest) Reset() { *m = SetOperationStatusRequest{} } +func (m *SetOperationStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SetOperationStatusRequest) ProtoMessage() {} +func (*SetOperationStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *SetOperationStatusRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *SetOperationStatusRequest) GetTimestampEvents() []*TimestampEvent { + if m != nil { + return m.TimestampEvents + } + return nil +} + +func (m *SetOperationStatusRequest) GetErrorCode() google_rpc1.Code { + if m != nil { + return m.ErrorCode + } + return google_rpc1.Code_OK +} + +func (m *SetOperationStatusRequest) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func (m *SetOperationStatusRequest) GetValidationToken() uint64 { + if m != nil { + return m.ValidationToken + } + return 0 +} + +// A Google Cloud Service Account. +type ServiceAccount struct { + // Email address of the service account. Defaults to `default`, + // which uses the compute service account associated with the project. 
+ Email string `protobuf:"bytes,1,opt,name=email" json:"email,omitempty"` + // List of scopes to be enabled for this service account on the VM. + // The following scopes are automatically included: + // + // * https://www.googleapis.com/auth/compute + // * https://www.googleapis.com/auth/devstorage.full_control + // * https://www.googleapis.com/auth/genomics + // * https://www.googleapis.com/auth/logging.write + // * https://www.googleapis.com/auth/monitoring.write + Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"` +} + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (m *ServiceAccount) String() string { return proto.CompactTextString(m) } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *ServiceAccount) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *ServiceAccount) GetScopes() []string { + if m != nil { + return m.Scopes + } + return nil +} + +// The logging options for the pipeline run. +type LoggingOptions struct { + // The location in Google Cloud Storage to which the pipeline logs + // will be copied. Can be specified as a fully qualified directory + // path, in which case logs will be output with a unique identifier + // as the filename in that directory, or as a fully specified path, + // which must end in `.log`, in which case that path will be + // used, and the user must ensure that logs are not + // overwritten. Stdout and stderr logs from the run are also + // generated and output as `-stdout.log` and `-stderr.log`. 
+ GcsPath string `protobuf:"bytes,1,opt,name=gcs_path,json=gcsPath" json:"gcs_path,omitempty"` +} + +func (m *LoggingOptions) Reset() { *m = LoggingOptions{} } +func (m *LoggingOptions) String() string { return proto.CompactTextString(m) } +func (*LoggingOptions) ProtoMessage() {} +func (*LoggingOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *LoggingOptions) GetGcsPath() string { + if m != nil { + return m.GcsPath + } + return "" +} + +// The system resources for the pipeline run. +type PipelineResources struct { + // The minimum number of cores to use. Defaults to 1. + MinimumCpuCores int32 `protobuf:"varint,1,opt,name=minimum_cpu_cores,json=minimumCpuCores" json:"minimum_cpu_cores,omitempty"` + // Whether to use preemptible VMs. Defaults to `false`. In order to use this, + // must be true for both create time and run time. Cannot be true at run time + // if false at create time. + Preemptible bool `protobuf:"varint,2,opt,name=preemptible" json:"preemptible,omitempty"` + // The minimum amount of RAM to use. Defaults to 3.75 (GB) + MinimumRamGb float64 `protobuf:"fixed64,3,opt,name=minimum_ram_gb,json=minimumRamGb" json:"minimum_ram_gb,omitempty"` + // Disks to attach. + Disks []*PipelineResources_Disk `protobuf:"bytes,4,rep,name=disks" json:"disks,omitempty"` + // List of Google Compute Engine availability zones to which resource + // creation will restricted. If empty, any zone may be chosen. + Zones []string `protobuf:"bytes,5,rep,name=zones" json:"zones,omitempty"` + // The size of the boot disk. Defaults to 10 (GB). + BootDiskSizeGb int32 `protobuf:"varint,6,opt,name=boot_disk_size_gb,json=bootDiskSizeGb" json:"boot_disk_size_gb,omitempty"` + // Whether to assign an external IP to the instance. This is an experimental + // feature that may go away. Defaults to false. + // Corresponds to `--no_address` flag for [gcloud compute instances create] + // (https://cloud.google.com/sdk/gcloud/reference/compute/instances/create). 
+ // In order to use this, must be true for both create time and run time. + // Cannot be true at run time if false at create time. If you need to ssh into + // a private IP VM for debugging, you can ssh to a public VM and then ssh into + // the private VM's Internal IP. If noAddress is set, this pipeline run may + // only load docker images from Google Container Registry and not Docker Hub. + // ** Note: To use this option, your project must be in Google Access for + // Private IPs Early Access Program.** + NoAddress bool `protobuf:"varint,7,opt,name=no_address,json=noAddress" json:"no_address,omitempty"` +} + +func (m *PipelineResources) Reset() { *m = PipelineResources{} } +func (m *PipelineResources) String() string { return proto.CompactTextString(m) } +func (*PipelineResources) ProtoMessage() {} +func (*PipelineResources) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *PipelineResources) GetMinimumCpuCores() int32 { + if m != nil { + return m.MinimumCpuCores + } + return 0 +} + +func (m *PipelineResources) GetPreemptible() bool { + if m != nil { + return m.Preemptible + } + return false +} + +func (m *PipelineResources) GetMinimumRamGb() float64 { + if m != nil { + return m.MinimumRamGb + } + return 0 +} + +func (m *PipelineResources) GetDisks() []*PipelineResources_Disk { + if m != nil { + return m.Disks + } + return nil +} + +func (m *PipelineResources) GetZones() []string { + if m != nil { + return m.Zones + } + return nil +} + +func (m *PipelineResources) GetBootDiskSizeGb() int32 { + if m != nil { + return m.BootDiskSizeGb + } + return 0 +} + +func (m *PipelineResources) GetNoAddress() bool { + if m != nil { + return m.NoAddress + } + return false +} + +// A Google Compute Engine disk resource specification. +type PipelineResources_Disk struct { + // Required. The name of the disk that can be used in the pipeline + // parameters. Must be 1 - 63 characters. + // The name "boot" is reserved for system use. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Required. The type of the disk to create. + Type PipelineResources_Disk_Type `protobuf:"varint,2,opt,name=type,enum=google.genomics.v1alpha2.PipelineResources_Disk_Type" json:"type,omitempty"` + // The size of the disk. Defaults to 500 (GB). + // This field is not applicable for local SSD. + SizeGb int32 `protobuf:"varint,3,opt,name=size_gb,json=sizeGb" json:"size_gb,omitempty"` + // The full or partial URL of the persistent disk to attach. See + // https://cloud.google.com/compute/docs/reference/latest/instances#resource + // and + // https://cloud.google.com/compute/docs/disks/persistent-disks#snapshots + // for more details. + Source string `protobuf:"bytes,4,opt,name=source" json:"source,omitempty"` + // Deprecated. Disks created by the Pipelines API will be deleted at the end + // of the pipeline run, regardless of what this field is set to. + AutoDelete bool `protobuf:"varint,6,opt,name=auto_delete,json=autoDelete" json:"auto_delete,omitempty"` + // Required at create time and cannot be overridden at run time. + // Specifies the path in the docker container where files on + // this disk should be located. For example, if `mountPoint` + // is `/mnt/disk`, and the parameter has `localPath` + // `inputs/file.txt`, the docker container can access the data at + // `/mnt/disk/inputs/file.txt`. 
+ MountPoint string `protobuf:"bytes,8,opt,name=mount_point,json=mountPoint" json:"mount_point,omitempty"` +} + +func (m *PipelineResources_Disk) Reset() { *m = PipelineResources_Disk{} } +func (m *PipelineResources_Disk) String() string { return proto.CompactTextString(m) } +func (*PipelineResources_Disk) ProtoMessage() {} +func (*PipelineResources_Disk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16, 0} } + +func (m *PipelineResources_Disk) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PipelineResources_Disk) GetType() PipelineResources_Disk_Type { + if m != nil { + return m.Type + } + return PipelineResources_Disk_TYPE_UNSPECIFIED +} + +func (m *PipelineResources_Disk) GetSizeGb() int32 { + if m != nil { + return m.SizeGb + } + return 0 +} + +func (m *PipelineResources_Disk) GetSource() string { + if m != nil { + return m.Source + } + return "" +} + +func (m *PipelineResources_Disk) GetAutoDelete() bool { + if m != nil { + return m.AutoDelete + } + return false +} + +func (m *PipelineResources_Disk) GetMountPoint() string { + if m != nil { + return m.MountPoint + } + return "" +} + +// Parameters facilitate setting and delivering data into the +// pipeline's execution environment. They are defined at create time, +// with optional defaults, and can be overridden at run time. +// +// If `localCopy` is unset, then the parameter specifies a string that +// is passed as-is into the pipeline, as the value of the environment +// variable with the given name. A default value can be optionally +// specified at create time. The default can be overridden at run time +// using the inputs map. If no default is given, a value must be +// supplied at runtime. +// +// If `localCopy` is defined, then the parameter specifies a data +// source or sink, both in Google Cloud Storage and on the Docker container +// where the pipeline computation is run. 
The [service account associated with +// the Pipeline][google.genomics.v1alpha2.RunPipelineArgs.service_account] (by +// default the project's Compute Engine service account) must have access to the +// Google Cloud Storage paths. +// +// At run time, the Google Cloud Storage paths can be overridden if a default +// was provided at create time, or must be set otherwise. The pipeline runner +// should add a key/value pair to either the inputs or outputs map. The +// indicated data copies will be carried out before/after pipeline execution, +// just as if the corresponding arguments were provided to `gsutil cp`. +// +// For example: Given the following `PipelineParameter`, specified +// in the `inputParameters` list: +// +// ``` +// {name: "input_file", localCopy: {path: "file.txt", disk: "pd1"}} +// ``` +// +// where `disk` is defined in the `PipelineResources` object as: +// +// ``` +// {name: "pd1", mountPoint: "/mnt/disk/"} +// ``` +// +// We create a disk named `pd1`, mount it on the host VM, and map +// `/mnt/pd1` to `/mnt/disk` in the docker container. At +// runtime, an entry for `input_file` would be required in the inputs +// map, such as: +// +// ``` +// inputs["input_file"] = "gs://my-bucket/bar.txt" +// ``` +// +// This would generate the following gsutil call: +// +// ``` +// gsutil cp gs://my-bucket/bar.txt /mnt/pd1/file.txt +// ``` +// +// The file `/mnt/pd1/file.txt` maps to `/mnt/disk/file.txt` in the +// Docker container. Acceptable paths are: +// +//
+// +// +// +// +// +// +// +//
Google Cloud storage pathLocal path
filefile
globdirectory
+// +// For outputs, the direction of the copy is reversed: +// +// ``` +// gsutil cp /mnt/disk/file.txt gs://my-bucket/bar.txt +// ``` +// +// Acceptable paths are: +// +// +// +// +// +// +// +// +// +// +// +// +// +// +// +//
Local pathGoogle Cloud Storage path
filefile
filedirectory - directory must already exist
globdirectory - directory will be created if it doesn't exist
+// +// One restriction due to docker limitations, is that for outputs that are found +// on the boot disk, the local path cannot be a glob and must be a file. +type PipelineParameter struct { + // Required. Name of the parameter - the pipeline runner uses this string + // as the key to the input and output maps in RunPipeline. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Human-readable description. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // The default value for this parameter. Can be overridden at runtime. + // If `localCopy` is present, then this must be a Google Cloud Storage path + // beginning with `gs://`. + DefaultValue string `protobuf:"bytes,5,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If present, this parameter is marked for copying to and from the VM. + // `LocalCopy` indicates where on the VM the file should be. The value + // given to this parameter (either at runtime or using `defaultValue`) + // must be the remote path where the file should be. 
+ LocalCopy *PipelineParameter_LocalCopy `protobuf:"bytes,6,opt,name=local_copy,json=localCopy" json:"local_copy,omitempty"` +} + +func (m *PipelineParameter) Reset() { *m = PipelineParameter{} } +func (m *PipelineParameter) String() string { return proto.CompactTextString(m) } +func (*PipelineParameter) ProtoMessage() {} +func (*PipelineParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *PipelineParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PipelineParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PipelineParameter) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +func (m *PipelineParameter) GetLocalCopy() *PipelineParameter_LocalCopy { + if m != nil { + return m.LocalCopy + } + return nil +} + +// LocalCopy defines how a remote file should be copied to and from the VM. +type PipelineParameter_LocalCopy struct { + // Required. The path within the user's docker container where + // this input should be localized to and from, relative to the specified + // disk's mount point. For example: file.txt, + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + // Required. The name of the disk where this parameter is + // located. Can be the name of one of the disks specified in the + // Resources field, or "boot", which represents the Docker + // instance's boot disk and has a mount point of `/`. 
+ Disk string `protobuf:"bytes,2,opt,name=disk" json:"disk,omitempty"` +} + +func (m *PipelineParameter_LocalCopy) Reset() { *m = PipelineParameter_LocalCopy{} } +func (m *PipelineParameter_LocalCopy) String() string { return proto.CompactTextString(m) } +func (*PipelineParameter_LocalCopy) ProtoMessage() {} +func (*PipelineParameter_LocalCopy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17, 0} } + +func (m *PipelineParameter_LocalCopy) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *PipelineParameter_LocalCopy) GetDisk() string { + if m != nil { + return m.Disk + } + return "" +} + +// The Docker execuctor specification. +type DockerExecutor struct { + // Required. Image name from either Docker Hub or Google Container Registry. + // Users that run pipelines must have READ access to the image. + ImageName string `protobuf:"bytes,1,opt,name=image_name,json=imageName" json:"image_name,omitempty"` + // Required. The command or newline delimited script to run. The command + // string will be executed within a bash shell. + // + // If the command exits with a non-zero exit code, output parameter + // de-localization will be skipped and the pipeline operation's + // [`error`][google.longrunning.Operation.error] field will be populated. + // + // Maximum command string length is 16384. 
+ Cmd string `protobuf:"bytes,2,opt,name=cmd" json:"cmd,omitempty"` +} + +func (m *DockerExecutor) Reset() { *m = DockerExecutor{} } +func (m *DockerExecutor) String() string { return proto.CompactTextString(m) } +func (*DockerExecutor) ProtoMessage() {} +func (*DockerExecutor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *DockerExecutor) GetImageName() string { + if m != nil { + return m.ImageName + } + return "" +} + +func (m *DockerExecutor) GetCmd() string { + if m != nil { + return m.Cmd + } + return "" +} + +func init() { + proto.RegisterType((*ComputeEngine)(nil), "google.genomics.v1alpha2.ComputeEngine") + proto.RegisterType((*RuntimeMetadata)(nil), "google.genomics.v1alpha2.RuntimeMetadata") + proto.RegisterType((*Pipeline)(nil), "google.genomics.v1alpha2.Pipeline") + proto.RegisterType((*CreatePipelineRequest)(nil), "google.genomics.v1alpha2.CreatePipelineRequest") + proto.RegisterType((*RunPipelineArgs)(nil), "google.genomics.v1alpha2.RunPipelineArgs") + proto.RegisterType((*RunPipelineRequest)(nil), "google.genomics.v1alpha2.RunPipelineRequest") + proto.RegisterType((*GetPipelineRequest)(nil), "google.genomics.v1alpha2.GetPipelineRequest") + proto.RegisterType((*ListPipelinesRequest)(nil), "google.genomics.v1alpha2.ListPipelinesRequest") + proto.RegisterType((*ListPipelinesResponse)(nil), "google.genomics.v1alpha2.ListPipelinesResponse") + proto.RegisterType((*DeletePipelineRequest)(nil), "google.genomics.v1alpha2.DeletePipelineRequest") + proto.RegisterType((*GetControllerConfigRequest)(nil), "google.genomics.v1alpha2.GetControllerConfigRequest") + proto.RegisterType((*ControllerConfig)(nil), "google.genomics.v1alpha2.ControllerConfig") + proto.RegisterType((*ControllerConfig_RepeatedString)(nil), "google.genomics.v1alpha2.ControllerConfig.RepeatedString") + proto.RegisterType((*TimestampEvent)(nil), "google.genomics.v1alpha2.TimestampEvent") + proto.RegisterType((*SetOperationStatusRequest)(nil), 
"google.genomics.v1alpha2.SetOperationStatusRequest") + proto.RegisterType((*ServiceAccount)(nil), "google.genomics.v1alpha2.ServiceAccount") + proto.RegisterType((*LoggingOptions)(nil), "google.genomics.v1alpha2.LoggingOptions") + proto.RegisterType((*PipelineResources)(nil), "google.genomics.v1alpha2.PipelineResources") + proto.RegisterType((*PipelineResources_Disk)(nil), "google.genomics.v1alpha2.PipelineResources.Disk") + proto.RegisterType((*PipelineParameter)(nil), "google.genomics.v1alpha2.PipelineParameter") + proto.RegisterType((*PipelineParameter_LocalCopy)(nil), "google.genomics.v1alpha2.PipelineParameter.LocalCopy") + proto.RegisterType((*DockerExecutor)(nil), "google.genomics.v1alpha2.DockerExecutor") + proto.RegisterEnum("google.genomics.v1alpha2.PipelineResources_Disk_Type", PipelineResources_Disk_Type_name, PipelineResources_Disk_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for PipelinesV1Alpha2 service + +type PipelinesV1Alpha2Client interface { + // Creates a pipeline that can be run later. Create takes a Pipeline that + // has all fields other than `pipelineId` populated, and then returns + // the same pipeline with `pipelineId` populated. This id can be used + // to run the pipeline. + // + // Caller must have WRITE permission to the project. + CreatePipeline(ctx context.Context, in *CreatePipelineRequest, opts ...grpc.CallOption) (*Pipeline, error) + // Runs a pipeline. If `pipelineId` is specified in the request, then + // run a saved pipeline. If `ephemeralPipeline` is specified, then run + // that pipeline once without saving a copy. 
+ // + // The caller must have READ permission to the project where the pipeline + // is stored and WRITE permission to the project where the pipeline will be + // run, as VMs will be created and storage will be used. + RunPipeline(ctx context.Context, in *RunPipelineRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Retrieves a pipeline based on ID. + // + // Caller must have READ permission to the project. + GetPipeline(ctx context.Context, in *GetPipelineRequest, opts ...grpc.CallOption) (*Pipeline, error) + // Lists pipelines. + // + // Caller must have READ permission to the project. + ListPipelines(ctx context.Context, in *ListPipelinesRequest, opts ...grpc.CallOption) (*ListPipelinesResponse, error) + // Deletes a pipeline based on ID. + // + // Caller must have WRITE permission to the project. + DeletePipeline(ctx context.Context, in *DeletePipelineRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Gets controller configuration information. Should only be called + // by VMs created by the Pipelines Service and not by end users. + GetControllerConfig(ctx context.Context, in *GetControllerConfigRequest, opts ...grpc.CallOption) (*ControllerConfig, error) + // Sets status of a given operation. Any new timestamps (as determined by + // description) are appended to TimestampEvents. Should only be called by VMs + // created by the Pipelines Service and not by end users. 
+ SetOperationStatus(ctx context.Context, in *SetOperationStatusRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) +} + +type pipelinesV1Alpha2Client struct { + cc *grpc.ClientConn +} + +func NewPipelinesV1Alpha2Client(cc *grpc.ClientConn) PipelinesV1Alpha2Client { + return &pipelinesV1Alpha2Client{cc} +} + +func (c *pipelinesV1Alpha2Client) CreatePipeline(ctx context.Context, in *CreatePipelineRequest, opts ...grpc.CallOption) (*Pipeline, error) { + out := new(Pipeline) + err := grpc.Invoke(ctx, "/google.genomics.v1alpha2.PipelinesV1Alpha2/CreatePipeline", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelinesV1Alpha2Client) RunPipeline(ctx context.Context, in *RunPipelineRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.genomics.v1alpha2.PipelinesV1Alpha2/RunPipeline", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelinesV1Alpha2Client) GetPipeline(ctx context.Context, in *GetPipelineRequest, opts ...grpc.CallOption) (*Pipeline, error) { + out := new(Pipeline) + err := grpc.Invoke(ctx, "/google.genomics.v1alpha2.PipelinesV1Alpha2/GetPipeline", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelinesV1Alpha2Client) ListPipelines(ctx context.Context, in *ListPipelinesRequest, opts ...grpc.CallOption) (*ListPipelinesResponse, error) { + out := new(ListPipelinesResponse) + err := grpc.Invoke(ctx, "/google.genomics.v1alpha2.PipelinesV1Alpha2/ListPipelines", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelinesV1Alpha2Client) DeletePipeline(ctx context.Context, in *DeletePipelineRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1alpha2.PipelinesV1Alpha2/DeletePipeline", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelinesV1Alpha2Client) GetControllerConfig(ctx context.Context, in *GetControllerConfigRequest, opts ...grpc.CallOption) (*ControllerConfig, error) { + out := new(ControllerConfig) + err := grpc.Invoke(ctx, "/google.genomics.v1alpha2.PipelinesV1Alpha2/GetControllerConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pipelinesV1Alpha2Client) SetOperationStatus(ctx context.Context, in *SetOperationStatusRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.genomics.v1alpha2.PipelinesV1Alpha2/SetOperationStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for PipelinesV1Alpha2 service + +type PipelinesV1Alpha2Server interface { + // Creates a pipeline that can be run later. Create takes a Pipeline that + // has all fields other than `pipelineId` populated, and then returns + // the same pipeline with `pipelineId` populated. This id can be used + // to run the pipeline. + // + // Caller must have WRITE permission to the project. + CreatePipeline(context.Context, *CreatePipelineRequest) (*Pipeline, error) + // Runs a pipeline. If `pipelineId` is specified in the request, then + // run a saved pipeline. If `ephemeralPipeline` is specified, then run + // that pipeline once without saving a copy. 
+ // + // The caller must have READ permission to the project where the pipeline + // is stored and WRITE permission to the project where the pipeline will be + // run, as VMs will be created and storage will be used. + RunPipeline(context.Context, *RunPipelineRequest) (*google_longrunning.Operation, error) + // Retrieves a pipeline based on ID. + // + // Caller must have READ permission to the project. + GetPipeline(context.Context, *GetPipelineRequest) (*Pipeline, error) + // Lists pipelines. + // + // Caller must have READ permission to the project. + ListPipelines(context.Context, *ListPipelinesRequest) (*ListPipelinesResponse, error) + // Deletes a pipeline based on ID. + // + // Caller must have WRITE permission to the project. + DeletePipeline(context.Context, *DeletePipelineRequest) (*google_protobuf2.Empty, error) + // Gets controller configuration information. Should only be called + // by VMs created by the Pipelines Service and not by end users. + GetControllerConfig(context.Context, *GetControllerConfigRequest) (*ControllerConfig, error) + // Sets status of a given operation. Any new timestamps (as determined by + // description) are appended to TimestampEvents. Should only be called by VMs + // created by the Pipelines Service and not by end users. 
+ SetOperationStatus(context.Context, *SetOperationStatusRequest) (*google_protobuf2.Empty, error) +} + +func RegisterPipelinesV1Alpha2Server(s *grpc.Server, srv PipelinesV1Alpha2Server) { + s.RegisterService(&_PipelinesV1Alpha2_serviceDesc, srv) +} + +func _PipelinesV1Alpha2_CreatePipeline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreatePipelineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelinesV1Alpha2Server).CreatePipeline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1alpha2.PipelinesV1Alpha2/CreatePipeline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelinesV1Alpha2Server).CreatePipeline(ctx, req.(*CreatePipelineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelinesV1Alpha2_RunPipeline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunPipelineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelinesV1Alpha2Server).RunPipeline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1alpha2.PipelinesV1Alpha2/RunPipeline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelinesV1Alpha2Server).RunPipeline(ctx, req.(*RunPipelineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelinesV1Alpha2_GetPipeline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPipelineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(PipelinesV1Alpha2Server).GetPipeline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1alpha2.PipelinesV1Alpha2/GetPipeline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelinesV1Alpha2Server).GetPipeline(ctx, req.(*GetPipelineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelinesV1Alpha2_ListPipelines_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPipelinesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelinesV1Alpha2Server).ListPipelines(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1alpha2.PipelinesV1Alpha2/ListPipelines", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelinesV1Alpha2Server).ListPipelines(ctx, req.(*ListPipelinesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelinesV1Alpha2_DeletePipeline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeletePipelineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelinesV1Alpha2Server).DeletePipeline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1alpha2.PipelinesV1Alpha2/DeletePipeline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelinesV1Alpha2Server).DeletePipeline(ctx, req.(*DeletePipelineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelinesV1Alpha2_GetControllerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 
+ in := new(GetControllerConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelinesV1Alpha2Server).GetControllerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1alpha2.PipelinesV1Alpha2/GetControllerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelinesV1Alpha2Server).GetControllerConfig(ctx, req.(*GetControllerConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PipelinesV1Alpha2_SetOperationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetOperationStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PipelinesV1Alpha2Server).SetOperationStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.genomics.v1alpha2.PipelinesV1Alpha2/SetOperationStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PipelinesV1Alpha2Server).SetOperationStatus(ctx, req.(*SetOperationStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _PipelinesV1Alpha2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.genomics.v1alpha2.PipelinesV1Alpha2", + HandlerType: (*PipelinesV1Alpha2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreatePipeline", + Handler: _PipelinesV1Alpha2_CreatePipeline_Handler, + }, + { + MethodName: "RunPipeline", + Handler: _PipelinesV1Alpha2_RunPipeline_Handler, + }, + { + MethodName: "GetPipeline", + Handler: _PipelinesV1Alpha2_GetPipeline_Handler, + }, + { + MethodName: "ListPipelines", + Handler: _PipelinesV1Alpha2_ListPipelines_Handler, + }, + { + MethodName: "DeletePipeline", + Handler: _PipelinesV1Alpha2_DeletePipeline_Handler, + }, + { + MethodName: "GetControllerConfig", + 
Handler: _PipelinesV1Alpha2_GetControllerConfig_Handler, + }, + { + MethodName: "SetOperationStatus", + Handler: _PipelinesV1Alpha2_SetOperationStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/genomics/v1alpha2/pipelines.proto", +} + +func init() { proto.RegisterFile("google/genomics/v1alpha2/pipelines.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2065 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4d, 0x73, 0xdb, 0xc8, + 0xd1, 0x36, 0x28, 0x4a, 0x22, 0x9a, 0x12, 0x45, 0xcf, 0xda, 0x6b, 0x9a, 0xf6, 0xbe, 0xb6, 0xe1, + 0x37, 0xbb, 0xb2, 0x9c, 0x22, 0x63, 0x79, 0x9d, 0xc8, 0x4a, 0xd5, 0xd6, 0x4a, 0x14, 0x2d, 0xb1, + 0x22, 0x4b, 0x0c, 0xa8, 0x55, 0xbe, 0x0e, 0xa8, 0x11, 0x38, 0x82, 0xb0, 0x02, 0x30, 0x08, 0x06, + 0x50, 0x59, 0x4e, 0x25, 0x55, 0x49, 0xe5, 0x90, 0xda, 0x4a, 0x2e, 0xc9, 0xfe, 0x88, 0x5c, 0x72, + 0xcc, 0xcf, 0xc8, 0x29, 0xa7, 0x9c, 0x72, 0xc9, 0x21, 0x3f, 0x21, 0xb9, 0xa5, 0x66, 0x06, 0x03, + 0x82, 0x1f, 0x92, 0xc8, 0xaa, 0x54, 0x6e, 0x33, 0x3d, 0xdd, 0x0f, 0x9e, 0xe9, 0xe9, 0xe9, 0xe9, + 0x06, 0xac, 0x3a, 0x94, 0x3a, 0x1e, 0x69, 0x3a, 0x24, 0xa0, 0xbe, 0x6b, 0xb3, 0xe6, 0xc5, 0x0b, + 0xec, 0x85, 0x67, 0x78, 0xbd, 0x19, 0xba, 0x21, 0xf1, 0xdc, 0x80, 0xb0, 0x46, 0x18, 0xd1, 0x98, + 0xa2, 0x9a, 0xd4, 0x6c, 0x28, 0xcd, 0x86, 0xd2, 0xac, 0x3f, 0x4c, 0x31, 0x70, 0xe8, 0x36, 0x71, + 0x10, 0xd0, 0x18, 0xc7, 0x2e, 0x0d, 0x52, 0xbb, 0xfa, 0xd3, 0x74, 0xd5, 0xa3, 0x81, 0x13, 0x25, + 0x41, 0xe0, 0x06, 0x4e, 0x93, 0x86, 0x24, 0x1a, 0x52, 0xfa, 0xbf, 0x54, 0x49, 0xcc, 0x4e, 0x92, + 0xd3, 0x66, 0x3f, 0x91, 0x0a, 0xe9, 0xfa, 0x83, 0xd1, 0x75, 0xe2, 0x87, 0xf1, 0x65, 0xba, 0xf8, + 0x68, 0x74, 0x31, 0x76, 0x7d, 0xc2, 0x62, 0xec, 0x87, 0xa9, 0xc2, 0xdd, 0x54, 0x21, 0x0a, 0xed, + 0xa6, 0x4d, 0xfb, 0x44, 0x8a, 0x8d, 0xaf, 0x34, 0x58, 0x6e, 0x51, 0x3f, 0x4c, 0x62, 0xd2, 0x0e, + 0x1c, 0x37, 0x20, 0xe8, 0x29, 0x2c, 0xbb, 0x01, 0x8b, 0x71, 0x60, 0x13, 0x2b, 0xc0, 
0x3e, 0xa9, + 0x69, 0x8f, 0xb5, 0x55, 0xdd, 0x5c, 0x52, 0xc2, 0x03, 0xec, 0x13, 0x84, 0xa0, 0xf8, 0x9e, 0x06, + 0xa4, 0x56, 0x10, 0x6b, 0x62, 0x8c, 0x9e, 0xc0, 0x92, 0x8f, 0xed, 0x33, 0x37, 0x20, 0x56, 0x7c, + 0x19, 0x92, 0xda, 0x9c, 0x58, 0x2b, 0xa7, 0xb2, 0xa3, 0xcb, 0x90, 0xa0, 0x8f, 0x00, 0xfa, 0x2e, + 0x3b, 0x17, 0xb8, 0xac, 0x56, 0x7c, 0x3c, 0xb7, 0xaa, 0x9b, 0x3a, 0x97, 0x70, 0x50, 0x66, 0x60, + 0x58, 0x31, 0x93, 0x80, 0x33, 0x7f, 0x4b, 0x62, 0xdc, 0xc7, 0x31, 0x46, 0x07, 0x50, 0xb1, 0x25, + 0x3d, 0x8b, 0x08, 0x7e, 0x82, 0x4e, 0x79, 0xfd, 0x93, 0xc6, 0x55, 0x47, 0xd1, 0x18, 0xda, 0x8e, + 0xb9, 0x6c, 0xe7, 0xa7, 0xc6, 0x5f, 0xe6, 0xa0, 0xd4, 0x4d, 0x4f, 0x95, 0xd3, 0x09, 0x23, 0xfa, + 0x25, 0xb1, 0x63, 0xcb, 0xed, 0xa7, 0xfb, 0xd4, 0x53, 0x49, 0xa7, 0xcf, 0x37, 0x29, 0x1c, 0x90, + 0x6e, 0x92, 0x8f, 0xd1, 0x63, 0x28, 0xf7, 0x09, 0xb3, 0x23, 0x37, 0xe4, 0x27, 0xa3, 0xf6, 0x98, + 0x13, 0xa1, 0x63, 0xa8, 0xba, 0x41, 0x98, 0xc4, 0x56, 0x88, 0x23, 0xec, 0x93, 0x98, 0x44, 0xac, + 0x56, 0x7a, 0x3c, 0xb7, 0x5a, 0x5e, 0x7f, 0x7e, 0x35, 0x67, 0x45, 0xa9, 0xab, 0x6c, 0xcc, 0x15, + 0x01, 0x92, 0xcd, 0x19, 0xfa, 0x21, 0xdc, 0xa6, 0x49, 0x3c, 0x02, 0xac, 0xcf, 0x0e, 0x5c, 0x95, + 0x28, 0x39, 0xe4, 0x6d, 0x58, 0xe8, 0x53, 0xfb, 0x9c, 0x44, 0xb5, 0x79, 0xe1, 0xdb, 0xd5, 0xab, + 0xe1, 0x76, 0x84, 0x5e, 0xfb, 0x1d, 0xb1, 0x93, 0x98, 0x46, 0x7b, 0xb7, 0xcc, 0xd4, 0x12, 0x75, + 0x40, 0x8f, 0x08, 0xa3, 0x49, 0x64, 0x13, 0x56, 0x5b, 0x10, 0x30, 0x53, 0xb0, 0x32, 0x95, 0x89, + 0x39, 0xb0, 0x46, 0x8f, 0xa0, 0xac, 0xee, 0x1d, 0x3f, 0x96, 0x45, 0xe1, 0x62, 0x50, 0xa2, 0x4e, + 0x7f, 0x1b, 0xa0, 0x44, 0x52, 0x06, 0xc6, 0x0f, 0xe0, 0x6e, 0x2b, 0x22, 0x38, 0x26, 0x03, 0xc8, + 0x9f, 0x26, 0x84, 0xc5, 0xe8, 0x33, 0x28, 0x29, 0x93, 0x34, 0x64, 0x8c, 0x29, 0xf8, 0x64, 0x36, + 0xc6, 0x9f, 0x17, 0x44, 0x30, 0xaa, 0x95, 0xad, 0xc8, 0x61, 0x37, 0xc5, 0xcb, 0x5b, 0x58, 0x10, + 0x87, 0xc6, 0x6a, 0x05, 0x71, 0x2c, 0xaf, 0xae, 0xfe, 0xe0, 0x08, 0x72, 0xa3, 0x23, 0xec, 0xda, + 0x41, 0x1c, 0x5d, 0x9a, 
0x29, 0x08, 0xea, 0xc2, 0xa2, 0x3c, 0x2a, 0x56, 0x9b, 0x13, 0x78, 0xdf, + 0x9e, 0x1e, 0xef, 0x50, 0x1a, 0x4a, 0x40, 0x05, 0x83, 0xbe, 0x0f, 0x2b, 0x8c, 0x44, 0x17, 0xae, + 0x4d, 0x2c, 0x6c, 0xdb, 0x34, 0x09, 0xe2, 0x5a, 0xf1, 0xa6, 0x13, 0xef, 0x49, 0x83, 0x2d, 0xa9, + 0x6f, 0x56, 0xd8, 0xd0, 0x1c, 0x3d, 0x00, 0xdd, 0xf6, 0x5c, 0x12, 0x08, 0x8f, 0xcc, 0x0b, 0x8f, + 0x94, 0xa4, 0xa0, 0xd3, 0xff, 0x6f, 0x06, 0xc5, 0x36, 0x2c, 0x7a, 0xd4, 0x71, 0xdc, 0xc0, 0x11, + 0x01, 0x71, 0x2d, 0xe5, 0x7d, 0xa9, 0x78, 0x28, 0xee, 0x23, 0x33, 0x95, 0x21, 0x3a, 0x81, 0x27, + 0xe7, 0x84, 0x84, 0xd6, 0x85, 0x6f, 0x61, 0xcf, 0xbd, 0x20, 0x16, 0x0d, 0xac, 0x53, 0xec, 0x7a, + 0x49, 0x44, 0x2c, 0x95, 0x6b, 0x6b, 0x25, 0x81, 0x7e, 0x5f, 0xa1, 0xab, 0x7c, 0xda, 0xd8, 0x49, + 0x15, 0xcc, 0x87, 0x1c, 0xe3, 0xd8, 0xdf, 0xe2, 0x08, 0x87, 0xc1, 0x1b, 0x69, 0xaf, 0x56, 0x79, + 0x0c, 0x78, 0xf8, 0x84, 0x78, 0xea, 0x6a, 0xce, 0x10, 0x03, 0xfb, 0xc2, 0x2e, 0x8d, 0x01, 0x09, + 0x52, 0x7f, 0x0d, 0xe5, 0x5c, 0x68, 0xa0, 0x2a, 0xcc, 0x9d, 0x93, 0xcb, 0x34, 0xf2, 0xf8, 0x10, + 0xdd, 0x81, 0xf9, 0x0b, 0xec, 0x25, 0x2a, 0x49, 0xc9, 0xc9, 0x66, 0x61, 0x43, 0xab, 0x6f, 0xc2, + 0x52, 0x3e, 0x0a, 0x66, 0xb2, 0x7d, 0x0d, 0xe5, 0x1c, 0x9b, 0x59, 0x4c, 0x8d, 0x7f, 0x6a, 0x80, + 0x72, 0x3b, 0x53, 0xd7, 0xf1, 0xc9, 0xf0, 0xa5, 0x16, 0x50, 0x7b, 0xb7, 0xf2, 0xd7, 0x1a, 0xf5, + 0x00, 0x91, 0xf0, 0x8c, 0xf8, 0x24, 0xc2, 0x9e, 0x95, 0xdd, 0xdd, 0xc2, 0xb4, 0x77, 0x77, 0xef, + 0x96, 0x79, 0x3b, 0xb3, 0xcf, 0x52, 0xfc, 0x01, 0x2c, 0x67, 0xdf, 0xc5, 0x91, 0xc3, 0x44, 0xc6, + 0x2e, 0xaf, 0x3f, 0x9b, 0xfa, 0x58, 0xcc, 0xa5, 0x30, 0x37, 0xe3, 0xb9, 0x27, 0x4b, 0x11, 0xaf, + 0x00, 0xed, 0x92, 0x78, 0x74, 0xa7, 0x8f, 0x26, 0xec, 0x34, 0xbf, 0x4f, 0xe3, 0xf7, 0x1a, 0xdc, + 0xd9, 0x77, 0x59, 0x66, 0xc8, 0x94, 0xe5, 0x0d, 0xe9, 0xe5, 0x11, 0x94, 0xf9, 0x13, 0x64, 0x85, + 0x11, 0x39, 0x75, 0xdf, 0xa5, 0x9e, 0x07, 0x2e, 0xea, 0x0a, 0x09, 0xbf, 0x8b, 0x21, 0x76, 0x88, + 0xc5, 0xdc, 0xf7, 0xf2, 0xf5, 0x9d, 0x37, 0x4b, 0x5c, 0xd0, 
0x73, 0xdf, 0xcb, 0xb7, 0x8e, 0x2f, + 0xc6, 0xf4, 0x9c, 0x04, 0xe2, 0xda, 0x73, 0x70, 0xec, 0x90, 0x23, 0x2e, 0x30, 0x7e, 0xa9, 0xc1, + 0xdd, 0x11, 0x52, 0x2c, 0xa4, 0x01, 0x23, 0xe8, 0x73, 0xd0, 0xb3, 0x32, 0xa8, 0xa6, 0x89, 0xa0, + 0x9e, 0x26, 0x93, 0x0e, 0x8c, 0xd0, 0xc7, 0xb0, 0x12, 0x90, 0x77, 0xfc, 0xdd, 0xca, 0xbe, 0x2f, + 0xc9, 0x2f, 0x73, 0x71, 0x37, 0xe3, 0xb0, 0x01, 0x77, 0x77, 0x88, 0x47, 0xc6, 0x73, 0xf9, 0x8d, + 0x2e, 0xfd, 0x12, 0xea, 0xbb, 0x24, 0x6e, 0xd1, 0x20, 0x8e, 0xa8, 0xe7, 0x91, 0xa8, 0x45, 0x83, + 0x53, 0xd7, 0x19, 0xc4, 0xde, 0x52, 0x56, 0x6c, 0x0d, 0xec, 0xcb, 0x99, 0xac, 0xd3, 0x47, 0xcf, + 0xa0, 0x7a, 0x81, 0x3d, 0xb7, 0x2f, 0x75, 0x06, 0x1c, 0x8b, 0xe6, 0xca, 0x40, 0x2e, 0x59, 0xfe, + 0x6d, 0x01, 0xaa, 0xa3, 0x5f, 0xe2, 0xf7, 0xc1, 0xf5, 0xb1, 0xa3, 0x8a, 0x25, 0x39, 0xe1, 0xf7, + 0xc6, 0xf6, 0xfb, 0xe9, 0x66, 0xf9, 0x10, 0x3d, 0x86, 0x25, 0xc7, 0x66, 0x96, 0x47, 0x1d, 0x2b, + 0xc4, 0xf1, 0x59, 0x5a, 0x3f, 0x80, 0x63, 0xb3, 0x7d, 0xea, 0x74, 0x71, 0x7c, 0x36, 0x56, 0x45, + 0x15, 0xc7, 0xab, 0xa8, 0x3d, 0x28, 0x5e, 0xe0, 0x88, 0xd5, 0xe6, 0xc5, 0x61, 0x7c, 0x7a, 0x5d, + 0x25, 0x34, 0x4c, 0xb3, 0x71, 0x8c, 0xa3, 0x34, 0xc1, 0x08, 0x04, 0xf4, 0x3d, 0x98, 0xe7, 0xd5, + 0x17, 0x4f, 0xce, 0x37, 0x24, 0xab, 0x31, 0xa8, 0x1d, 0x6e, 0x27, 0xb1, 0x24, 0x06, 0xfa, 0x09, + 0x94, 0xf9, 0xde, 0x54, 0xbe, 0x5f, 0x14, 0x90, 0x9b, 0x33, 0x40, 0xee, 0xda, 0xac, 0x27, 0x8d, + 0x25, 0x2e, 0x77, 0x4b, 0x2a, 0x40, 0x5f, 0x80, 0x2e, 0xc0, 0xdd, 0xe0, 0x5c, 0x95, 0x53, 0x1b, + 0x33, 0x42, 0x73, 0x53, 0x09, 0x5c, 0x72, 0xd2, 0x69, 0x7d, 0x15, 0x2a, 0x26, 0x09, 0x79, 0xfd, + 0xd0, 0xef, 0xc5, 0x11, 0x7f, 0x24, 0x3e, 0x84, 0x05, 0x91, 0xcc, 0x64, 0xac, 0xeb, 0x66, 0x3a, + 0xab, 0x7f, 0x07, 0xf4, 0xcc, 0x7b, 0x33, 0xe5, 0xd2, 0x0d, 0x80, 0x81, 0xaf, 0x66, 0xb2, 0x7c, + 0x07, 0x2b, 0x23, 0x2e, 0x99, 0x60, 0x7e, 0x98, 0x37, 0x2f, 0xaf, 0xbf, 0x9e, 0xc1, 0x29, 0xc3, + 0x3b, 0xcf, 0x7f, 0xf9, 0x02, 0x96, 0x87, 0x3c, 0xf6, 0x3f, 0xfa, 0xae, 0xe1, 0x41, 0xe5, 0x48, + 
0xf5, 0x2d, 0xed, 0x0b, 0x12, 0xc4, 0xa3, 0xf5, 0xb6, 0x36, 0x5e, 0x6f, 0x6f, 0x80, 0x9e, 0xf5, + 0x3a, 0x29, 0x99, 0xfa, 0xd8, 0xeb, 0x9d, 0xa1, 0x9a, 0x03, 0x65, 0xe3, 0xeb, 0x02, 0xdc, 0xef, + 0x91, 0xf8, 0x50, 0xe5, 0x81, 0x5e, 0x8c, 0xe3, 0x84, 0xcd, 0x90, 0x35, 0x7a, 0x50, 0xcd, 0xd0, + 0x2c, 0xc2, 0xf9, 0xaa, 0xd2, 0xef, 0x9a, 0xea, 0x64, 0x78, 0x83, 0xe6, 0x4a, 0x3c, 0x34, 0x67, + 0xa8, 0x09, 0x40, 0xa2, 0x88, 0x46, 0x16, 0xef, 0xd2, 0x44, 0x82, 0xa8, 0xac, 0x57, 0x15, 0x5c, + 0x14, 0xda, 0x8d, 0x16, 0xed, 0x13, 0x53, 0x17, 0x3a, 0x7c, 0xc8, 0x1b, 0x36, 0x69, 0xe0, 0x13, + 0xc6, 0x78, 0x0e, 0x92, 0x29, 0x63, 0x49, 0x08, 0xdf, 0x4a, 0xd9, 0xc4, 0x04, 0x37, 0x3f, 0x39, + 0xc1, 0x7d, 0x06, 0x95, 0xe1, 0xa2, 0x8f, 0x87, 0x28, 0xf1, 0xb1, 0xeb, 0xa9, 0xec, 0x26, 0x26, + 0xfc, 0xa6, 0x30, 0x9b, 0x86, 0x44, 0xee, 0x59, 0x37, 0xd3, 0x99, 0xf1, 0x1c, 0x2a, 0xc3, 0x15, + 0x18, 0xba, 0x0f, 0xfc, 0xc6, 0xc9, 0x8c, 0x27, 0x21, 0x16, 0x1d, 0x9b, 0xf1, 0x74, 0x67, 0xfc, + 0xbd, 0x08, 0xb7, 0xc7, 0x0a, 0x3f, 0xb4, 0x06, 0xb7, 0x7d, 0x37, 0x70, 0xfd, 0xc4, 0xb7, 0xec, + 0x30, 0xb1, 0x6c, 0x1a, 0x89, 0xfb, 0xc8, 0x5f, 0xb4, 0x95, 0x74, 0xa1, 0x15, 0x26, 0x2d, 0x2e, + 0xe6, 0x11, 0x12, 0x46, 0x84, 0xf7, 0xc2, 0xee, 0x89, 0x27, 0xc3, 0xb1, 0x64, 0xe6, 0x45, 0xe8, + 0xff, 0xa1, 0xa2, 0xd0, 0x22, 0xec, 0x5b, 0xce, 0x89, 0xf0, 0xaa, 0x66, 0x2e, 0xa5, 0x52, 0x13, + 0xfb, 0xbb, 0x27, 0xe8, 0x8d, 0xca, 0x85, 0x45, 0x71, 0x82, 0xdf, 0x9a, 0xa1, 0x50, 0x15, 0xc9, + 0x50, 0xa5, 0xc1, 0x3b, 0x30, 0xcf, 0xdb, 0x61, 0x99, 0x9e, 0x75, 0x53, 0x4e, 0xd0, 0x33, 0xb8, + 0x7d, 0x42, 0x69, 0x6c, 0x89, 0xf6, 0x97, 0x3f, 0xd0, 0x9c, 0xc6, 0x82, 0xd8, 0x51, 0x85, 0x2f, + 0x70, 0x04, 0xfe, 0x4e, 0xef, 0x9e, 0xf0, 0x97, 0x3a, 0xa0, 0x16, 0xee, 0xf7, 0x23, 0xc2, 0x98, + 0xa8, 0x76, 0x4b, 0xa6, 0x1e, 0xd0, 0x2d, 0x29, 0xa8, 0xff, 0xa9, 0x00, 0x45, 0xae, 0x9d, 0xb5, + 0xa7, 0x5a, 0xae, 0x3d, 0xed, 0x40, 0x51, 0xbc, 0x1a, 0x05, 0x11, 0x36, 0xaf, 0x66, 0xdd, 0x43, + 0x83, 0xbf, 0x2f, 0xa6, 0x80, 0x40, 
0xf7, 0x60, 0x51, 0xf1, 0x94, 0xb5, 0xc4, 0x02, 0x93, 0xfc, + 0xf8, 0xb9, 0x0b, 0x9b, 0x34, 0xd0, 0xd2, 0x19, 0x7f, 0xa5, 0x71, 0x12, 0x53, 0xab, 0x2f, 0xde, + 0x70, 0xb1, 0xb9, 0x92, 0x09, 0x5c, 0x24, 0x5f, 0x75, 0xae, 0xe0, 0xf3, 0x78, 0xb2, 0x42, 0xea, + 0x06, 0xb1, 0xa8, 0xb4, 0x75, 0x13, 0x84, 0xa8, 0xcb, 0x25, 0x46, 0x0f, 0x8a, 0xe2, 0x81, 0xbb, + 0x03, 0xd5, 0xa3, 0x1f, 0x75, 0xdb, 0xd6, 0x17, 0x07, 0xbd, 0x6e, 0xbb, 0xd5, 0x79, 0xd3, 0x69, + 0xef, 0x54, 0x6f, 0x21, 0x04, 0x95, 0x6e, 0xdb, 0xec, 0x75, 0x7a, 0x47, 0xed, 0x83, 0x23, 0x6b, + 0x6f, 0x67, 0xa7, 0xaa, 0x8d, 0xc8, 0x7a, 0xbd, 0x9d, 0x6a, 0x01, 0x2d, 0x83, 0xbe, 0x7f, 0xd8, + 0xda, 0xda, 0x17, 0xd3, 0x39, 0xe3, 0xdf, 0xda, 0x20, 0xc2, 0xb2, 0xa6, 0x77, 0xa2, 0xf3, 0x46, + 0x72, 0x4d, 0x61, 0x3c, 0xd7, 0x3c, 0x85, 0xe5, 0x3e, 0x39, 0xc5, 0x89, 0x17, 0x5b, 0x32, 0xf9, + 0xc9, 0x8e, 0x67, 0x29, 0x15, 0x1e, 0x73, 0x19, 0x3a, 0x02, 0xf0, 0xa8, 0x8d, 0x3d, 0xcb, 0xa6, + 0xe1, 0x65, 0xda, 0xf6, 0xbc, 0x9a, 0xa1, 0x43, 0x6f, 0xec, 0x73, 0xeb, 0x16, 0x0d, 0x2f, 0x4d, + 0xdd, 0x53, 0xc3, 0xfa, 0x4b, 0xd0, 0x33, 0x39, 0x67, 0x9f, 0xbb, 0x4c, 0x62, 0xcc, 0x65, 0x3c, + 0xb8, 0xd4, 0xdf, 0x0a, 0x3e, 0x36, 0xb6, 0xa0, 0x32, 0xdc, 0xb1, 0xf3, 0xe0, 0x12, 0xb5, 0x49, + 0xfe, 0xd7, 0x8e, 0x2e, 0x24, 0xe2, 0xbf, 0xce, 0x58, 0xc5, 0xb2, 0xfe, 0x9b, 0xd2, 0xc0, 0x7d, + 0xec, 0xf8, 0xc5, 0x96, 0x20, 0x8d, 0x7e, 0xab, 0x41, 0x65, 0xb8, 0xef, 0x46, 0xcd, 0x6b, 0x5e, + 0x80, 0x49, 0x1d, 0x7a, 0x7d, 0x8a, 0x2a, 0xd2, 0xf8, 0xc6, 0xaf, 0xfe, 0xfa, 0x8f, 0x3f, 0x14, + 0x1e, 0x19, 0x1f, 0x4c, 0xf8, 0x27, 0xb7, 0x99, 0x55, 0xe2, 0xe8, 0x17, 0x50, 0xce, 0x95, 0xed, + 0xe8, 0x9b, 0x53, 0x55, 0xf7, 0x8a, 0xc7, 0x47, 0x4a, 0x3b, 0xf7, 0x77, 0xae, 0x91, 0x3d, 0x0a, + 0x86, 0x21, 0x28, 0x3c, 0x34, 0xee, 0x4d, 0xa2, 0x10, 0x25, 0xc1, 0xa6, 0xb6, 0x86, 0xbe, 0xd2, + 0xa0, 0x9c, 0x6b, 0x05, 0xae, 0x23, 0x30, 0xde, 0x31, 0x4c, 0xe5, 0x88, 0x67, 0x82, 0xc5, 0x53, + 0xf4, 0x64, 0x02, 0x8b, 0xe6, 0xcf, 0x72, 0xd5, 0xf1, 0xcf, 0xd1, 0xef, 
0x34, 0x58, 0x1e, 0x2a, + 0xe5, 0x51, 0xe3, 0x9a, 0x5e, 0x79, 0x42, 0x23, 0x52, 0x6f, 0x4e, 0xad, 0x2f, 0x7b, 0x04, 0xe3, + 0x81, 0x60, 0x77, 0x17, 0x4d, 0x3a, 0x26, 0xf4, 0x6b, 0x0d, 0x2a, 0xc3, 0x75, 0xfd, 0x75, 0xb1, + 0x32, 0xb1, 0x03, 0xa8, 0x7f, 0x38, 0xf6, 0xa2, 0xb7, 0xfd, 0x30, 0xbe, 0x54, 0x6e, 0x59, 0x9b, + 0xc2, 0x2d, 0x7f, 0xd4, 0xe0, 0x83, 0x09, 0x4d, 0x02, 0xfa, 0xf4, 0xda, 0xb3, 0xba, 0xa2, 0xa7, + 0xa8, 0xaf, 0x4d, 0x5f, 0xef, 0x18, 0x4d, 0x41, 0xf2, 0x19, 0xfa, 0x64, 0x52, 0x04, 0x39, 0x13, + 0x28, 0x7d, 0xad, 0x01, 0x1a, 0x2f, 0x4c, 0xd0, 0xcb, 0xeb, 0xfe, 0xd2, 0x5c, 0x51, 0xc6, 0x5c, + 0xe9, 0xb9, 0x17, 0x82, 0xd4, 0xf3, 0xfa, 0xc7, 0x93, 0x48, 0xb1, 0x31, 0xb8, 0x4d, 0x6d, 0x6d, + 0x3b, 0x84, 0x7b, 0x36, 0xf5, 0x27, 0x91, 0xd8, 0xae, 0x64, 0x31, 0xd1, 0xe5, 0x9f, 0xe9, 0x6a, + 0x3f, 0xfe, 0x5c, 0xa9, 0x51, 0x0f, 0x07, 0x4e, 0x83, 0x46, 0x4e, 0xd3, 0x21, 0x81, 0x20, 0xd1, + 0x94, 0x4b, 0x38, 0x74, 0xd9, 0xf8, 0x3f, 0xf7, 0xef, 0x2a, 0xc9, 0xbf, 0x34, 0xed, 0x64, 0x41, + 0xe8, 0xbf, 0xfc, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x13, 0x10, 0x96, 0x1d, 0xa2, 0x17, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/admin/v1/iam.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/admin/v1/iam.pb.go new file mode 100644 index 000000000..44bb9b40f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/admin/v1/iam.pb.go @@ -0,0 +1,2523 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/iam/admin/v1/iam.proto + +/* +Package admin is a generated protocol buffer package. 
+ +It is generated from these files: + google/iam/admin/v1/iam.proto + +It has these top-level messages: + ServiceAccount + CreateServiceAccountRequest + ListServiceAccountsRequest + ListServiceAccountsResponse + GetServiceAccountRequest + DeleteServiceAccountRequest + ListServiceAccountKeysRequest + ListServiceAccountKeysResponse + GetServiceAccountKeyRequest + ServiceAccountKey + CreateServiceAccountKeyRequest + DeleteServiceAccountKeyRequest + SignBlobRequest + SignBlobResponse + SignJwtRequest + SignJwtResponse + Role + QueryGrantableRolesRequest + QueryGrantableRolesResponse + ListRolesRequest + ListRolesResponse + GetRoleRequest + CreateRoleRequest + UpdateRoleRequest + DeleteRoleRequest + UndeleteRoleRequest + Permission + QueryTestablePermissionsRequest + QueryTestablePermissionsResponse +*/ +package admin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_iam_v11 "google.golang.org/genproto/googleapis/iam/v1" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Supported key algorithms. +type ServiceAccountKeyAlgorithm int32 + +const ( + // An unspecified key algorithm. 
+ ServiceAccountKeyAlgorithm_KEY_ALG_UNSPECIFIED ServiceAccountKeyAlgorithm = 0 + // 1k RSA Key. + ServiceAccountKeyAlgorithm_KEY_ALG_RSA_1024 ServiceAccountKeyAlgorithm = 1 + // 2k RSA Key. + ServiceAccountKeyAlgorithm_KEY_ALG_RSA_2048 ServiceAccountKeyAlgorithm = 2 +) + +var ServiceAccountKeyAlgorithm_name = map[int32]string{ + 0: "KEY_ALG_UNSPECIFIED", + 1: "KEY_ALG_RSA_1024", + 2: "KEY_ALG_RSA_2048", +} +var ServiceAccountKeyAlgorithm_value = map[string]int32{ + "KEY_ALG_UNSPECIFIED": 0, + "KEY_ALG_RSA_1024": 1, + "KEY_ALG_RSA_2048": 2, +} + +func (x ServiceAccountKeyAlgorithm) String() string { + return proto.EnumName(ServiceAccountKeyAlgorithm_name, int32(x)) +} +func (ServiceAccountKeyAlgorithm) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Supported private key output formats. +type ServiceAccountPrivateKeyType int32 + +const ( + // Unspecified. Equivalent to `TYPE_GOOGLE_CREDENTIALS_FILE`. + ServiceAccountPrivateKeyType_TYPE_UNSPECIFIED ServiceAccountPrivateKeyType = 0 + // PKCS12 format. + // The password for the PKCS12 file is `notasecret`. + // For more information, see https://tools.ietf.org/html/rfc7292. + ServiceAccountPrivateKeyType_TYPE_PKCS12_FILE ServiceAccountPrivateKeyType = 1 + // Google Credentials File format. + ServiceAccountPrivateKeyType_TYPE_GOOGLE_CREDENTIALS_FILE ServiceAccountPrivateKeyType = 2 +) + +var ServiceAccountPrivateKeyType_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "TYPE_PKCS12_FILE", + 2: "TYPE_GOOGLE_CREDENTIALS_FILE", +} +var ServiceAccountPrivateKeyType_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "TYPE_PKCS12_FILE": 1, + "TYPE_GOOGLE_CREDENTIALS_FILE": 2, +} + +func (x ServiceAccountPrivateKeyType) String() string { + return proto.EnumName(ServiceAccountPrivateKeyType_name, int32(x)) +} +func (ServiceAccountPrivateKeyType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// Supported public key output formats. 
+type ServiceAccountPublicKeyType int32 + +const ( + // Unspecified. Returns nothing here. + ServiceAccountPublicKeyType_TYPE_NONE ServiceAccountPublicKeyType = 0 + // X509 PEM format. + ServiceAccountPublicKeyType_TYPE_X509_PEM_FILE ServiceAccountPublicKeyType = 1 + // Raw public key. + ServiceAccountPublicKeyType_TYPE_RAW_PUBLIC_KEY ServiceAccountPublicKeyType = 2 +) + +var ServiceAccountPublicKeyType_name = map[int32]string{ + 0: "TYPE_NONE", + 1: "TYPE_X509_PEM_FILE", + 2: "TYPE_RAW_PUBLIC_KEY", +} +var ServiceAccountPublicKeyType_value = map[string]int32{ + "TYPE_NONE": 0, + "TYPE_X509_PEM_FILE": 1, + "TYPE_RAW_PUBLIC_KEY": 2, +} + +func (x ServiceAccountPublicKeyType) String() string { + return proto.EnumName(ServiceAccountPublicKeyType_name, int32(x)) +} +func (ServiceAccountPublicKeyType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// A view for Role objects. +type RoleView int32 + +const ( + // Omits the `included_permissions` field. + // This is the default value. + RoleView_BASIC RoleView = 0 + // Returns all fields. + RoleView_FULL RoleView = 1 +) + +var RoleView_name = map[int32]string{ + 0: "BASIC", + 1: "FULL", +} +var RoleView_value = map[string]int32{ + "BASIC": 0, + "FULL": 1, +} + +func (x RoleView) String() string { + return proto.EnumName(RoleView_name, int32(x)) +} +func (RoleView) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// `KeyType` filters to selectively retrieve certain varieties +// of keys. +type ListServiceAccountKeysRequest_KeyType int32 + +const ( + // Unspecified key type. The presence of this in the + // message will immediately result in an error. + ListServiceAccountKeysRequest_KEY_TYPE_UNSPECIFIED ListServiceAccountKeysRequest_KeyType = 0 + // User-managed keys (managed and rotated by the user). + ListServiceAccountKeysRequest_USER_MANAGED ListServiceAccountKeysRequest_KeyType = 1 + // System-managed keys (managed and rotated by Google). 
+ ListServiceAccountKeysRequest_SYSTEM_MANAGED ListServiceAccountKeysRequest_KeyType = 2 +) + +var ListServiceAccountKeysRequest_KeyType_name = map[int32]string{ + 0: "KEY_TYPE_UNSPECIFIED", + 1: "USER_MANAGED", + 2: "SYSTEM_MANAGED", +} +var ListServiceAccountKeysRequest_KeyType_value = map[string]int32{ + "KEY_TYPE_UNSPECIFIED": 0, + "USER_MANAGED": 1, + "SYSTEM_MANAGED": 2, +} + +func (x ListServiceAccountKeysRequest_KeyType) String() string { + return proto.EnumName(ListServiceAccountKeysRequest_KeyType_name, int32(x)) +} +func (ListServiceAccountKeysRequest_KeyType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{6, 0} +} + +// A stage representing a role's lifecycle phase. +type Role_RoleLaunchStage int32 + +const ( + // The user has indicated this role is currently in an alpha phase. + Role_ALPHA Role_RoleLaunchStage = 0 + // The user has indicated this role is currently in a beta phase. + Role_BETA Role_RoleLaunchStage = 1 + // The user has indicated this role is generally available. + Role_GA Role_RoleLaunchStage = 2 + // The user has indicated this role is being deprecated. + Role_DEPRECATED Role_RoleLaunchStage = 4 + // This role is disabled and will not contribute permissions to any members + // it is granted to in policies. + Role_DISABLED Role_RoleLaunchStage = 5 + // The user has indicated this role is currently in an eap phase. + Role_EAP Role_RoleLaunchStage = 6 +) + +var Role_RoleLaunchStage_name = map[int32]string{ + 0: "ALPHA", + 1: "BETA", + 2: "GA", + 4: "DEPRECATED", + 5: "DISABLED", + 6: "EAP", +} +var Role_RoleLaunchStage_value = map[string]int32{ + "ALPHA": 0, + "BETA": 1, + "GA": 2, + "DEPRECATED": 4, + "DISABLED": 5, + "EAP": 6, +} + +func (x Role_RoleLaunchStage) String() string { + return proto.EnumName(Role_RoleLaunchStage_name, int32(x)) +} +func (Role_RoleLaunchStage) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{16, 0} } + +// A stage representing a permission's lifecycle phase. 
+type Permission_PermissionLaunchStage int32 + +const ( + // The permission is currently in an alpha phase. + Permission_ALPHA Permission_PermissionLaunchStage = 0 + // The permission is currently in a beta phase. + Permission_BETA Permission_PermissionLaunchStage = 1 + // The permission is generally available. + Permission_GA Permission_PermissionLaunchStage = 2 + // The permission is being deprecated. + Permission_DEPRECATED Permission_PermissionLaunchStage = 3 +) + +var Permission_PermissionLaunchStage_name = map[int32]string{ + 0: "ALPHA", + 1: "BETA", + 2: "GA", + 3: "DEPRECATED", +} +var Permission_PermissionLaunchStage_value = map[string]int32{ + "ALPHA": 0, + "BETA": 1, + "GA": 2, + "DEPRECATED": 3, +} + +func (x Permission_PermissionLaunchStage) String() string { + return proto.EnumName(Permission_PermissionLaunchStage_name, int32(x)) +} +func (Permission_PermissionLaunchStage) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{26, 0} +} + +// The state of the permission with regards to custom roles. +type Permission_CustomRolesSupportLevel int32 + +const ( + // Permission is fully supported for custom role use. + Permission_SUPPORTED Permission_CustomRolesSupportLevel = 0 + // Permission is being tested to check custom role compatibility. + Permission_TESTING Permission_CustomRolesSupportLevel = 1 + // Permission is not supported for custom role use. 
+ Permission_NOT_SUPPORTED Permission_CustomRolesSupportLevel = 2 +) + +var Permission_CustomRolesSupportLevel_name = map[int32]string{ + 0: "SUPPORTED", + 1: "TESTING", + 2: "NOT_SUPPORTED", +} +var Permission_CustomRolesSupportLevel_value = map[string]int32{ + "SUPPORTED": 0, + "TESTING": 1, + "NOT_SUPPORTED": 2, +} + +func (x Permission_CustomRolesSupportLevel) String() string { + return proto.EnumName(Permission_CustomRolesSupportLevel_name, int32(x)) +} +func (Permission_CustomRolesSupportLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{26, 1} +} + +// A service account in the Identity and Access Management API. +// +// To create a service account, specify the `project_id` and the `account_id` +// for the account. The `account_id` is unique within the project, and is used +// to generate the service account email address and a stable +// `unique_id`. +// +// If the account already exists, the account's resource name is returned +// in util::Status's ResourceInfo.resource_name in the format of +// projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}. The caller can +// use the name in other methods to access the account. +// +// All other methods can identify the service account using the format +// `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. +// Using `-` as a wildcard for the project will infer the project from +// the account. The `account` value can be the `email` address or the +// `unique_id` of the service account. +type ServiceAccount struct { + // The resource name of the service account in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. + // + // Requests using `-` as a wildcard for the project will infer the project + // from the `account` and the `account` value can be the `email` address or + // the `unique_id` of the service account. 
+ // + // In responses the resource name will always be in the format + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // @OutputOnly The id of the project that owns the service account. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // @OutputOnly The unique and stable id of the service account. + UniqueId string `protobuf:"bytes,4,opt,name=unique_id,json=uniqueId" json:"unique_id,omitempty"` + // @OutputOnly The email address of the service account. + Email string `protobuf:"bytes,5,opt,name=email" json:"email,omitempty"` + // Optional. A user-specified description of the service account. Must be + // fewer than 100 UTF-8 bytes. + DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Used to perform a consistent read-modify-write. + Etag []byte `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` + // @OutputOnly. The OAuth2 client id for the service account. + // This is used in conjunction with the OAuth2 clientconfig API to make + // three legged OAuth2 (3LO) flows to access the data of Google users. 
+ Oauth2ClientId string `protobuf:"bytes,9,opt,name=oauth2_client_id,json=oauth2ClientId" json:"oauth2_client_id,omitempty"` +} + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (m *ServiceAccount) String() string { return proto.CompactTextString(m) } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ServiceAccount) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ServiceAccount) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ServiceAccount) GetUniqueId() string { + if m != nil { + return m.UniqueId + } + return "" +} + +func (m *ServiceAccount) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *ServiceAccount) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *ServiceAccount) GetEtag() []byte { + if m != nil { + return m.Etag + } + return nil +} + +func (m *ServiceAccount) GetOauth2ClientId() string { + if m != nil { + return m.Oauth2ClientId + } + return "" +} + +// The service account create request. +type CreateServiceAccountRequest struct { + // Required. The resource name of the project associated with the service + // accounts, such as `projects/my-project-123`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Required. The account id that is used to generate the service account + // email address and a stable unique id. It is unique within a project, + // must be 6-30 characters long, and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035. + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId" json:"account_id,omitempty"` + // The [ServiceAccount][google.iam.admin.v1.ServiceAccount] resource to create. + // Currently, only the following values are user assignable: + // `display_name` . 
+ ServiceAccount *ServiceAccount `protobuf:"bytes,3,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"` +} + +func (m *CreateServiceAccountRequest) Reset() { *m = CreateServiceAccountRequest{} } +func (m *CreateServiceAccountRequest) String() string { return proto.CompactTextString(m) } +func (*CreateServiceAccountRequest) ProtoMessage() {} +func (*CreateServiceAccountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *CreateServiceAccountRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateServiceAccountRequest) GetAccountId() string { + if m != nil { + return m.AccountId + } + return "" +} + +func (m *CreateServiceAccountRequest) GetServiceAccount() *ServiceAccount { + if m != nil { + return m.ServiceAccount + } + return nil +} + +// The service account list request. +type ListServiceAccountsRequest struct { + // Required. The resource name of the project associated with the service + // accounts, such as `projects/my-project-123`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional limit on the number of service accounts to include in the + // response. Further accounts can subsequently be obtained by including the + // [ListServiceAccountsResponse.next_page_token][google.iam.admin.v1.ListServiceAccountsResponse.next_page_token] + // in a subsequent request. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional pagination token returned in an earlier + // [ListServiceAccountsResponse.next_page_token][google.iam.admin.v1.ListServiceAccountsResponse.next_page_token]. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListServiceAccountsRequest) Reset() { *m = ListServiceAccountsRequest{} } +func (m *ListServiceAccountsRequest) String() string { return proto.CompactTextString(m) } +func (*ListServiceAccountsRequest) ProtoMessage() {} +func (*ListServiceAccountsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListServiceAccountsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListServiceAccountsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListServiceAccountsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The service account list response. +type ListServiceAccountsResponse struct { + // The list of matching service accounts. + Accounts []*ServiceAccount `protobuf:"bytes,1,rep,name=accounts" json:"accounts,omitempty"` + // To retrieve the next page of results, set + // [ListServiceAccountsRequest.page_token][google.iam.admin.v1.ListServiceAccountsRequest.page_token] + // to this value. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListServiceAccountsResponse) Reset() { *m = ListServiceAccountsResponse{} } +func (m *ListServiceAccountsResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceAccountsResponse) ProtoMessage() {} +func (*ListServiceAccountsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListServiceAccountsResponse) GetAccounts() []*ServiceAccount { + if m != nil { + return m.Accounts + } + return nil +} + +func (m *ListServiceAccountsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The service account get request. 
+type GetServiceAccountRequest struct { + // The resource name of the service account in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. + // Using `-` as a wildcard for the project will infer the project from + // the account. The `account` value can be the `email` address or the + // `unique_id` of the service account. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetServiceAccountRequest) Reset() { *m = GetServiceAccountRequest{} } +func (m *GetServiceAccountRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountRequest) ProtoMessage() {} +func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *GetServiceAccountRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The service account delete request. +type DeleteServiceAccountRequest struct { + // The resource name of the service account in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. + // Using `-` as a wildcard for the project will infer the project from + // the account. The `account` value can be the `email` address or the + // `unique_id` of the service account. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteServiceAccountRequest) Reset() { *m = DeleteServiceAccountRequest{} } +func (m *DeleteServiceAccountRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteServiceAccountRequest) ProtoMessage() {} +func (*DeleteServiceAccountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *DeleteServiceAccountRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The service account keys list request. 
+type ListServiceAccountKeysRequest struct { + // The resource name of the service account in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. + // + // Using `-` as a wildcard for the project, will infer the project from + // the account. The `account` value can be the `email` address or the + // `unique_id` of the service account. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Filters the types of keys the user wants to include in the list + // response. Duplicate key types are not allowed. If no key type + // is provided, all keys are returned. + KeyTypes []ListServiceAccountKeysRequest_KeyType `protobuf:"varint,2,rep,packed,name=key_types,json=keyTypes,enum=google.iam.admin.v1.ListServiceAccountKeysRequest_KeyType" json:"key_types,omitempty"` +} + +func (m *ListServiceAccountKeysRequest) Reset() { *m = ListServiceAccountKeysRequest{} } +func (m *ListServiceAccountKeysRequest) String() string { return proto.CompactTextString(m) } +func (*ListServiceAccountKeysRequest) ProtoMessage() {} +func (*ListServiceAccountKeysRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ListServiceAccountKeysRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListServiceAccountKeysRequest) GetKeyTypes() []ListServiceAccountKeysRequest_KeyType { + if m != nil { + return m.KeyTypes + } + return nil +} + +// The service account keys list response. +type ListServiceAccountKeysResponse struct { + // The public keys for the service account. 
+ Keys []*ServiceAccountKey `protobuf:"bytes,1,rep,name=keys" json:"keys,omitempty"` +} + +func (m *ListServiceAccountKeysResponse) Reset() { *m = ListServiceAccountKeysResponse{} } +func (m *ListServiceAccountKeysResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceAccountKeysResponse) ProtoMessage() {} +func (*ListServiceAccountKeysResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListServiceAccountKeysResponse) GetKeys() []*ServiceAccountKey { + if m != nil { + return m.Keys + } + return nil +} + +// The service account key get by id request. +type GetServiceAccountKeyRequest struct { + // The resource name of the service account key in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`. + // + // Using `-` as a wildcard for the project will infer the project from + // the account. The `account` value can be the `email` address or the + // `unique_id` of the service account. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The output format of the public key requested. + // X509_PEM is the default output format. 
+ PublicKeyType ServiceAccountPublicKeyType `protobuf:"varint,2,opt,name=public_key_type,json=publicKeyType,enum=google.iam.admin.v1.ServiceAccountPublicKeyType" json:"public_key_type,omitempty"` +} + +func (m *GetServiceAccountKeyRequest) Reset() { *m = GetServiceAccountKeyRequest{} } +func (m *GetServiceAccountKeyRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountKeyRequest) ProtoMessage() {} +func (*GetServiceAccountKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *GetServiceAccountKeyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetServiceAccountKeyRequest) GetPublicKeyType() ServiceAccountPublicKeyType { + if m != nil { + return m.PublicKeyType + } + return ServiceAccountPublicKeyType_TYPE_NONE +} + +// Represents a service account key. +// +// A service account has two sets of key-pairs: user-managed, and +// system-managed. +// +// User-managed key-pairs can be created and deleted by users. Users are +// responsible for rotating these keys periodically to ensure security of +// their service accounts. Users retain the private key of these key-pairs, +// and Google retains ONLY the public key. +// +// System-managed key-pairs are managed automatically by Google, and rotated +// daily without user intervention. The private key never leaves Google's +// servers to maximize security. +// +// Public keys for all service accounts are also published at the OAuth2 +// Service Account API. +type ServiceAccountKey struct { + // The resource name of the service account key in the following format + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The output format for the private key. + // Only provided in `CreateServiceAccountKey` responses, not + // in `GetServiceAccountKey` or `ListServiceAccountKey` responses. 
+ // + // Google never exposes system-managed private keys, and never retains + // user-managed private keys. + PrivateKeyType ServiceAccountPrivateKeyType `protobuf:"varint,2,opt,name=private_key_type,json=privateKeyType,enum=google.iam.admin.v1.ServiceAccountPrivateKeyType" json:"private_key_type,omitempty"` + // Specifies the algorithm (and possibly key size) for the key. + KeyAlgorithm ServiceAccountKeyAlgorithm `protobuf:"varint,8,opt,name=key_algorithm,json=keyAlgorithm,enum=google.iam.admin.v1.ServiceAccountKeyAlgorithm" json:"key_algorithm,omitempty"` + // The private key data. Only provided in `CreateServiceAccountKey` + // responses. Make sure to keep the private key data secure because it + // allows for the assertion of the service account identity. + // When decoded, the private key data can be used to authenticate with + // Google API client libraries and with + // gcloud + // auth activate-service-account. + PrivateKeyData []byte `protobuf:"bytes,3,opt,name=private_key_data,json=privateKeyData,proto3" json:"private_key_data,omitempty"` + // The public key data. Only provided in `GetServiceAccountKey` responses. + PublicKeyData []byte `protobuf:"bytes,7,opt,name=public_key_data,json=publicKeyData,proto3" json:"public_key_data,omitempty"` + // The key can be used after this timestamp. + ValidAfterTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=valid_after_time,json=validAfterTime" json:"valid_after_time,omitempty"` + // The key can be used before this timestamp. 
+ ValidBeforeTime *google_protobuf3.Timestamp `protobuf:"bytes,5,opt,name=valid_before_time,json=validBeforeTime" json:"valid_before_time,omitempty"` +} + +func (m *ServiceAccountKey) Reset() { *m = ServiceAccountKey{} } +func (m *ServiceAccountKey) String() string { return proto.CompactTextString(m) } +func (*ServiceAccountKey) ProtoMessage() {} +func (*ServiceAccountKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ServiceAccountKey) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ServiceAccountKey) GetPrivateKeyType() ServiceAccountPrivateKeyType { + if m != nil { + return m.PrivateKeyType + } + return ServiceAccountPrivateKeyType_TYPE_UNSPECIFIED +} + +func (m *ServiceAccountKey) GetKeyAlgorithm() ServiceAccountKeyAlgorithm { + if m != nil { + return m.KeyAlgorithm + } + return ServiceAccountKeyAlgorithm_KEY_ALG_UNSPECIFIED +} + +func (m *ServiceAccountKey) GetPrivateKeyData() []byte { + if m != nil { + return m.PrivateKeyData + } + return nil +} + +func (m *ServiceAccountKey) GetPublicKeyData() []byte { + if m != nil { + return m.PublicKeyData + } + return nil +} + +func (m *ServiceAccountKey) GetValidAfterTime() *google_protobuf3.Timestamp { + if m != nil { + return m.ValidAfterTime + } + return nil +} + +func (m *ServiceAccountKey) GetValidBeforeTime() *google_protobuf3.Timestamp { + if m != nil { + return m.ValidBeforeTime + } + return nil +} + +// The service account key create request. +type CreateServiceAccountKeyRequest struct { + // The resource name of the service account in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. + // Using `-` as a wildcard for the project will infer the project from + // the account. The `account` value can be the `email` address or the + // `unique_id` of the service account. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The output format of the private key. 
`GOOGLE_CREDENTIALS_FILE` is the + // default output format. + PrivateKeyType ServiceAccountPrivateKeyType `protobuf:"varint,2,opt,name=private_key_type,json=privateKeyType,enum=google.iam.admin.v1.ServiceAccountPrivateKeyType" json:"private_key_type,omitempty"` + // Which type of key and algorithm to use for the key. + // The default is currently a 2K RSA key. However this may change in the + // future. + KeyAlgorithm ServiceAccountKeyAlgorithm `protobuf:"varint,3,opt,name=key_algorithm,json=keyAlgorithm,enum=google.iam.admin.v1.ServiceAccountKeyAlgorithm" json:"key_algorithm,omitempty"` +} + +func (m *CreateServiceAccountKeyRequest) Reset() { *m = CreateServiceAccountKeyRequest{} } +func (m *CreateServiceAccountKeyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateServiceAccountKeyRequest) ProtoMessage() {} +func (*CreateServiceAccountKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *CreateServiceAccountKeyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateServiceAccountKeyRequest) GetPrivateKeyType() ServiceAccountPrivateKeyType { + if m != nil { + return m.PrivateKeyType + } + return ServiceAccountPrivateKeyType_TYPE_UNSPECIFIED +} + +func (m *CreateServiceAccountKeyRequest) GetKeyAlgorithm() ServiceAccountKeyAlgorithm { + if m != nil { + return m.KeyAlgorithm + } + return ServiceAccountKeyAlgorithm_KEY_ALG_UNSPECIFIED +} + +// The service account key delete request. +type DeleteServiceAccountKeyRequest struct { + // The resource name of the service account key in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`. + // Using `-` as a wildcard for the project will infer the project from + // the account. The `account` value can be the `email` address or the + // `unique_id` of the service account. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteServiceAccountKeyRequest) Reset() { *m = DeleteServiceAccountKeyRequest{} } +func (m *DeleteServiceAccountKeyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteServiceAccountKeyRequest) ProtoMessage() {} +func (*DeleteServiceAccountKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *DeleteServiceAccountKeyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The service account sign blob request. +type SignBlobRequest struct { + // The resource name of the service account in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. + // Using `-` as a wildcard for the project will infer the project from + // the account. The `account` value can be the `email` address or the + // `unique_id` of the service account. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The bytes to sign. + BytesToSign []byte `protobuf:"bytes,2,opt,name=bytes_to_sign,json=bytesToSign,proto3" json:"bytes_to_sign,omitempty"` +} + +func (m *SignBlobRequest) Reset() { *m = SignBlobRequest{} } +func (m *SignBlobRequest) String() string { return proto.CompactTextString(m) } +func (*SignBlobRequest) ProtoMessage() {} +func (*SignBlobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *SignBlobRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SignBlobRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +// The service account sign blob response. +type SignBlobResponse struct { + // The id of the key used to sign the blob. + KeyId string `protobuf:"bytes,1,opt,name=key_id,json=keyId" json:"key_id,omitempty"` + // The signed blob. 
+ Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (m *SignBlobResponse) Reset() { *m = SignBlobResponse{} } +func (m *SignBlobResponse) String() string { return proto.CompactTextString(m) } +func (*SignBlobResponse) ProtoMessage() {} +func (*SignBlobResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *SignBlobResponse) GetKeyId() string { + if m != nil { + return m.KeyId + } + return "" +} + +func (m *SignBlobResponse) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +// The service account sign JWT request. +type SignJwtRequest struct { + // The resource name of the service account in the following format: + // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. + // Using `-` as a wildcard for the project will infer the project from + // the account. The `account` value can be the `email` address or the + // `unique_id` of the service account. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The JWT payload to sign, a JSON JWT Claim set. + Payload string `protobuf:"bytes,2,opt,name=payload" json:"payload,omitempty"` +} + +func (m *SignJwtRequest) Reset() { *m = SignJwtRequest{} } +func (m *SignJwtRequest) String() string { return proto.CompactTextString(m) } +func (*SignJwtRequest) ProtoMessage() {} +func (*SignJwtRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *SignJwtRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SignJwtRequest) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +// The service account sign JWT response. +type SignJwtResponse struct { + // The id of the key used to sign the JWT. + KeyId string `protobuf:"bytes,1,opt,name=key_id,json=keyId" json:"key_id,omitempty"` + // The signed JWT. 
+ SignedJwt string `protobuf:"bytes,2,opt,name=signed_jwt,json=signedJwt" json:"signed_jwt,omitempty"` +} + +func (m *SignJwtResponse) Reset() { *m = SignJwtResponse{} } +func (m *SignJwtResponse) String() string { return proto.CompactTextString(m) } +func (*SignJwtResponse) ProtoMessage() {} +func (*SignJwtResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *SignJwtResponse) GetKeyId() string { + if m != nil { + return m.KeyId + } + return "" +} + +func (m *SignJwtResponse) GetSignedJwt() string { + if m != nil { + return m.SignedJwt + } + return "" +} + +// A role in the Identity and Access Management API. +type Role struct { + // The name of the role. + // + // When Role is used in CreateRole, the role name must not be set. + // + // When Role is used in output and other input such as UpdateRole, the role + // name is the complete path, e.g., roles/logging.viewer for curated roles + // and organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. A human-readable title for the role. Typically this + // is limited to 100 UTF-8 bytes. + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + // Optional. A human-readable description for the role. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The names of the permissions this role grants when bound in an IAM policy. + IncludedPermissions []string `protobuf:"bytes,7,rep,name=included_permissions,json=includedPermissions" json:"included_permissions,omitempty"` + // The current launch stage of the role. + Stage Role_RoleLaunchStage `protobuf:"varint,8,opt,name=stage,enum=google.iam.admin.v1.Role_RoleLaunchStage" json:"stage,omitempty"` + // Used to perform a consistent read-modify-write. + Etag []byte `protobuf:"bytes,9,opt,name=etag,proto3" json:"etag,omitempty"` + // The current deleted state of the role. 
This field is read only. + // It will be ignored in calls to CreateRole and UpdateRole. + Deleted bool `protobuf:"varint,11,opt,name=deleted" json:"deleted,omitempty"` +} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Role) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Role) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Role) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Role) GetIncludedPermissions() []string { + if m != nil { + return m.IncludedPermissions + } + return nil +} + +func (m *Role) GetStage() Role_RoleLaunchStage { + if m != nil { + return m.Stage + } + return Role_ALPHA +} + +func (m *Role) GetEtag() []byte { + if m != nil { + return m.Etag + } + return nil +} + +func (m *Role) GetDeleted() bool { + if m != nil { + return m.Deleted + } + return false +} + +// The grantable role query request. +type QueryGrantableRolesRequest struct { + // Required. The full resource name to query from the list of grantable roles. + // + // The name follows the Google Cloud Platform resource format. + // For example, a Cloud Platform project with id `my-project` will be named + // `//cloudresourcemanager.googleapis.com/projects/my-project`. + FullResourceName string `protobuf:"bytes,1,opt,name=full_resource_name,json=fullResourceName" json:"full_resource_name,omitempty"` + View RoleView `protobuf:"varint,2,opt,name=view,enum=google.iam.admin.v1.RoleView" json:"view,omitempty"` + // Optional limit on the number of roles to include in the response. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional pagination token returned in an earlier + // QueryGrantableRolesResponse. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *QueryGrantableRolesRequest) Reset() { *m = QueryGrantableRolesRequest{} } +func (m *QueryGrantableRolesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryGrantableRolesRequest) ProtoMessage() {} +func (*QueryGrantableRolesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *QueryGrantableRolesRequest) GetFullResourceName() string { + if m != nil { + return m.FullResourceName + } + return "" +} + +func (m *QueryGrantableRolesRequest) GetView() RoleView { + if m != nil { + return m.View + } + return RoleView_BASIC +} + +func (m *QueryGrantableRolesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *QueryGrantableRolesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The grantable role query response. +type QueryGrantableRolesResponse struct { + // The list of matching roles. + Roles []*Role `protobuf:"bytes,1,rep,name=roles" json:"roles,omitempty"` + // To retrieve the next page of results, set + // `QueryGrantableRolesRequest.page_token` to this value. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *QueryGrantableRolesResponse) Reset() { *m = QueryGrantableRolesResponse{} } +func (m *QueryGrantableRolesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryGrantableRolesResponse) ProtoMessage() {} +func (*QueryGrantableRolesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *QueryGrantableRolesResponse) GetRoles() []*Role { + if m != nil { + return m.Roles + } + return nil +} + +func (m *QueryGrantableRolesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request to get all roles defined under a resource. +type ListRolesRequest struct { + // The resource name of the parent resource in one of the following formats: + // `` (empty string) -- this refers to curated roles. + // `organizations/{ORGANIZATION_ID}` + // `projects/{PROJECT_ID}` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional limit on the number of roles to include in the response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional pagination token returned in an earlier ListRolesResponse. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional view for the returned Role objects. + View RoleView `protobuf:"varint,4,opt,name=view,enum=google.iam.admin.v1.RoleView" json:"view,omitempty"` + // Include Roles that have been deleted. 
+ ShowDeleted bool `protobuf:"varint,6,opt,name=show_deleted,json=showDeleted" json:"show_deleted,omitempty"` +} + +func (m *ListRolesRequest) Reset() { *m = ListRolesRequest{} } +func (m *ListRolesRequest) String() string { return proto.CompactTextString(m) } +func (*ListRolesRequest) ProtoMessage() {} +func (*ListRolesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *ListRolesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListRolesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListRolesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListRolesRequest) GetView() RoleView { + if m != nil { + return m.View + } + return RoleView_BASIC +} + +func (m *ListRolesRequest) GetShowDeleted() bool { + if m != nil { + return m.ShowDeleted + } + return false +} + +// The response containing the roles defined under a resource. +type ListRolesResponse struct { + // The Roles defined on this resource. + Roles []*Role `protobuf:"bytes,1,rep,name=roles" json:"roles,omitempty"` + // To retrieve the next page of results, set + // `ListRolesRequest.page_token` to this value. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListRolesResponse) Reset() { *m = ListRolesResponse{} } +func (m *ListRolesResponse) String() string { return proto.CompactTextString(m) } +func (*ListRolesResponse) ProtoMessage() {} +func (*ListRolesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *ListRolesResponse) GetRoles() []*Role { + if m != nil { + return m.Roles + } + return nil +} + +func (m *ListRolesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request to get the definition of an existing role. 
+type GetRoleRequest struct { + // The resource name of the role in one of the following formats: + // `roles/{ROLE_NAME}` + // `organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}` + // `projects/{PROJECT_ID}/roles/{ROLE_NAME}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetRoleRequest) Reset() { *m = GetRoleRequest{} } +func (m *GetRoleRequest) String() string { return proto.CompactTextString(m) } +func (*GetRoleRequest) ProtoMessage() {} +func (*GetRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *GetRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request to create a new role. +type CreateRoleRequest struct { + // The resource name of the parent resource in one of the following formats: + // `organizations/{ORGANIZATION_ID}` + // `projects/{PROJECT_ID}` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The role id to use for this role. + RoleId string `protobuf:"bytes,2,opt,name=role_id,json=roleId" json:"role_id,omitempty"` + // The Role resource to create. + Role *Role `protobuf:"bytes,3,opt,name=role" json:"role,omitempty"` +} + +func (m *CreateRoleRequest) Reset() { *m = CreateRoleRequest{} } +func (m *CreateRoleRequest) String() string { return proto.CompactTextString(m) } +func (*CreateRoleRequest) ProtoMessage() {} +func (*CreateRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *CreateRoleRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateRoleRequest) GetRoleId() string { + if m != nil { + return m.RoleId + } + return "" +} + +func (m *CreateRoleRequest) GetRole() *Role { + if m != nil { + return m.Role + } + return nil +} + +// The request to update a role. 
+type UpdateRoleRequest struct { + // The resource name of the role in one of the following formats: + // `roles/{ROLE_NAME}` + // `organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}` + // `projects/{PROJECT_ID}/roles/{ROLE_NAME}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The updated role. + Role *Role `protobuf:"bytes,2,opt,name=role" json:"role,omitempty"` + // A mask describing which fields in the Role have changed. + UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateRoleRequest) Reset() { *m = UpdateRoleRequest{} } +func (m *UpdateRoleRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateRoleRequest) ProtoMessage() {} +func (*UpdateRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *UpdateRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateRoleRequest) GetRole() *Role { + if m != nil { + return m.Role + } + return nil +} + +func (m *UpdateRoleRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// The request to delete an existing role. +type DeleteRoleRequest struct { + // The resource name of the role in one of the following formats: + // `organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}` + // `projects/{PROJECT_ID}/roles/{ROLE_NAME}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Used to perform a consistent read-modify-write. 
+ Etag []byte `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (m *DeleteRoleRequest) Reset() { *m = DeleteRoleRequest{} } +func (m *DeleteRoleRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRoleRequest) ProtoMessage() {} +func (*DeleteRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *DeleteRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteRoleRequest) GetEtag() []byte { + if m != nil { + return m.Etag + } + return nil +} + +// The request to undelete an existing role. +type UndeleteRoleRequest struct { + // The resource name of the role in one of the following formats: + // `organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}` + // `projects/{PROJECT_ID}/roles/{ROLE_NAME}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Used to perform a consistent read-modify-write. + Etag []byte `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (m *UndeleteRoleRequest) Reset() { *m = UndeleteRoleRequest{} } +func (m *UndeleteRoleRequest) String() string { return proto.CompactTextString(m) } +func (*UndeleteRoleRequest) ProtoMessage() {} +func (*UndeleteRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *UndeleteRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UndeleteRoleRequest) GetEtag() []byte { + if m != nil { + return m.Etag + } + return nil +} + +// A permission which can be included by a role. +type Permission struct { + // The name of this Permission. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The title of this Permission. + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + // A brief description of what this Permission is used for. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // This permission can ONLY be used in predefined roles. + OnlyInPredefinedRoles bool `protobuf:"varint,4,opt,name=only_in_predefined_roles,json=onlyInPredefinedRoles" json:"only_in_predefined_roles,omitempty"` + // The current launch stage of the permission. + Stage Permission_PermissionLaunchStage `protobuf:"varint,5,opt,name=stage,enum=google.iam.admin.v1.Permission_PermissionLaunchStage" json:"stage,omitempty"` + // The current custom role support level. + CustomRolesSupportLevel Permission_CustomRolesSupportLevel `protobuf:"varint,6,opt,name=custom_roles_support_level,json=customRolesSupportLevel,enum=google.iam.admin.v1.Permission_CustomRolesSupportLevel" json:"custom_roles_support_level,omitempty"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *Permission) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Permission) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Permission) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Permission) GetOnlyInPredefinedRoles() bool { + if m != nil { + return m.OnlyInPredefinedRoles + } + return false +} + +func (m *Permission) GetStage() Permission_PermissionLaunchStage { + if m != nil { + return m.Stage + } + return Permission_ALPHA +} + +func (m *Permission) GetCustomRolesSupportLevel() Permission_CustomRolesSupportLevel { + if m != nil { + return m.CustomRolesSupportLevel + } + return Permission_SUPPORTED +} + +// A request to get permissions which can be tested on a resource. +type QueryTestablePermissionsRequest struct { + // Required. 
The full resource name to query from the list of testable + // permissions. + // + // The name follows the Google Cloud Platform resource format. + // For example, a Cloud Platform project with id `my-project` will be named + // `//cloudresourcemanager.googleapis.com/projects/my-project`. + FullResourceName string `protobuf:"bytes,1,opt,name=full_resource_name,json=fullResourceName" json:"full_resource_name,omitempty"` + // Optional limit on the number of permissions to include in the response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional pagination token returned in an earlier + // QueryTestablePermissionsRequest. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *QueryTestablePermissionsRequest) Reset() { *m = QueryTestablePermissionsRequest{} } +func (m *QueryTestablePermissionsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTestablePermissionsRequest) ProtoMessage() {} +func (*QueryTestablePermissionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{27} +} + +func (m *QueryTestablePermissionsRequest) GetFullResourceName() string { + if m != nil { + return m.FullResourceName + } + return "" +} + +func (m *QueryTestablePermissionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *QueryTestablePermissionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response containing permissions which can be tested on a resource. +type QueryTestablePermissionsResponse struct { + // The Permissions testable on the requested resource. + Permissions []*Permission `protobuf:"bytes,1,rep,name=permissions" json:"permissions,omitempty"` + // To retrieve the next page of results, set + // `QueryTestableRolesRequest.page_token` to this value. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *QueryTestablePermissionsResponse) Reset() { *m = QueryTestablePermissionsResponse{} } +func (m *QueryTestablePermissionsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTestablePermissionsResponse) ProtoMessage() {} +func (*QueryTestablePermissionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{28} +} + +func (m *QueryTestablePermissionsResponse) GetPermissions() []*Permission { + if m != nil { + return m.Permissions + } + return nil +} + +func (m *QueryTestablePermissionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*ServiceAccount)(nil), "google.iam.admin.v1.ServiceAccount") + proto.RegisterType((*CreateServiceAccountRequest)(nil), "google.iam.admin.v1.CreateServiceAccountRequest") + proto.RegisterType((*ListServiceAccountsRequest)(nil), "google.iam.admin.v1.ListServiceAccountsRequest") + proto.RegisterType((*ListServiceAccountsResponse)(nil), "google.iam.admin.v1.ListServiceAccountsResponse") + proto.RegisterType((*GetServiceAccountRequest)(nil), "google.iam.admin.v1.GetServiceAccountRequest") + proto.RegisterType((*DeleteServiceAccountRequest)(nil), "google.iam.admin.v1.DeleteServiceAccountRequest") + proto.RegisterType((*ListServiceAccountKeysRequest)(nil), "google.iam.admin.v1.ListServiceAccountKeysRequest") + proto.RegisterType((*ListServiceAccountKeysResponse)(nil), "google.iam.admin.v1.ListServiceAccountKeysResponse") + proto.RegisterType((*GetServiceAccountKeyRequest)(nil), "google.iam.admin.v1.GetServiceAccountKeyRequest") + proto.RegisterType((*ServiceAccountKey)(nil), "google.iam.admin.v1.ServiceAccountKey") + proto.RegisterType((*CreateServiceAccountKeyRequest)(nil), "google.iam.admin.v1.CreateServiceAccountKeyRequest") + 
proto.RegisterType((*DeleteServiceAccountKeyRequest)(nil), "google.iam.admin.v1.DeleteServiceAccountKeyRequest") + proto.RegisterType((*SignBlobRequest)(nil), "google.iam.admin.v1.SignBlobRequest") + proto.RegisterType((*SignBlobResponse)(nil), "google.iam.admin.v1.SignBlobResponse") + proto.RegisterType((*SignJwtRequest)(nil), "google.iam.admin.v1.SignJwtRequest") + proto.RegisterType((*SignJwtResponse)(nil), "google.iam.admin.v1.SignJwtResponse") + proto.RegisterType((*Role)(nil), "google.iam.admin.v1.Role") + proto.RegisterType((*QueryGrantableRolesRequest)(nil), "google.iam.admin.v1.QueryGrantableRolesRequest") + proto.RegisterType((*QueryGrantableRolesResponse)(nil), "google.iam.admin.v1.QueryGrantableRolesResponse") + proto.RegisterType((*ListRolesRequest)(nil), "google.iam.admin.v1.ListRolesRequest") + proto.RegisterType((*ListRolesResponse)(nil), "google.iam.admin.v1.ListRolesResponse") + proto.RegisterType((*GetRoleRequest)(nil), "google.iam.admin.v1.GetRoleRequest") + proto.RegisterType((*CreateRoleRequest)(nil), "google.iam.admin.v1.CreateRoleRequest") + proto.RegisterType((*UpdateRoleRequest)(nil), "google.iam.admin.v1.UpdateRoleRequest") + proto.RegisterType((*DeleteRoleRequest)(nil), "google.iam.admin.v1.DeleteRoleRequest") + proto.RegisterType((*UndeleteRoleRequest)(nil), "google.iam.admin.v1.UndeleteRoleRequest") + proto.RegisterType((*Permission)(nil), "google.iam.admin.v1.Permission") + proto.RegisterType((*QueryTestablePermissionsRequest)(nil), "google.iam.admin.v1.QueryTestablePermissionsRequest") + proto.RegisterType((*QueryTestablePermissionsResponse)(nil), "google.iam.admin.v1.QueryTestablePermissionsResponse") + proto.RegisterEnum("google.iam.admin.v1.ServiceAccountKeyAlgorithm", ServiceAccountKeyAlgorithm_name, ServiceAccountKeyAlgorithm_value) + proto.RegisterEnum("google.iam.admin.v1.ServiceAccountPrivateKeyType", ServiceAccountPrivateKeyType_name, ServiceAccountPrivateKeyType_value) + 
proto.RegisterEnum("google.iam.admin.v1.ServiceAccountPublicKeyType", ServiceAccountPublicKeyType_name, ServiceAccountPublicKeyType_value) + proto.RegisterEnum("google.iam.admin.v1.RoleView", RoleView_name, RoleView_value) + proto.RegisterEnum("google.iam.admin.v1.ListServiceAccountKeysRequest_KeyType", ListServiceAccountKeysRequest_KeyType_name, ListServiceAccountKeysRequest_KeyType_value) + proto.RegisterEnum("google.iam.admin.v1.Role_RoleLaunchStage", Role_RoleLaunchStage_name, Role_RoleLaunchStage_value) + proto.RegisterEnum("google.iam.admin.v1.Permission_PermissionLaunchStage", Permission_PermissionLaunchStage_name, Permission_PermissionLaunchStage_value) + proto.RegisterEnum("google.iam.admin.v1.Permission_CustomRolesSupportLevel", Permission_CustomRolesSupportLevel_name, Permission_CustomRolesSupportLevel_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for IAM service + +type IAMClient interface { + // Lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project. + ListServiceAccounts(ctx context.Context, in *ListServiceAccountsRequest, opts ...grpc.CallOption) (*ListServiceAccountsResponse, error) + // Gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) + // Creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount] + // and returns it. + CreateServiceAccount(ctx context.Context, in *CreateServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) + // Updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + // + // Currently, only the following fields are updatable: + // `display_name` . 
+ // The `etag` is mandatory. + UpdateServiceAccount(ctx context.Context, in *ServiceAccount, opts ...grpc.CallOption) (*ServiceAccount, error) + // Deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + DeleteServiceAccount(ctx context.Context, in *DeleteServiceAccountRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey]. + ListServiceAccountKeys(ctx context.Context, in *ListServiceAccountKeysRequest, opts ...grpc.CallOption) (*ListServiceAccountKeysResponse, error) + // Gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] + // by key id. + GetServiceAccountKey(ctx context.Context, in *GetServiceAccountKeyRequest, opts ...grpc.CallOption) (*ServiceAccountKey, error) + // Creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] + // and returns it. + CreateServiceAccountKey(ctx context.Context, in *CreateServiceAccountKeyRequest, opts ...grpc.CallOption) (*ServiceAccountKey, error) + // Deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]. + DeleteServiceAccountKey(ctx context.Context, in *DeleteServiceAccountKeyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Signs a blob using a service account's system-managed private key. + SignBlob(ctx context.Context, in *SignBlobRequest, opts ...grpc.CallOption) (*SignBlobResponse, error) + // Signs a JWT using a service account's system-managed private key. + // + // If no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an + // an expiry time of one hour by default. If you request an expiry time of + // more than one hour, the request will fail. + SignJwt(ctx context.Context, in *SignJwtRequest, opts ...grpc.CallOption) (*SignJwtResponse, error) + // Returns the IAM access control policy for a + // [ServiceAccount][google.iam.admin.v1.ServiceAccount]. 
+ GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Sets the IAM access control policy for a + // [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Tests the specified permissions against the IAM access control policy + // for a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) + // Queries roles that can be granted on a particular resource. + // A role is grantable if it can be used as the role in a binding for a policy + // for that resource. + QueryGrantableRoles(ctx context.Context, in *QueryGrantableRolesRequest, opts ...grpc.CallOption) (*QueryGrantableRolesResponse, error) + // Lists the Roles defined on a resource. + ListRoles(ctx context.Context, in *ListRolesRequest, opts ...grpc.CallOption) (*ListRolesResponse, error) + // Gets a Role definition. + GetRole(ctx context.Context, in *GetRoleRequest, opts ...grpc.CallOption) (*Role, error) + // Creates a new Role. + CreateRole(ctx context.Context, in *CreateRoleRequest, opts ...grpc.CallOption) (*Role, error) + // Updates a Role definition. + UpdateRole(ctx context.Context, in *UpdateRoleRequest, opts ...grpc.CallOption) (*Role, error) + // Soft deletes a role. The role is suspended and cannot be used to create new + // IAM Policy Bindings. + // The Role will not be included in `ListRoles()` unless `show_deleted` is set + // in the `ListRolesRequest`. The Role contains the deleted boolean set. + // Existing Bindings remains, but are inactive. The Role can be undeleted + // within 7 days. After 7 days the Role is deleted and all Bindings associated + // with the role are removed. 
+ DeleteRole(ctx context.Context, in *DeleteRoleRequest, opts ...grpc.CallOption) (*Role, error) + // Undelete a Role, bringing it back in its previous state. + UndeleteRole(ctx context.Context, in *UndeleteRoleRequest, opts ...grpc.CallOption) (*Role, error) + // Lists the permissions testable on a resource. + // A permission is testable if it can be tested for an identity on a resource. + QueryTestablePermissions(ctx context.Context, in *QueryTestablePermissionsRequest, opts ...grpc.CallOption) (*QueryTestablePermissionsResponse, error) +} + +type iAMClient struct { + cc *grpc.ClientConn +} + +func NewIAMClient(cc *grpc.ClientConn) IAMClient { + return &iAMClient{cc} +} + +func (c *iAMClient) ListServiceAccounts(ctx context.Context, in *ListServiceAccountsRequest, opts ...grpc.CallOption) (*ListServiceAccountsResponse, error) { + out := new(ListServiceAccountsResponse) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/ListServiceAccounts", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { + out := new(ServiceAccount) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/GetServiceAccount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) CreateServiceAccount(ctx context.Context, in *CreateServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { + out := new(ServiceAccount) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/CreateServiceAccount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) UpdateServiceAccount(ctx context.Context, in *ServiceAccount, opts ...grpc.CallOption) (*ServiceAccount, error) { + out := new(ServiceAccount) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/UpdateServiceAccount", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) DeleteServiceAccount(ctx context.Context, in *DeleteServiceAccountRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/DeleteServiceAccount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) ListServiceAccountKeys(ctx context.Context, in *ListServiceAccountKeysRequest, opts ...grpc.CallOption) (*ListServiceAccountKeysResponse, error) { + out := new(ListServiceAccountKeysResponse) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/ListServiceAccountKeys", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) GetServiceAccountKey(ctx context.Context, in *GetServiceAccountKeyRequest, opts ...grpc.CallOption) (*ServiceAccountKey, error) { + out := new(ServiceAccountKey) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/GetServiceAccountKey", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) CreateServiceAccountKey(ctx context.Context, in *CreateServiceAccountKeyRequest, opts ...grpc.CallOption) (*ServiceAccountKey, error) { + out := new(ServiceAccountKey) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/CreateServiceAccountKey", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) DeleteServiceAccountKey(ctx context.Context, in *DeleteServiceAccountKeyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/DeleteServiceAccountKey", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) SignBlob(ctx context.Context, in *SignBlobRequest, opts ...grpc.CallOption) (*SignBlobResponse, error) { + out := new(SignBlobResponse) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/SignBlob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) SignJwt(ctx context.Context, in *SignJwtRequest, opts ...grpc.CallOption) (*SignJwtResponse, error) { + out := new(SignJwtResponse) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/SignJwt", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/GetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) { + out := new(google_iam_v11.TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/TestIamPermissions", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) QueryGrantableRoles(ctx context.Context, in *QueryGrantableRolesRequest, opts ...grpc.CallOption) (*QueryGrantableRolesResponse, error) { + out := new(QueryGrantableRolesResponse) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/QueryGrantableRoles", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) ListRoles(ctx context.Context, in *ListRolesRequest, opts ...grpc.CallOption) (*ListRolesResponse, error) { + out := new(ListRolesResponse) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/ListRoles", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) GetRole(ctx context.Context, in *GetRoleRequest, opts ...grpc.CallOption) (*Role, error) { + out := new(Role) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/GetRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) CreateRole(ctx context.Context, in *CreateRoleRequest, opts ...grpc.CallOption) (*Role, error) { + out := new(Role) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/CreateRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) UpdateRole(ctx context.Context, in *UpdateRoleRequest, opts ...grpc.CallOption) (*Role, error) { + out := new(Role) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/UpdateRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) DeleteRole(ctx context.Context, in *DeleteRoleRequest, opts ...grpc.CallOption) (*Role, error) { + out := new(Role) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/DeleteRole", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) UndeleteRole(ctx context.Context, in *UndeleteRoleRequest, opts ...grpc.CallOption) (*Role, error) { + out := new(Role) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/UndeleteRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMClient) QueryTestablePermissions(ctx context.Context, in *QueryTestablePermissionsRequest, opts ...grpc.CallOption) (*QueryTestablePermissionsResponse, error) { + out := new(QueryTestablePermissionsResponse) + err := grpc.Invoke(ctx, "/google.iam.admin.v1.IAM/QueryTestablePermissions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for IAM service + +type IAMServer interface { + // Lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project. + ListServiceAccounts(context.Context, *ListServiceAccountsRequest) (*ListServiceAccountsResponse, error) + // Gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) + // Creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount] + // and returns it. + CreateServiceAccount(context.Context, *CreateServiceAccountRequest) (*ServiceAccount, error) + // Updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + // + // Currently, only the following fields are updatable: + // `display_name` . + // The `etag` is mandatory. + UpdateServiceAccount(context.Context, *ServiceAccount) (*ServiceAccount, error) + // Deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + DeleteServiceAccount(context.Context, *DeleteServiceAccountRequest) (*google_protobuf1.Empty, error) + // Lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey]. 
+ ListServiceAccountKeys(context.Context, *ListServiceAccountKeysRequest) (*ListServiceAccountKeysResponse, error) + // Gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] + // by key id. + GetServiceAccountKey(context.Context, *GetServiceAccountKeyRequest) (*ServiceAccountKey, error) + // Creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] + // and returns it. + CreateServiceAccountKey(context.Context, *CreateServiceAccountKeyRequest) (*ServiceAccountKey, error) + // Deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]. + DeleteServiceAccountKey(context.Context, *DeleteServiceAccountKeyRequest) (*google_protobuf1.Empty, error) + // Signs a blob using a service account's system-managed private key. + SignBlob(context.Context, *SignBlobRequest) (*SignBlobResponse, error) + // Signs a JWT using a service account's system-managed private key. + // + // If no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an + // an expiry time of one hour by default. If you request an expiry time of + // more than one hour, the request will fail. + SignJwt(context.Context, *SignJwtRequest) (*SignJwtResponse, error) + // Returns the IAM access control policy for a + // [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + GetIamPolicy(context.Context, *google_iam_v11.GetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Sets the IAM access control policy for a + // [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + SetIamPolicy(context.Context, *google_iam_v11.SetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Tests the specified permissions against the IAM access control policy + // for a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. + TestIamPermissions(context.Context, *google_iam_v11.TestIamPermissionsRequest) (*google_iam_v11.TestIamPermissionsResponse, error) + // Queries roles that can be granted on a particular resource. 
+ // A role is grantable if it can be used as the role in a binding for a policy + // for that resource. + QueryGrantableRoles(context.Context, *QueryGrantableRolesRequest) (*QueryGrantableRolesResponse, error) + // Lists the Roles defined on a resource. + ListRoles(context.Context, *ListRolesRequest) (*ListRolesResponse, error) + // Gets a Role definition. + GetRole(context.Context, *GetRoleRequest) (*Role, error) + // Creates a new Role. + CreateRole(context.Context, *CreateRoleRequest) (*Role, error) + // Updates a Role definition. + UpdateRole(context.Context, *UpdateRoleRequest) (*Role, error) + // Soft deletes a role. The role is suspended and cannot be used to create new + // IAM Policy Bindings. + // The Role will not be included in `ListRoles()` unless `show_deleted` is set + // in the `ListRolesRequest`. The Role contains the deleted boolean set. + // Existing Bindings remains, but are inactive. The Role can be undeleted + // within 7 days. After 7 days the Role is deleted and all Bindings associated + // with the role are removed. + DeleteRole(context.Context, *DeleteRoleRequest) (*Role, error) + // Undelete a Role, bringing it back in its previous state. + UndeleteRole(context.Context, *UndeleteRoleRequest) (*Role, error) + // Lists the permissions testable on a resource. + // A permission is testable if it can be tested for an identity on a resource. 
+ QueryTestablePermissions(context.Context, *QueryTestablePermissionsRequest) (*QueryTestablePermissionsResponse, error) +} + +func RegisterIAMServer(s *grpc.Server, srv IAMServer) { + s.RegisterService(&_IAM_serviceDesc, srv) +} + +func _IAM_ListServiceAccounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceAccountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).ListServiceAccounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/ListServiceAccounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).ListServiceAccounts(ctx, req.(*ListServiceAccountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).GetServiceAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/GetServiceAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_CreateServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).CreateServiceAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.iam.admin.v1.IAM/CreateServiceAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).CreateServiceAccount(ctx, req.(*CreateServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_UpdateServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ServiceAccount) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).UpdateServiceAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/UpdateServiceAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).UpdateServiceAccount(ctx, req.(*ServiceAccount)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_DeleteServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).DeleteServiceAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/DeleteServiceAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).DeleteServiceAccount(ctx, req.(*DeleteServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_ListServiceAccountKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceAccountKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).ListServiceAccountKeys(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/ListServiceAccountKeys", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).ListServiceAccountKeys(ctx, req.(*ListServiceAccountKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_GetServiceAccountKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceAccountKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).GetServiceAccountKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/GetServiceAccountKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).GetServiceAccountKey(ctx, req.(*GetServiceAccountKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_CreateServiceAccountKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceAccountKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).CreateServiceAccountKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/CreateServiceAccountKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).CreateServiceAccountKey(ctx, req.(*CreateServiceAccountKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_DeleteServiceAccountKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceAccountKeyRequest) + if err := dec(in); err != nil { + return nil, err + } 
+ if interceptor == nil { + return srv.(IAMServer).DeleteServiceAccountKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/DeleteServiceAccountKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).DeleteServiceAccountKey(ctx, req.(*DeleteServiceAccountKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_SignBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignBlobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).SignBlob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/SignBlob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).SignBlob(ctx, req.(*SignBlobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_SignJwt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignJwtRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).SignJwt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/SignJwt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).SignJwt(ctx, req.(*SignJwtRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).GetIamPolicy(ctx, in) + 
} + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).GetIamPolicy(ctx, req.(*google_iam_v11.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).SetIamPolicy(ctx, req.(*google_iam_v11.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).TestIamPermissions(ctx, req.(*google_iam_v11.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_QueryGrantableRoles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryGrantableRolesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + 
return srv.(IAMServer).QueryGrantableRoles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/QueryGrantableRoles", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).QueryGrantableRoles(ctx, req.(*QueryGrantableRolesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_ListRoles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRolesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).ListRoles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/ListRoles", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).ListRoles(ctx, req.(*ListRolesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_GetRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).GetRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/GetRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).GetRole(ctx, req.(*GetRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_CreateRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).CreateRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: "/google.iam.admin.v1.IAM/CreateRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).CreateRole(ctx, req.(*CreateRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_UpdateRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).UpdateRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/UpdateRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).UpdateRole(ctx, req.(*UpdateRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_DeleteRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).DeleteRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/DeleteRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).DeleteRole(ctx, req.(*DeleteRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_UndeleteRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UndeleteRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).UndeleteRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/UndeleteRole", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(IAMServer).UndeleteRole(ctx, req.(*UndeleteRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAM_QueryTestablePermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTestablePermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMServer).QueryTestablePermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.admin.v1.IAM/QueryTestablePermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMServer).QueryTestablePermissions(ctx, req.(*QueryTestablePermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _IAM_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.iam.admin.v1.IAM", + HandlerType: (*IAMServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListServiceAccounts", + Handler: _IAM_ListServiceAccounts_Handler, + }, + { + MethodName: "GetServiceAccount", + Handler: _IAM_GetServiceAccount_Handler, + }, + { + MethodName: "CreateServiceAccount", + Handler: _IAM_CreateServiceAccount_Handler, + }, + { + MethodName: "UpdateServiceAccount", + Handler: _IAM_UpdateServiceAccount_Handler, + }, + { + MethodName: "DeleteServiceAccount", + Handler: _IAM_DeleteServiceAccount_Handler, + }, + { + MethodName: "ListServiceAccountKeys", + Handler: _IAM_ListServiceAccountKeys_Handler, + }, + { + MethodName: "GetServiceAccountKey", + Handler: _IAM_GetServiceAccountKey_Handler, + }, + { + MethodName: "CreateServiceAccountKey", + Handler: _IAM_CreateServiceAccountKey_Handler, + }, + { + MethodName: "DeleteServiceAccountKey", + Handler: _IAM_DeleteServiceAccountKey_Handler, + }, + { + MethodName: "SignBlob", + Handler: _IAM_SignBlob_Handler, + }, + { + MethodName: "SignJwt", + Handler: _IAM_SignJwt_Handler, + 
}, + { + MethodName: "GetIamPolicy", + Handler: _IAM_GetIamPolicy_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _IAM_SetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _IAM_TestIamPermissions_Handler, + }, + { + MethodName: "QueryGrantableRoles", + Handler: _IAM_QueryGrantableRoles_Handler, + }, + { + MethodName: "ListRoles", + Handler: _IAM_ListRoles_Handler, + }, + { + MethodName: "GetRole", + Handler: _IAM_GetRole_Handler, + }, + { + MethodName: "CreateRole", + Handler: _IAM_CreateRole_Handler, + }, + { + MethodName: "UpdateRole", + Handler: _IAM_UpdateRole_Handler, + }, + { + MethodName: "DeleteRole", + Handler: _IAM_DeleteRole_Handler, + }, + { + MethodName: "UndeleteRole", + Handler: _IAM_UndeleteRole_Handler, + }, + { + MethodName: "QueryTestablePermissions", + Handler: _IAM_QueryTestablePermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/iam/admin/v1/iam.proto", +} + +func init() { proto.RegisterFile("google/iam/admin/v1/iam.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2430 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5a, 0x4f, 0x73, 0xdb, 0xc6, + 0x15, 0x37, 0x28, 0xea, 0x0f, 0x9f, 0x24, 0x0a, 0x5a, 0xc9, 0x16, 0x4b, 0x59, 0xb6, 0xb2, 0xb5, + 0x1d, 0x99, 0xb5, 0x45, 0x89, 0x96, 0x6b, 0x57, 0x1e, 0x27, 0xa5, 0x44, 0x9a, 0xa1, 0x45, 0xcb, + 0x2c, 0x48, 0x35, 0x71, 0xff, 0x0c, 0x06, 0x22, 0x56, 0x34, 0x22, 0x10, 0x80, 0x01, 0x50, 0x2a, + 0x9d, 0x49, 0x67, 0xda, 0x43, 0x2f, 0x99, 0x76, 0xda, 0x49, 0x0e, 0x39, 0xa5, 0x33, 0xbd, 0xb4, + 0xb7, 0x5e, 0x3a, 0xd3, 0x69, 0x27, 0xdf, 0xa0, 0xc7, 0x1e, 0xfa, 0x05, 0x32, 0xd3, 0xaf, 0xd0, + 0x63, 0x67, 0x17, 0x80, 0x08, 0x92, 0x00, 0x04, 0x39, 0x69, 0x2f, 0x1a, 0xec, 0xfb, 0xfb, 0xdb, + 0xb7, 0xbb, 0x6f, 0xdf, 0x3e, 0x0a, 0x56, 0xda, 0xba, 0xde, 0x56, 0x49, 0x5e, 0x91, 0x3a, 0x79, + 0x49, 0xee, 0x28, 0x5a, 0xfe, 0x64, 0x93, 0x0e, 0xd6, 0x0d, 0x53, 0xb7, 0x75, 0xb4, 
0xe0, 0xb0, + 0xd7, 0x29, 0x85, 0xb1, 0xd7, 0x4f, 0x36, 0xb3, 0x57, 0x5d, 0x1d, 0xc9, 0x50, 0xf2, 0x92, 0xa6, + 0xe9, 0xb6, 0x64, 0x2b, 0xba, 0x66, 0x39, 0x2a, 0xd9, 0x6b, 0x3e, 0x8b, 0x8e, 0x2d, 0xd1, 0xd0, + 0x55, 0xa5, 0xd5, 0x73, 0xf9, 0xd9, 0x41, 0xfe, 0x00, 0x6f, 0xd9, 0xe5, 0xb1, 0xd1, 0x61, 0xf7, + 0x28, 0x4f, 0x3a, 0x86, 0xed, 0x31, 0x57, 0x87, 0x99, 0x47, 0x0a, 0x51, 0x65, 0xb1, 0x23, 0x59, + 0xc7, 0xae, 0xc4, 0xf5, 0x61, 0x09, 0x5b, 0xe9, 0x10, 0xcb, 0x96, 0x3a, 0x86, 0x23, 0x80, 0xff, + 0xc5, 0x41, 0xba, 0x41, 0xcc, 0x13, 0xa5, 0x45, 0x8a, 0xad, 0x96, 0xde, 0xd5, 0x6c, 0x84, 0x20, + 0xa9, 0x49, 0x1d, 0x92, 0xe1, 0x56, 0xb9, 0xb5, 0x94, 0xc0, 0xbe, 0xd1, 0x0a, 0x80, 0x61, 0xea, + 0x1f, 0x92, 0x96, 0x2d, 0x2a, 0x72, 0x26, 0xc1, 0x38, 0x29, 0x97, 0x52, 0x95, 0xd1, 0x32, 0xa4, + 0xba, 0x9a, 0xf2, 0xaa, 0x4b, 0x28, 0x37, 0xc9, 0xb8, 0x53, 0x0e, 0xa1, 0x2a, 0xa3, 0x45, 0x18, + 0x27, 0x1d, 0x49, 0x51, 0x33, 0xe3, 0x8c, 0xe1, 0x0c, 0xd0, 0x5b, 0x30, 0x23, 0x2b, 0x96, 0xa1, + 0x4a, 0x3d, 0x91, 0x79, 0x9b, 0x60, 0xcc, 0x69, 0x97, 0xb6, 0x4f, 0x9d, 0x22, 0x48, 0x12, 0x5b, + 0x6a, 0x67, 0x26, 0x57, 0xb9, 0xb5, 0x19, 0x81, 0x7d, 0xa3, 0x35, 0xe0, 0x75, 0xa9, 0x6b, 0xbf, + 0x2c, 0x88, 0x2d, 0x55, 0x21, 0x1a, 0x83, 0x93, 0x62, 0xaa, 0x69, 0x87, 0xbe, 0xcb, 0xc8, 0x55, + 0x19, 0x7f, 0xc1, 0xc1, 0xf2, 0xae, 0x49, 0x24, 0x9b, 0x0c, 0xce, 0x4f, 0x20, 0xaf, 0xba, 0xc4, + 0x0a, 0x9d, 0xa6, 0xe4, 0x48, 0xf9, 0xa6, 0xe9, 0x52, 0xaa, 0x32, 0xaa, 0xc1, 0x9c, 0xe5, 0xd8, + 0x12, 0x5d, 0x62, 0x66, 0x6c, 0x95, 0x5b, 0x9b, 0x2e, 0x7c, 0x7b, 0x3d, 0x60, 0x57, 0xac, 0x0f, + 0xf9, 0x4d, 0x5b, 0x03, 0x63, 0xac, 0x42, 0xb6, 0xa6, 0x58, 0xf6, 0xa0, 0x94, 0x15, 0x05, 0x6f, + 0x19, 0x52, 0x86, 0xd4, 0x26, 0xa2, 0xa5, 0xbc, 0x26, 0x0c, 0xdd, 0xb8, 0x30, 0x45, 0x09, 0x0d, + 0xe5, 0xb5, 0xb3, 0x44, 0x94, 0x69, 0xeb, 0xc7, 0x44, 0x63, 0xb8, 0xe8, 0x12, 0x49, 0x6d, 0xd2, + 0xa4, 0x04, 0xfc, 0x2b, 0x0e, 0x96, 0x03, 0xdd, 0x59, 0x86, 0xae, 0x59, 0x04, 0xbd, 0x0b, 0x53, + 0xee, 0x9c, 0xac, 0x0c, 
0xb7, 0x3a, 0x16, 0x77, 0x52, 0x67, 0x4a, 0xe8, 0x16, 0xcc, 0x69, 0xe4, + 0x67, 0xb6, 0xe8, 0x03, 0xe1, 0x04, 0x70, 0x96, 0x92, 0xeb, 0x67, 0x40, 0xd6, 0x21, 0x53, 0x21, + 0x76, 0xec, 0x35, 0xc1, 0x9b, 0xb0, 0x5c, 0x22, 0x2a, 0xb9, 0xc0, 0x32, 0xd2, 0x4d, 0xbd, 0x32, + 0x3a, 0xd7, 0x3d, 0xd2, 0x8b, 0x8c, 0xee, 0xfb, 0x90, 0x3a, 0x26, 0x3d, 0xd1, 0xee, 0x19, 0xc4, + 0xca, 0x24, 0x56, 0xc7, 0xd6, 0xd2, 0x85, 0xed, 0xc0, 0x10, 0x44, 0x9a, 0x5e, 0xdf, 0x23, 0xbd, + 0x66, 0xcf, 0x20, 0xc2, 0xd4, 0xb1, 0xf3, 0x61, 0xe1, 0x2a, 0x4c, 0xba, 0x44, 0x94, 0x81, 0xc5, + 0xbd, 0xf2, 0x0b, 0xb1, 0xf9, 0xa2, 0x5e, 0x16, 0x0f, 0xf6, 0x1b, 0xf5, 0xf2, 0x6e, 0xf5, 0x49, + 0xb5, 0x5c, 0xe2, 0x2f, 0x21, 0x1e, 0x66, 0x0e, 0x1a, 0x65, 0x41, 0x7c, 0x56, 0xdc, 0x2f, 0x56, + 0xca, 0x25, 0x9e, 0x43, 0x08, 0xd2, 0x8d, 0x17, 0x8d, 0x66, 0xf9, 0xd9, 0x19, 0x2d, 0x81, 0x7f, + 0x02, 0xd7, 0xc2, 0xbc, 0xbb, 0xeb, 0xb8, 0x0d, 0xc9, 0x63, 0xd2, 0xf3, 0xd6, 0xf0, 0x56, 0x8c, + 0x35, 0xdc, 0x23, 0x3d, 0x81, 0xe9, 0xe0, 0x4f, 0x38, 0x58, 0x1e, 0x59, 0x1b, 0xca, 0x8e, 0x88, + 0xda, 0x07, 0x30, 0x67, 0x74, 0x0f, 0x55, 0xa5, 0x25, 0x7a, 0xc1, 0x63, 0xcb, 0x9e, 0x2e, 0x6c, + 0xc4, 0x70, 0x5d, 0x67, 0x9a, 0x5e, 0xc4, 0x66, 0x0d, 0xff, 0x10, 0xff, 0x7d, 0x0c, 0xe6, 0x47, + 0xa0, 0x04, 0x62, 0xf8, 0x31, 0xf0, 0x86, 0xa9, 0x9c, 0x48, 0x36, 0x19, 0x06, 0xb1, 0x19, 0x07, + 0x84, 0xa3, 0xea, 0xa1, 0x48, 0x1b, 0x03, 0x63, 0xd4, 0x84, 0x59, 0x6a, 0x54, 0x52, 0xdb, 0xba, + 0xa9, 0xd8, 0x2f, 0x3b, 0x99, 0x29, 0x66, 0x39, 0x1f, 0x2f, 0xb2, 0x45, 0x4f, 0x4d, 0x98, 0x39, + 0xf6, 0x8d, 0x68, 0x1e, 0xf3, 0x43, 0x96, 0x25, 0x5b, 0x62, 0x67, 0x76, 0xc6, 0xef, 0xbf, 0x24, + 0xd9, 0x12, 0x3d, 0x57, 0xbe, 0x00, 0x33, 0x41, 0x27, 0x21, 0xf6, 0xc3, 0xc5, 0xe4, 0x4a, 0xc0, + 0x9f, 0x48, 0xaa, 0x22, 0x8b, 0xd2, 0x91, 0x4d, 0x4c, 0x91, 0x26, 0x7a, 0x96, 0x8a, 0xa7, 0x0b, + 0x59, 0x0f, 0xaa, 0x77, 0x0b, 0xac, 0x37, 0xbd, 0x5b, 0x40, 0x48, 0x33, 0x9d, 0x22, 0x55, 0xa1, + 0x44, 0xf4, 0x04, 0xe6, 0x1d, 0x2b, 0x87, 0xe4, 0x48, 0x37, 
0x89, 0x63, 0x66, 0xfc, 0x5c, 0x33, + 0x73, 0x4c, 0x69, 0x87, 0xe9, 0x50, 0x2a, 0xfe, 0x37, 0x07, 0xd7, 0x82, 0xb2, 0xef, 0x39, 0xbb, + 0xe9, 0xff, 0xbb, 0x92, 0x63, 0xdf, 0xc0, 0x4a, 0xe2, 0x2d, 0xb8, 0x16, 0x94, 0x9f, 0xa2, 0x27, + 0x8a, 0xab, 0x30, 0xd7, 0x50, 0xda, 0xda, 0x8e, 0xaa, 0x1f, 0x46, 0xc5, 0x03, 0xc3, 0xec, 0x61, + 0xcf, 0x26, 0x96, 0x68, 0xeb, 0xa2, 0xa5, 0xb4, 0x9d, 0x94, 0x3a, 0x23, 0x4c, 0x33, 0x62, 0x53, + 0xa7, 0x26, 0x70, 0x05, 0xf8, 0xbe, 0x29, 0x37, 0x0b, 0x5c, 0x86, 0x09, 0x3a, 0x55, 0x45, 0x76, + 0xad, 0x8d, 0x1f, 0x93, 0x5e, 0x55, 0x46, 0x57, 0x21, 0x45, 0xad, 0x48, 0x76, 0xd7, 0x24, 0xae, + 0xa9, 0x3e, 0x01, 0xbf, 0x03, 0x69, 0x6a, 0xe8, 0xe9, 0x69, 0xe4, 0x1d, 0x99, 0x81, 0x49, 0x43, + 0xea, 0xa9, 0xba, 0xe4, 0x5d, 0x90, 0xde, 0x10, 0x57, 0x9c, 0x39, 0x31, 0xfd, 0x68, 0x1c, 0x2b, + 0x00, 0xd4, 0x2d, 0x91, 0xc5, 0x0f, 0x4f, 0x6d, 0xef, 0x9e, 0x75, 0x28, 0x4f, 0x4f, 0x6d, 0xfc, + 0x8f, 0x04, 0x24, 0x05, 0x5d, 0x25, 0x81, 0xfe, 0x17, 0x61, 0xdc, 0x56, 0x6c, 0x95, 0xb8, 0x6a, + 0xce, 0x00, 0xad, 0xc2, 0xb4, 0x4c, 0xac, 0x96, 0xa9, 0x18, 0xb4, 0xf2, 0x72, 0xaf, 0x3f, 0x3f, + 0x09, 0x6d, 0xc2, 0xa2, 0xa2, 0xb5, 0xd4, 0xae, 0x4c, 0x64, 0xd1, 0x20, 0x66, 0x47, 0xb1, 0x2c, + 0x5a, 0xa3, 0x65, 0x26, 0x57, 0xc7, 0xd6, 0x52, 0xc2, 0x82, 0xc7, 0xab, 0xf7, 0x59, 0xe8, 0x5d, + 0x18, 0xb7, 0x6c, 0xa9, 0x4d, 0xdc, 0x23, 0x7f, 0x3b, 0x70, 0xa3, 0x50, 0xa0, 0xec, 0x4f, 0x4d, + 0xea, 0x6a, 0xad, 0x97, 0x0d, 0xaa, 0x20, 0x38, 0x7a, 0x67, 0x15, 0x4c, 0xca, 0x57, 0xc1, 0x64, + 0x60, 0x52, 0x66, 0xfb, 0x45, 0xce, 0x4c, 0xaf, 0x72, 0x6b, 0x53, 0x82, 0x37, 0xc4, 0x07, 0x30, + 0x37, 0x64, 0x07, 0xa5, 0x60, 0xbc, 0x58, 0xab, 0xbf, 0x57, 0xe4, 0x2f, 0xa1, 0x29, 0x48, 0xee, + 0x94, 0x9b, 0x45, 0x9e, 0x43, 0x13, 0x90, 0xa8, 0x14, 0xf9, 0x04, 0x4a, 0x03, 0x94, 0xca, 0x75, + 0xa1, 0xbc, 0x5b, 0x6c, 0x96, 0x4b, 0x7c, 0x12, 0xcd, 0xc0, 0x54, 0xa9, 0xda, 0x28, 0xee, 0xd4, + 0xca, 0x25, 0x7e, 0x1c, 0x4d, 0xc2, 0x58, 0xb9, 0x58, 0xe7, 0x27, 0xf0, 0xdf, 0x38, 0xc8, 0xfe, + 
0xa0, 0x4b, 0xcc, 0x5e, 0xc5, 0x94, 0x34, 0x5b, 0x3a, 0x54, 0x09, 0xf5, 0x72, 0x76, 0x15, 0xde, + 0x01, 0x74, 0xd4, 0x55, 0x55, 0xd1, 0x24, 0x96, 0xde, 0x35, 0x5b, 0x44, 0xf4, 0x45, 0x9c, 0xa7, + 0x1c, 0xc1, 0x65, 0xb0, 0x9a, 0x6c, 0x13, 0x92, 0x27, 0x0a, 0x39, 0x75, 0x0f, 0xe5, 0x4a, 0x68, + 0x44, 0x7e, 0xa8, 0x90, 0x53, 0x81, 0x89, 0x0e, 0x56, 0x2d, 0x63, 0x91, 0x55, 0x4b, 0x72, 0xb8, + 0x6a, 0x39, 0x81, 0xe5, 0x40, 0xe8, 0xee, 0xf6, 0xca, 0xc3, 0xb8, 0x49, 0x09, 0xee, 0x6d, 0xf7, + 0xad, 0x50, 0x38, 0x82, 0x23, 0x17, 0xbb, 0x48, 0xf9, 0x92, 0x03, 0x9e, 0x5e, 0xb4, 0x03, 0x91, + 0xba, 0x02, 0x13, 0x86, 0x64, 0x12, 0xcd, 0x76, 0xa3, 0xe3, 0x8e, 0xbe, 0x4e, 0x59, 0x76, 0x16, + 0xcf, 0x64, 0xfc, 0x78, 0xbe, 0x05, 0x33, 0xd6, 0x4b, 0xfd, 0x54, 0xf4, 0x76, 0xd1, 0x04, 0xdb, + 0x45, 0xd3, 0x94, 0x56, 0x72, 0x77, 0x92, 0x0a, 0xf3, 0x3e, 0xf4, 0xff, 0xeb, 0x60, 0xdd, 0x80, + 0x74, 0x85, 0x30, 0x67, 0x51, 0x19, 0xcf, 0x82, 0x79, 0xe7, 0x42, 0xf0, 0x0b, 0x86, 0x85, 0x74, + 0x09, 0x26, 0x29, 0x86, 0x7e, 0x15, 0x3e, 0x41, 0x87, 0x55, 0x19, 0xdd, 0x85, 0x24, 0xfd, 0x72, + 0xeb, 0xee, 0x88, 0x39, 0x30, 0x31, 0xfc, 0x29, 0x07, 0xf3, 0x07, 0x86, 0x3c, 0xe4, 0x35, 0x28, + 0xad, 0x78, 0x86, 0x13, 0xb1, 0x0c, 0xa3, 0x47, 0x30, 0xdd, 0x65, 0x76, 0xd9, 0x6b, 0xcb, 0x85, + 0x33, 0x7a, 0x43, 0x3e, 0xa1, 0x0f, 0xb2, 0x67, 0x92, 0x75, 0x2c, 0x80, 0x23, 0x4e, 0xbf, 0xf1, + 0x23, 0x98, 0x77, 0x56, 0xea, 0x3c, 0x50, 0x5e, 0xfe, 0x48, 0xf4, 0xf3, 0x07, 0x7e, 0x0c, 0x0b, + 0x07, 0x9a, 0xfc, 0xc6, 0xea, 0x5f, 0x8d, 0x01, 0xf4, 0x73, 0xdc, 0x37, 0x9a, 0x61, 0x1f, 0x40, + 0x46, 0xd7, 0xd4, 0x9e, 0xa8, 0x68, 0xa2, 0x61, 0x12, 0x99, 0x1c, 0x29, 0x34, 0xc3, 0x3b, 0x7b, + 0x2e, 0xc9, 0x36, 0xe9, 0x65, 0xca, 0xaf, 0x6a, 0xf5, 0x33, 0x2e, 0xdb, 0xa1, 0x68, 0xcf, 0xcb, + 0xb3, 0xe3, 0xec, 0x14, 0xdc, 0x0f, 0x0c, 0x7e, 0x1f, 0xb4, 0xef, 0x33, 0x20, 0xe7, 0xda, 0x90, + 0x6d, 0x75, 0x2d, 0x5b, 0xef, 0x38, 0x9e, 0x45, 0xab, 0x6b, 0x18, 0xba, 0x69, 0x8b, 0x2a, 0x39, + 0x21, 0x2a, 0x3b, 0x2c, 0xe9, 0xc2, 
0x83, 0xf3, 0x3c, 0xec, 0x32, 0x0b, 0x0c, 0x5d, 0xc3, 0xd1, + 0xaf, 0x51, 0x75, 0x61, 0xa9, 0x15, 0xcc, 0xc0, 0x25, 0xb8, 0x1c, 0x88, 0xea, 0x22, 0x19, 0x7c, + 0x0c, 0xbf, 0x07, 0x4b, 0x21, 0x9e, 0xd1, 0x2c, 0xa4, 0x1a, 0x07, 0xf5, 0xfa, 0x73, 0xa1, 0xc9, + 0x9e, 0x0b, 0xd3, 0x30, 0xd9, 0x2c, 0x37, 0x9a, 0xd5, 0xfd, 0x0a, 0xcf, 0xa1, 0x79, 0x98, 0xdd, + 0x7f, 0xde, 0x14, 0xfb, 0xfc, 0x04, 0x2d, 0xe5, 0xaf, 0xb3, 0xcc, 0xd9, 0xa4, 0x15, 0xda, 0xa1, + 0x4a, 0x7c, 0xf7, 0xda, 0x9b, 0x65, 0xfe, 0xaf, 0xf3, 0xf8, 0xfc, 0x35, 0x07, 0xab, 0xe1, 0x68, + 0xdc, 0xfc, 0x54, 0x84, 0x69, 0xff, 0xbd, 0xec, 0x64, 0xa9, 0xeb, 0xe7, 0xac, 0x94, 0xe0, 0xd7, + 0x89, 0x9b, 0xb1, 0x72, 0x12, 0x64, 0xc3, 0xeb, 0x3b, 0xb4, 0x04, 0x0b, 0xf4, 0x91, 0x56, 0xac, + 0x55, 0x86, 0xde, 0x68, 0x8b, 0xc0, 0x7b, 0x0c, 0xa1, 0x51, 0x14, 0x37, 0x37, 0x0a, 0x5b, 0x3c, + 0x37, 0x4c, 0x2d, 0x6c, 0x6c, 0x3d, 0xe4, 0x13, 0x39, 0x15, 0xae, 0x46, 0x15, 0xa7, 0x54, 0x2b, + 0xe0, 0x15, 0xe8, 0x51, 0xeb, 0x7b, 0xbb, 0x8d, 0xcd, 0x82, 0xf8, 0xa4, 0x5a, 0x2b, 0xf3, 0x1c, + 0x5a, 0x85, 0xab, 0x8c, 0x5a, 0x79, 0xfe, 0xbc, 0x52, 0x2b, 0x8b, 0xbb, 0x42, 0xb9, 0x54, 0xde, + 0x6f, 0x56, 0x8b, 0xb5, 0x86, 0x23, 0x91, 0xc8, 0xfd, 0x14, 0x96, 0x23, 0x5e, 0x56, 0x74, 0xf3, + 0x30, 0x03, 0xfb, 0xcf, 0xf7, 0xcb, 0xfc, 0x25, 0x74, 0x05, 0x10, 0x1b, 0x7e, 0x70, 0x7f, 0xe3, + 0x7b, 0x62, 0xbd, 0xfc, 0xcc, 0xf3, 0xb3, 0x04, 0x0b, 0x8c, 0x2e, 0x14, 0xdf, 0x17, 0xeb, 0x07, + 0x3b, 0xb5, 0xea, 0xae, 0xb8, 0x57, 0x7e, 0xc1, 0x27, 0x72, 0xd7, 0x61, 0xca, 0xbb, 0x84, 0xe8, + 0x86, 0xde, 0x29, 0x36, 0xaa, 0xbb, 0xce, 0x86, 0x7e, 0x72, 0x50, 0xab, 0xf1, 0x5c, 0xe1, 0xf3, + 0x2c, 0x8c, 0x55, 0x8b, 0xcf, 0xd0, 0x1f, 0x39, 0x58, 0x08, 0xe8, 0x32, 0xa0, 0x7c, 0xcc, 0x87, + 0xb4, 0xb7, 0x37, 0xb3, 0x1b, 0xf1, 0x15, 0x9c, 0xed, 0x83, 0xef, 0xfe, 0xf2, 0x9f, 0x5f, 0x7d, + 0x9a, 0x78, 0x1b, 0xdd, 0xcc, 0x9f, 0x6c, 0xe6, 0x3f, 0xa2, 0x5b, 0xf9, 0xb1, 0xdb, 0xa0, 0xb2, + 0xf2, 0xb9, 0x8f, 0xf3, 0xd6, 0x10, 0xa2, 0xcf, 0x39, 0x98, 0x1f, 0x79, 
0xeb, 0xa2, 0xbb, 0x81, + 0x6e, 0xc3, 0xfa, 0x15, 0xd9, 0x38, 0x2d, 0x12, 0x9c, 0x67, 0xc0, 0x6e, 0xa3, 0xb7, 0x83, 0x80, + 0x0d, 0xe3, 0xca, 0xe7, 0x3e, 0x46, 0xbf, 0xe7, 0x60, 0x31, 0xe8, 0xed, 0x84, 0x82, 0x83, 0x12, + 0xd1, 0xe4, 0x8a, 0x07, 0x70, 0x83, 0x01, 0xcc, 0xe1, 0x78, 0x91, 0xdb, 0xe6, 0x72, 0xe8, 0x33, + 0x0e, 0x16, 0x9d, 0x6b, 0x75, 0x08, 0x61, 0x1c, 0x7f, 0xf1, 0x40, 0x15, 0x18, 0xa8, 0x3b, 0xd9, + 0xb8, 0x51, 0xa3, 0xb0, 0x7e, 0xc7, 0xc1, 0x62, 0xd0, 0x5b, 0x2c, 0x24, 0x70, 0x11, 0x6d, 0xa5, + 0xec, 0x95, 0x91, 0xab, 0xbc, 0xdc, 0x31, 0xec, 0x9e, 0xb7, 0x98, 0xb9, 0xd8, 0x8b, 0xf9, 0x57, + 0x0e, 0xae, 0x04, 0xb7, 0x6c, 0x50, 0xe1, 0xe2, 0xdd, 0xa5, 0xec, 0xbd, 0x0b, 0xe9, 0xb8, 0x47, + 0x63, 0x8b, 0x81, 0x5e, 0x47, 0x77, 0x62, 0x82, 0xce, 0x1f, 0x53, 0x78, 0x7f, 0xe2, 0x60, 0x31, + 0xa8, 0x1b, 0x14, 0x12, 0xcd, 0x88, 0xc6, 0x51, 0x36, 0x66, 0x1b, 0x0a, 0x7f, 0x97, 0x01, 0xdd, + 0x40, 0xeb, 0xf1, 0x80, 0x32, 0x9c, 0x34, 0xc8, 0x7f, 0xe6, 0x60, 0x29, 0xa4, 0xdb, 0x80, 0xee, + 0xc5, 0x3e, 0x34, 0x6f, 0x00, 0xf8, 0x01, 0x03, 0xbc, 0x89, 0x2f, 0x14, 0x59, 0xba, 0x55, 0xbf, + 0xe0, 0x60, 0x29, 0xa4, 0x6d, 0x10, 0x82, 0x38, 0xba, 0xc9, 0x10, 0xba, 0x61, 0xdd, 0x90, 0xe6, + 0x2e, 0x1a, 0xd2, 0xcf, 0x38, 0x98, 0xf2, 0xda, 0x0a, 0xe8, 0x46, 0x70, 0x38, 0x06, 0x1b, 0x18, + 0xd9, 0x9b, 0xe7, 0x48, 0xb9, 0xbb, 0xf1, 0x11, 0x43, 0x74, 0x1f, 0x6f, 0xc4, 0x3d, 0xd9, 0x96, + 0x6b, 0x81, 0xc6, 0xed, 0xb7, 0x1c, 0x4c, 0xba, 0x4d, 0x86, 0xb0, 0x64, 0x33, 0xd0, 0xc2, 0xc8, + 0xde, 0x88, 0x16, 0x72, 0x31, 0x6d, 0x33, 0x4c, 0x5b, 0x38, 0x7f, 0x11, 0x4c, 0x4f, 0x4f, 0x6d, + 0x0a, 0xe9, 0x13, 0x0e, 0x66, 0x2a, 0xc4, 0xae, 0x4a, 0x9d, 0x3a, 0xfb, 0xe5, 0x06, 0x61, 0xbf, + 0x4b, 0xe7, 0x64, 0x9c, 0x31, 0x3d, 0x58, 0x97, 0x87, 0x64, 0x1c, 0x2e, 0xfe, 0x3e, 0xc3, 0xb1, + 0x8d, 0x1f, 0x32, 0x1c, 0x5e, 0x61, 0x76, 0x0e, 0x96, 0xb6, 0xdf, 0xf9, 0x6f, 0x38, 0x98, 0x69, + 0x44, 0xa1, 0x69, 0xc4, 0x47, 0xb3, 0xcb, 0xd0, 0x3c, 0xbe, 0x18, 0x1a, 0xcb, 0x67, 0x9f, 0x86, + 0xe7, 0x2f, 
0x1c, 0x20, 0x5a, 0xf6, 0x51, 0xa2, 0xaf, 0x54, 0x5b, 0x1b, 0x72, 0x39, 0x2a, 0xe2, + 0x81, 0xbb, 0x1d, 0x43, 0xd2, 0x5d, 0xc6, 0x2a, 0x03, 0xbc, 0x8b, 0xdf, 0xb9, 0x08, 0x60, 0x7b, + 0xc4, 0x1e, 0x85, 0xfd, 0x07, 0x0e, 0x16, 0x02, 0x5a, 0x0f, 0x21, 0x95, 0x4c, 0x78, 0x7f, 0x25, + 0xa4, 0x92, 0x89, 0xe8, 0x6a, 0xe0, 0x35, 0x36, 0x0b, 0x8c, 0x57, 0xe8, 0x2c, 0xd8, 0x23, 0x66, + 0xfb, 0xd5, 0xa8, 0x38, 0x05, 0xd9, 0x81, 0xd4, 0xd9, 0x3b, 0x1f, 0xdd, 0x0c, 0xbd, 0x1a, 0x06, + 0xf0, 0xdc, 0x3a, 0x4f, 0xcc, 0x45, 0x31, 0xcf, 0x50, 0x4c, 0xa3, 0xd4, 0x19, 0x0a, 0x44, 0x60, + 0xd2, 0x7d, 0xe8, 0x87, 0x9c, 0xbd, 0xc1, 0x36, 0x40, 0x36, 0xfc, 0x15, 0x8d, 0xb3, 0xcc, 0xfa, + 0x22, 0x42, 0xfd, 0x03, 0xc7, 0x7c, 0xd0, 0xd4, 0xf3, 0x73, 0x80, 0x7e, 0xa7, 0x00, 0xdd, 0x8a, + 0xc8, 0xdf, 0x31, 0x9d, 0xb9, 0xa5, 0x21, 0xc6, 0xcc, 0x99, 0xd3, 0x62, 0x78, 0xac, 0x9b, 0x6d, + 0x49, 0x53, 0x5e, 0x3b, 0xbf, 0xd4, 0xd2, 0xd4, 0x6c, 0x7a, 0x51, 0xfd, 0x05, 0x07, 0xd0, 0x6f, + 0x1a, 0x84, 0x00, 0x18, 0xe9, 0x2a, 0x44, 0x01, 0x70, 0x2b, 0xac, 0x02, 0xee, 0xcf, 0x76, 0xc8, + 0x7d, 0xde, 0x9b, 0xfd, 0xb6, 0xd3, 0x5f, 0xf8, 0x08, 0xa0, 0xdf, 0x22, 0x08, 0x81, 0x30, 0xd2, + 0x43, 0x88, 0x82, 0x90, 0x63, 0x10, 0x6e, 0xe4, 0x62, 0x40, 0x60, 0x19, 0xcd, 0xdf, 0x63, 0x18, + 0x3c, 0xac, 0xfd, 0x10, 0x8c, 0xb6, 0x21, 0xa2, 0x10, 0xb8, 0x37, 0x11, 0xfe, 0x4e, 0x8c, 0x20, + 0x74, 0x5d, 0xd3, 0x74, 0x39, 0xbe, 0xe4, 0x20, 0x13, 0xf6, 0x78, 0x44, 0x5b, 0xe1, 0xa7, 0x2b, + 0xfc, 0xe5, 0x9b, 0xbd, 0x7f, 0x41, 0x2d, 0xf7, 0x48, 0xdc, 0x63, 0x33, 0xb8, 0x8b, 0xd7, 0xd8, + 0xcf, 0xf4, 0xbe, 0x7c, 0xf1, 0x2a, 0x44, 0x73, 0x9b, 0xcb, 0xed, 0x1c, 0xc2, 0x52, 0x4b, 0xef, + 0x04, 0x39, 0xdc, 0x99, 0xa2, 0x69, 0x87, 0x5e, 0xd7, 0x75, 0xee, 0x47, 0x0f, 0x5d, 0x81, 0xb6, + 0xae, 0x4a, 0x5a, 0x7b, 0x5d, 0x37, 0xdb, 0xf9, 0x36, 0xd1, 0xd8, 0x65, 0x9e, 0x77, 0x58, 0x92, + 0xa1, 0x58, 0x03, 0xff, 0x95, 0xf0, 0x88, 0x7d, 0xfc, 0x87, 0xe3, 0x0e, 0x27, 0x98, 0xdc, 0xbd, + 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x75, 0xcc, 
0x0e, 0xa5, 0xbc, 0x20, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go new file mode 100644 index 000000000..2f481a396 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go @@ -0,0 +1,337 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/iam/v1/iam_policy.proto + +/* +Package iam is a generated protocol buffer package. + +It is generated from these files: + google/iam/v1/iam_policy.proto + google/iam/v1/policy.proto + +It has these top-level messages: + SetIamPolicyRequest + GetIamPolicyRequest + TestIamPermissionsRequest + TestIamPermissionsResponse + Policy + Binding + PolicyDelta + BindingDelta +*/ +package iam + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + // REQUIRED: The resource for which the policy is being specified. + // `resource` is usually specified as a path. For example, a Project + // resource is specified as `projects/{project}`. + Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` + // REQUIRED: The complete policy to be applied to the `resource`. The size of + // the policy is limited to a few 10s of KB. 
An empty policy is a + // valid policy but certain Cloud Platform services (such as Projects) + // might reject them. + Policy *Policy `protobuf:"bytes,2,opt,name=policy" json:"policy,omitempty"` +} + +func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} } +func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*SetIamPolicyRequest) ProtoMessage() {} +func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *SetIamPolicyRequest) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +func (m *SetIamPolicyRequest) GetPolicy() *Policy { + if m != nil { + return m.Policy + } + return nil +} + +// Request message for `GetIamPolicy` method. +type GetIamPolicyRequest struct { + // REQUIRED: The resource for which the policy is being requested. + // `resource` is usually specified as a path. For example, a Project + // resource is specified as `projects/{project}`. + Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} } +func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*GetIamPolicyRequest) ProtoMessage() {} +func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GetIamPolicyRequest) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +// Request message for `TestIamPermissions` method. +type TestIamPermissionsRequest struct { + // REQUIRED: The resource for which the policy detail is being requested. + // `resource` is usually specified as a path. For example, a Project + // resource is specified as `projects/{project}`. + Resource string `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` + // The set of permissions to check for the `resource`. 
Permissions with + // wildcards (such as '*' or 'storage.*') are not allowed. For more + // information see + // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + Permissions []string `protobuf:"bytes,2,rep,name=permissions" json:"permissions,omitempty"` +} + +func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} } +func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) } +func (*TestIamPermissionsRequest) ProtoMessage() {} +func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *TestIamPermissionsRequest) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +func (m *TestIamPermissionsRequest) GetPermissions() []string { + if m != nil { + return m.Permissions + } + return nil +} + +// Response message for `TestIamPermissions` method. +type TestIamPermissionsResponse struct { + // A subset of `TestPermissionsRequest.permissions` that the caller is + // allowed. 
+ Permissions []string `protobuf:"bytes,1,rep,name=permissions" json:"permissions,omitempty"` +} + +func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} } +func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) } +func (*TestIamPermissionsResponse) ProtoMessage() {} +func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *TestIamPermissionsResponse) GetPermissions() []string { + if m != nil { + return m.Permissions + } + return nil +} + +func init() { + proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest") + proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest") + proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest") + proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for IAMPolicy service + +type IAMPolicyClient interface { + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a NOT_FOUND error. 
+ TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) +} + +type iAMPolicyClient struct { + cc *grpc.ClientConn +} + +func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient { + return &iAMPolicyClient{cc} +} + +func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { + out := new(Policy) + err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { + out := new(Policy) + err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) { + out := new(TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for IAMPolicy service + +type IAMPolicyServer interface { + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a NOT_FOUND error. 
+ TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) +} + +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + s.RegisterService(&_IAMPolicy_serviceDesc, srv) +} + +func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.iam.v1.IAMPolicy/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _IAMPolicy_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.iam.v1.IAMPolicy", + HandlerType: (*IAMPolicyServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetIamPolicy", + Handler: _IAMPolicy_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _IAMPolicy_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _IAMPolicy_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/iam/v1/iam_policy.proto", +} + +func init() { proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 396 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcf, 0x4a, 0xe3, 0x40, + 0x18, 0x67, 0x52, 0x28, 0xdb, 0xe9, 0xee, 0xc2, 0xa6, 0x2c, 0xd4, 0x20, 0x25, 0x8c, 0x1e, 0xd2, + 0x80, 0x13, 0x53, 0x6f, 0x15, 0x05, 0xeb, 0x21, 0xf4, 0x20, 0x94, 0x2a, 0x82, 0x5e, 0x74, 0xac, + 0x43, 0x18, 0x48, 0x32, 0x31, 0x33, 0x2d, 0x88, 0x78, 0xf1, 0x15, 0xf4, 0xe4, 0x23, 0xf8, 0x3a, + 0xbe, 0x82, 0x0f, 0xe1, 0x51, 0x92, 0x89, 0x35, 0x6d, 0xaa, 0x54, 0xf0, 0x54, 0x3a, 0xf3, 0xfb, + 0xf7, 0xfd, 0xbe, 0x0c, 0x6c, 0xf9, 0x9c, 0xfb, 0x01, 0x75, 0x18, 0x09, 0x9d, 0x89, 0x9b, 0xfe, + 0x9c, 0xc5, 0x3c, 0x60, 0xa3, 0x6b, 0x1c, 0x27, 0x5c, 0x72, 0xfd, 0x8f, 0xba, 0xc7, 0x8c, 0x84, + 0x78, 0xe2, 0x1a, 0xab, 0x39, 0x9c, 0xc4, 0xcc, 0x21, 0x51, 0xc4, 0x25, 0x91, 0x8c, 0x47, 0x42, + 0x81, 0x0d, 0x63, 0x56, 0xac, 0x28, 0x84, 0xce, 0x61, 0xe3, 0x90, 0xca, 0x3e, 0x09, 0x07, 0xd9, + 0xe9, 0x90, 0x5e, 0x8d, 0xa9, 0x90, 0xba, 0x01, 0x7f, 0x25, 0x54, 0xf0, 0x71, 0x32, 0xa2, 0x4d, + 0x60, 0x02, 0xab, 0x36, 0x9c, 0xfe, 0xd7, 0x37, 0x60, 0x55, 0x49, 
0x34, 0x35, 0x13, 0x58, 0xf5, + 0xce, 0x7f, 0x3c, 0x13, 0x06, 0xe7, 0x4a, 0x39, 0x08, 0xb9, 0xb0, 0xe1, 0x7d, 0xcf, 0x01, 0x9d, + 0xc0, 0x95, 0x23, 0x2a, 0x32, 0x0e, 0x4d, 0x42, 0x26, 0x44, 0x3a, 0xcc, 0x32, 0xd1, 0x4c, 0x58, + 0x8f, 0x3f, 0x18, 0x4d, 0xcd, 0xac, 0x58, 0xb5, 0x61, 0xf1, 0x08, 0xed, 0x42, 0x63, 0x91, 0xb4, + 0x88, 0x79, 0x24, 0x4a, 0x7c, 0x50, 0xe2, 0x77, 0x1e, 0x2a, 0xb0, 0xd6, 0xdf, 0x3b, 0x50, 0xb3, + 0xe8, 0x12, 0xfe, 0x2e, 0xb6, 0xa7, 0xa3, 0xb9, 0x2a, 0x16, 0x54, 0x6b, 0x2c, 0xae, 0x0b, 0xb5, + 0xef, 0x9e, 0x5f, 0xee, 0xb5, 0x35, 0xd4, 0x4a, 0x57, 0x74, 0xf3, 0x3e, 0xd1, 0x8e, 0x6d, 0xdf, + 0x76, 0x45, 0x41, 0xa5, 0x0b, 0xec, 0xd4, 0xd5, 0xfb, 0xca, 0xd5, 0xfb, 0x11, 0x57, 0x7f, 0xce, + 0xf5, 0x11, 0x40, 0xbd, 0x5c, 0x9d, 0x6e, 0xcd, 0x09, 0x7f, 0xba, 0x38, 0xa3, 0xbd, 0x04, 0x52, + 0xed, 0x01, 0x39, 0x59, 0xac, 0x36, 0x5a, 0x2f, 0xc7, 0x92, 0x25, 0x56, 0x17, 0xd8, 0xbd, 0x18, + 0xfe, 0x1b, 0xf1, 0x70, 0xd6, 0xa0, 0xf7, 0x77, 0x9a, 0x7f, 0x90, 0x7e, 0xeb, 0x03, 0x70, 0xba, + 0x99, 0x03, 0x7c, 0x1e, 0x90, 0xc8, 0xc7, 0x3c, 0xf1, 0x1d, 0x9f, 0x46, 0xd9, 0x4b, 0x70, 0xd4, + 0x15, 0x89, 0x99, 0xc8, 0x1f, 0xca, 0x36, 0x23, 0xe1, 0x2b, 0x00, 0x4f, 0x5a, 0xc3, 0x53, 0xac, + 0xfd, 0x80, 0x8f, 0x2f, 0x71, 0x9f, 0x84, 0xf8, 0xd8, 0xbd, 0xa8, 0x66, 0xac, 0xad, 0xb7, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x6c, 0x3a, 0x2b, 0x4d, 0xaa, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/logging/audit_data.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/logging/audit_data.pb.go new file mode 100644 index 000000000..b22765a14 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/logging/audit_data.pb.go @@ -0,0 +1,75 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/iam/v1/logging/audit_data.proto + +/* +Package logging is a generated protocol buffer package. 
+ +It is generated from these files: + google/iam/v1/logging/audit_data.proto + +It has these top-level messages: + AuditData +*/ +package logging + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Audit log information specific to Cloud IAM. This message is serialized +// as an `Any` type in the `ServiceData` message of an +// `AuditLog` message. +type AuditData struct { + // Policy delta between the original policy and the newly set policy. 
+ PolicyDelta *google_iam_v1.PolicyDelta `protobuf:"bytes,2,opt,name=policy_delta,json=policyDelta" json:"policy_delta,omitempty"` +} + +func (m *AuditData) Reset() { *m = AuditData{} } +func (m *AuditData) String() string { return proto.CompactTextString(m) } +func (*AuditData) ProtoMessage() {} +func (*AuditData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AuditData) GetPolicyDelta() *google_iam_v1.PolicyDelta { + if m != nil { + return m.PolicyDelta + } + return nil +} + +func init() { + proto.RegisterType((*AuditData)(nil), "google.iam.v1.logging.AuditData") +} + +func init() { proto.RegisterFile("google/iam/v1/logging/audit_data.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 236 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4a, 0x04, 0x31, + 0x10, 0x86, 0xd9, 0x2b, 0x04, 0x73, 0x62, 0x71, 0x20, 0x68, 0xb4, 0x10, 0x0b, 0xb1, 0x9a, 0xb0, + 0x5a, 0xaa, 0x85, 0xe7, 0x81, 0x28, 0x16, 0x8b, 0x85, 0x85, 0xcd, 0x31, 0x5e, 0x96, 0x61, 0x20, + 0xc9, 0x84, 0xbb, 0xdc, 0x82, 0x8f, 0xe0, 0xab, 0xf8, 0x94, 0xb2, 0x9b, 0xa0, 0xac, 0x58, 0x85, + 0xf0, 0x7f, 0xff, 0x7c, 0xc3, 0xa8, 0x73, 0x12, 0x21, 0xd7, 0x1a, 0x46, 0x6f, 0xba, 0xda, 0x38, + 0x21, 0xe2, 0x40, 0x06, 0xb7, 0x96, 0xd3, 0xd2, 0x62, 0x42, 0x88, 0x6b, 0x49, 0x32, 0x3b, 0xc8, + 0x1c, 0x30, 0x7a, 0xe8, 0x6a, 0x28, 0x9c, 0x3e, 0x29, 0x75, 0x8c, 0x6c, 0x30, 0x04, 0x49, 0x98, + 0x58, 0xc2, 0x26, 0x97, 0xb4, 0x1e, 0x0f, 0x8f, 0xe2, 0x78, 0xf5, 0x91, 0xb3, 0xb3, 0x27, 0xb5, + 0x7b, 0xd7, 0x4b, 0x16, 0x98, 0x70, 0x76, 0xab, 0xf6, 0x72, 0xb8, 0xb4, 0xad, 0x4b, 0x78, 0x38, + 0x39, 0xad, 0x2e, 0xa6, 0x97, 0x1a, 0xc6, 0xd2, 0x66, 0x40, 0x16, 0x3d, 0xf1, 0x32, 0x8d, 0xbf, + 0x9f, 0xf9, 0x67, 0xa5, 0x8e, 0x56, 0xe2, 0xe1, 0xdf, 0x1d, 0xe7, 0xfb, 0x3f, 0x9e, 0xa6, 0x37, + 0x37, 0xd5, 0xdb, 0x4d, 0x01, 0x49, 0x1c, 0x06, 0x02, 0x59, 0x93, 0xa1, 0x36, 0x0c, 0x7b, 0x99, + 0x1c, 0x61, 0xe4, 0xcd, 0x9f, 0x9b, 
0x5c, 0x97, 0xf7, 0x6b, 0x72, 0xfc, 0x90, 0xeb, 0xf7, 0x4e, + 0xb6, 0x16, 0x1e, 0xd1, 0xc3, 0x6b, 0x0d, 0xcf, 0x39, 0x7d, 0xdf, 0x19, 0xc6, 0x5c, 0x7d, 0x07, + 0x00, 0x00, 0xff, 0xff, 0x29, 0xf1, 0xcb, 0x3a, 0x59, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go new file mode 100644 index 000000000..a22ae91be --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go @@ -0,0 +1,269 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/iam/v1/policy.proto + +package iam + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The type of action performed on a Binding in a policy. +type BindingDelta_Action int32 + +const ( + // Unspecified. + BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 + // Addition of a Binding. + BindingDelta_ADD BindingDelta_Action = 1 + // Removal of a Binding. + BindingDelta_REMOVE BindingDelta_Action = 2 +) + +var BindingDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", +} +var BindingDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, +} + +func (x BindingDelta_Action) String() string { + return proto.EnumName(BindingDelta_Action_name, int32(x)) +} +func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } + +// Defines an Identity and Access Management (IAM) policy. It is used to +// specify access control policies for Cloud Platform resources. +// +// +// A `Policy` consists of a list of `bindings`. 
A `Binding` binds a list of +// `members` to a `role`, where the members can be user accounts, Google groups, +// Google domains, and service accounts. A `role` is a named list of permissions +// defined by IAM. +// +// **Example** +// +// { +// "bindings": [ +// { +// "role": "roles/owner", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-other-app@appspot.gserviceaccount.com", +// ] +// }, +// { +// "role": "roles/viewer", +// "members": ["user:sean@example.com"] +// } +// ] +// } +// +// For a description of IAM and its features, see the +// [IAM developer's guide](https://cloud.google.com/iam). +type Policy struct { + // Version of the `Policy`. The default version is 0. + Version int32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` + // Associates a list of `members` to a `role`. + // Multiple `bindings` must not be specified for the same `role`. + // `bindings` with no members will result in an error. + Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings" json:"bindings,omitempty"` + // `etag` is used for optimistic concurrency control as a way to help + // prevent simultaneous updates of a policy from overwriting each other. + // It is strongly suggested that systems make use of the `etag` in the + // read-modify-write cycle to perform policy updates in order to avoid race + // conditions: An `etag` is returned in the response to `getIamPolicy`, and + // systems are expected to put that etag in the request to `setIamPolicy` to + // ensure that their change will be applied to the same version of the policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the existing + // policy is overwritten blindly. 
+ Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (m *Policy) Reset() { *m = Policy{} } +func (m *Policy) String() string { return proto.CompactTextString(m) } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Policy) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Policy) GetBindings() []*Binding { + if m != nil { + return m.Bindings + } + return nil +} + +func (m *Policy) GetEtag() []byte { + if m != nil { + return m.Etag + } + return nil +} + +// Associates `members` with a `role`. +type Binding struct { + // Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Required + Role string `protobuf:"bytes,1,opt,name=role" json:"role,omitempty"` + // Specifies the identities requesting access for a Cloud Platform resource. + // `members` can have the following values: + // + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. + // + // * `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@gmail.com` or `joe@example.com`. + // + // + // * `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. + // + // * `domain:{domain}`: A Google Apps domain name that represents all the + // users of that domain. For example, `google.com` or `example.com`. 
+ // + // + Members []string `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *Binding) Reset() { *m = Binding{} } +func (m *Binding) String() string { return proto.CompactTextString(m) } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Binding) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *Binding) GetMembers() []string { + if m != nil { + return m.Members + } + return nil +} + +// The difference delta between two policies. +type PolicyDelta struct { + // The delta for Bindings between two policies. + BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas" json:"binding_deltas,omitempty"` +} + +func (m *PolicyDelta) Reset() { *m = PolicyDelta{} } +func (m *PolicyDelta) String() string { return proto.CompactTextString(m) } +func (*PolicyDelta) ProtoMessage() {} +func (*PolicyDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta { + if m != nil { + return m.BindingDeltas + } + return nil +} + +// One delta entry for Binding. Each individual change (only one member in each +// entry) to a binding will be a separate entry. +type BindingDelta struct { + // The action that was performed on a Binding. + // Required + Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` + // Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Required + Role string `protobuf:"bytes,2,opt,name=role" json:"role,omitempty"` + // A single identity requesting access for a Cloud Platform resource. + // Follows the same format of Binding.members. 
+ // Required + Member string `protobuf:"bytes,3,opt,name=member" json:"member,omitempty"` +} + +func (m *BindingDelta) Reset() { *m = BindingDelta{} } +func (m *BindingDelta) String() string { return proto.CompactTextString(m) } +func (*BindingDelta) ProtoMessage() {} +func (*BindingDelta) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *BindingDelta) GetAction() BindingDelta_Action { + if m != nil { + return m.Action + } + return BindingDelta_ACTION_UNSPECIFIED +} + +func (m *BindingDelta) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *BindingDelta) GetMember() string { + if m != nil { + return m.Member + } + return "" +} + +func init() { + proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy") + proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding") + proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta") + proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta") + proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value) +} + +func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 387 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0x8f, 0xd3, 0x30, + 0x10, 0xc5, 0xed, 0x92, 0xd2, 0xd9, 0x0f, 0x15, 0x23, 0x55, 0xd1, 0xc2, 0xa1, 0xca, 0x29, 0x27, + 0x87, 0x16, 0x21, 0x24, 0x38, 0x35, 0x4d, 0x40, 0x39, 0xb0, 0x1b, 0x0c, 0xec, 0x81, 0xcb, 0xca, + 0x69, 0x2d, 0xcb, 0x28, 0xb6, 0xa3, 0x24, 0x54, 0xe2, 0x2f, 0x21, 0xf1, 0xff, 0x38, 0xa2, 0xd8, + 0xee, 0xaa, 0x95, 0x10, 0xb7, 0x79, 0x79, 0xef, 0x65, 0xde, 0xcc, 0x18, 0xae, 0x85, 0x31, 0xa2, + 0xe6, 0x89, 0x64, 0x2a, 0xd9, 0x2f, 0x93, 0xc6, 0xd4, 0x72, 0xfb, 0x93, 0x34, 0xad, 0xe9, 0x0d, + 0xbe, 0x74, 0x1c, 0x91, 0x4c, 0x91, 0xfd, 0xf2, 0xfa, 0x85, 0x97, 0xb2, 0x46, 0x26, 0x4c, 0x6b, + 0xd3, 0xb3, 0x5e, 0x1a, 0xdd, 0x39, 0x71, 0xf4, 
0x1d, 0x82, 0xd2, 0x9a, 0x71, 0x08, 0x93, 0x3d, + 0x6f, 0x3b, 0x69, 0x74, 0x88, 0x16, 0x28, 0x7e, 0x4c, 0x0f, 0x10, 0xaf, 0xe0, 0x49, 0x25, 0xf5, + 0x4e, 0x6a, 0xd1, 0x85, 0x67, 0x8b, 0x71, 0x7c, 0xbe, 0x9a, 0x93, 0x93, 0x1e, 0x24, 0x75, 0x34, + 0x7d, 0xd0, 0x61, 0x0c, 0x67, 0xbc, 0x67, 0x22, 0x1c, 0x2f, 0x50, 0x7c, 0x41, 0x6d, 0x1d, 0xbd, + 0x81, 0x89, 0x17, 0x0e, 0x74, 0x6b, 0x6a, 0x6e, 0x3b, 0x4d, 0xa9, 0xad, 0x87, 0x00, 0x8a, 0xab, + 0x8a, 0xb7, 0x5d, 0x38, 0x5a, 0x8c, 0xe3, 0x29, 0x3d, 0xc0, 0xe8, 0x13, 0x9c, 0xbb, 0x90, 0x19, + 0xaf, 0x7b, 0x86, 0x53, 0xb8, 0xf2, 0x7d, 0xee, 0x77, 0xc3, 0x87, 0x2e, 0x44, 0x36, 0xd5, 0xf3, + 0x7f, 0xa7, 0xb2, 0x26, 0x7a, 0x59, 0x1d, 0xa1, 0x2e, 0xfa, 0x8d, 0xe0, 0xe2, 0x98, 0xc7, 0x6f, + 0x21, 0x60, 0xdb, 0xfe, 0x30, 0xfd, 0xd5, 0x2a, 0xfa, 0xcf, 0xcf, 0xc8, 0xda, 0x2a, 0xa9, 0x77, + 0x3c, 0x4c, 0x33, 0x3a, 0x9a, 0x66, 0x0e, 0x81, 0x8b, 0x6f, 0x57, 0x30, 0xa5, 0x1e, 0x45, 0xaf, + 0x21, 0x70, 0x6e, 0x3c, 0x07, 0xbc, 0xde, 0x7c, 0x29, 0x6e, 0x6f, 0xee, 0xbf, 0xde, 0x7c, 0x2e, + 0xf3, 0x4d, 0xf1, 0xbe, 0xc8, 0xb3, 0xd9, 0x23, 0x3c, 0x81, 0xf1, 0x3a, 0xcb, 0x66, 0x08, 0x03, + 0x04, 0x34, 0xff, 0x78, 0x7b, 0x97, 0xcf, 0x46, 0xa9, 0x82, 0xa7, 0x5b, 0xa3, 0x4e, 0x33, 0xa5, + 0x7e, 0x2b, 0xe5, 0x70, 0xc9, 0x12, 0x7d, 0x7b, 0xe9, 0x59, 0x61, 0x6a, 0xa6, 0x05, 0x31, 0xad, + 0x48, 0x04, 0xd7, 0xf6, 0xce, 0x89, 0xa3, 0x58, 0x23, 0x3b, 0xff, 0x66, 0xde, 0x49, 0xa6, 0xfe, + 0x20, 0xf4, 0x6b, 0xf4, 0xec, 0x83, 0x73, 0x6d, 0x6a, 0xf3, 0x63, 0x47, 0x0a, 0xa6, 0xc8, 0xdd, + 0xb2, 0x0a, 0xac, 0xeb, 0xd5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x4a, 0x85, 0x10, 0x68, + 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.pb.go new file mode 100644 index 000000000..a19a55760 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/logging/type/http_request.pb.go @@ -0,0 +1,233 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT. +// source: google/logging/type/http_request.proto + +/* +Package ltype is a generated protocol buffer package. + +It is generated from these files: + google/logging/type/http_request.proto + google/logging/type/log_severity.proto + +It has these top-level messages: + HttpRequest +*/ +package ltype + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A common proto for logging HTTP requests. Only contains semantics +// defined by the HTTP specification. Product-specific logging +// information MUST be defined in a separate message. +type HttpRequest struct { + // The request method. Examples: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`. + RequestMethod string `protobuf:"bytes,1,opt,name=request_method,json=requestMethod" json:"request_method,omitempty"` + // The scheme (http, https), the host name, the path and the query + // portion of the URL that was requested. + // Example: `"http://example.com/some/info?color=red"`. + RequestUrl string `protobuf:"bytes,2,opt,name=request_url,json=requestUrl" json:"request_url,omitempty"` + // The size of the HTTP request message in bytes, including the request + // headers and the request body. + RequestSize int64 `protobuf:"varint,3,opt,name=request_size,json=requestSize" json:"request_size,omitempty"` + // The response code indicating the status of response. 
+ // Examples: 200, 404. + Status int32 `protobuf:"varint,4,opt,name=status" json:"status,omitempty"` + // The size of the HTTP response message sent back to the client, in bytes, + // including the response headers and the response body. + ResponseSize int64 `protobuf:"varint,5,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // The user agent sent by the client. Example: + // `"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)"`. + UserAgent string `protobuf:"bytes,6,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` + // The IP address (IPv4 or IPv6) of the client that issued the HTTP + // request. Examples: `"192.168.1.1"`, `"FE80::0202:B3FF:FE1E:8329"`. + RemoteIp string `protobuf:"bytes,7,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + // The IP address (IPv4 or IPv6) of the origin server that the request was + // sent to. + ServerIp string `protobuf:"bytes,13,opt,name=server_ip,json=serverIp" json:"server_ip,omitempty"` + // The referer URL of the request, as defined in + // [HTTP/1.1 Header Field Definitions](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). + Referer string `protobuf:"bytes,8,opt,name=referer" json:"referer,omitempty"` + // The request processing latency on the server, from the time the request was + // received until the response was sent. + Latency *google_protobuf1.Duration `protobuf:"bytes,14,opt,name=latency" json:"latency,omitempty"` + // Whether or not a cache lookup was attempted. + CacheLookup bool `protobuf:"varint,11,opt,name=cache_lookup,json=cacheLookup" json:"cache_lookup,omitempty"` + // Whether or not an entity was served from cache + // (with or without validation). + CacheHit bool `protobuf:"varint,9,opt,name=cache_hit,json=cacheHit" json:"cache_hit,omitempty"` + // Whether or not the response was validated with the origin server before + // being served from cache. This field is only meaningful if `cache_hit` is + // True. 
+ CacheValidatedWithOriginServer bool `protobuf:"varint,10,opt,name=cache_validated_with_origin_server,json=cacheValidatedWithOriginServer" json:"cache_validated_with_origin_server,omitempty"` + // The number of HTTP response bytes inserted into cache. Set only when a + // cache fill was attempted. + CacheFillBytes int64 `protobuf:"varint,12,opt,name=cache_fill_bytes,json=cacheFillBytes" json:"cache_fill_bytes,omitempty"` + // Protocol used for the request. Examples: "HTTP/1.1", "HTTP/2", "websocket" + Protocol string `protobuf:"bytes,15,opt,name=protocol" json:"protocol,omitempty"` +} + +func (m *HttpRequest) Reset() { *m = HttpRequest{} } +func (m *HttpRequest) String() string { return proto.CompactTextString(m) } +func (*HttpRequest) ProtoMessage() {} +func (*HttpRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *HttpRequest) GetRequestMethod() string { + if m != nil { + return m.RequestMethod + } + return "" +} + +func (m *HttpRequest) GetRequestUrl() string { + if m != nil { + return m.RequestUrl + } + return "" +} + +func (m *HttpRequest) GetRequestSize() int64 { + if m != nil { + return m.RequestSize + } + return 0 +} + +func (m *HttpRequest) GetStatus() int32 { + if m != nil { + return m.Status + } + return 0 +} + +func (m *HttpRequest) GetResponseSize() int64 { + if m != nil { + return m.ResponseSize + } + return 0 +} + +func (m *HttpRequest) GetUserAgent() string { + if m != nil { + return m.UserAgent + } + return "" +} + +func (m *HttpRequest) GetRemoteIp() string { + if m != nil { + return m.RemoteIp + } + return "" +} + +func (m *HttpRequest) GetServerIp() string { + if m != nil { + return m.ServerIp + } + return "" +} + +func (m *HttpRequest) GetReferer() string { + if m != nil { + return m.Referer + } + return "" +} + +func (m *HttpRequest) GetLatency() *google_protobuf1.Duration { + if m != nil { + return m.Latency + } + return nil +} + +func (m *HttpRequest) GetCacheLookup() bool { + if m != nil { + return 
m.CacheLookup + } + return false +} + +func (m *HttpRequest) GetCacheHit() bool { + if m != nil { + return m.CacheHit + } + return false +} + +func (m *HttpRequest) GetCacheValidatedWithOriginServer() bool { + if m != nil { + return m.CacheValidatedWithOriginServer + } + return false +} + +func (m *HttpRequest) GetCacheFillBytes() int64 { + if m != nil { + return m.CacheFillBytes + } + return 0 +} + +func (m *HttpRequest) GetProtocol() string { + if m != nil { + return m.Protocol + } + return "" +} + +func init() { + proto.RegisterType((*HttpRequest)(nil), "google.logging.type.HttpRequest") +} + +func init() { proto.RegisterFile("google/logging/type/http_request.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 499 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xcb, 0x6f, 0x13, 0x31, + 0x10, 0xc6, 0xb5, 0x7d, 0x25, 0x71, 0x1e, 0x54, 0x46, 0x02, 0x37, 0x40, 0x09, 0x45, 0xa0, 0x3d, + 0xed, 0x4a, 0xf4, 0x82, 0xc4, 0x89, 0x80, 0xa0, 0x45, 0x45, 0x54, 0x5b, 0x1e, 0x12, 0x97, 0x95, + 0x93, 0x4c, 0xbc, 0x16, 0xce, 0xda, 0xd8, 0xde, 0xa2, 0xf4, 0xca, 0x7f, 0xc3, 0x85, 0x7f, 0x11, + 0xed, 0xd8, 0x2b, 0x81, 0xc4, 0x25, 0xd2, 0x7c, 0xdf, 0xef, 0x9b, 0x71, 0x66, 0x87, 0x3c, 0x15, + 0x5a, 0x0b, 0x05, 0xb9, 0xd2, 0x42, 0xc8, 0x5a, 0xe4, 0x7e, 0x6b, 0x20, 0xaf, 0xbc, 0x37, 0xa5, + 0x85, 0xef, 0x0d, 0x38, 0x9f, 0x19, 0xab, 0xbd, 0xa6, 0xb7, 0x03, 0x97, 0x45, 0x2e, 0x6b, 0xb9, + 0xe9, 0xfd, 0x18, 0xe6, 0x46, 0xe6, 0xbc, 0xae, 0xb5, 0xe7, 0x5e, 0xea, 0xda, 0x85, 0xc8, 0xf4, + 0x38, 0xba, 0x58, 0x2d, 0x9a, 0x75, 0xbe, 0x6a, 0x2c, 0x02, 0xc1, 0x3f, 0xf9, 0xbd, 0x47, 0x86, + 0x67, 0xde, 0x9b, 0x22, 0x0c, 0xa2, 0x4f, 0xc8, 0x24, 0xce, 0x2c, 0x37, 0xe0, 0x2b, 0xbd, 0x62, + 0xc9, 0x2c, 0x49, 0x07, 0xc5, 0x38, 0xaa, 0xef, 0x51, 0xa4, 0x0f, 0xc9, 0xb0, 0xc3, 0x1a, 0xab, + 0xd8, 0x0e, 0x32, 0x24, 0x4a, 0x9f, 0xac, 0xa2, 0x8f, 0xc8, 0xa8, 0x03, 0x9c, 0xbc, 0x01, 0xb6, + 0x3b, 0x4b, 0xd2, 0xdd, 0xa2, 0x0b, 0x5d, 
0xc9, 0x1b, 0xa0, 0x77, 0xc8, 0x81, 0xf3, 0xdc, 0x37, + 0x8e, 0xed, 0xcd, 0x92, 0x74, 0xbf, 0x88, 0x15, 0x7d, 0x4c, 0xc6, 0x16, 0x9c, 0xd1, 0xb5, 0x83, + 0x90, 0xdd, 0xc7, 0xec, 0xa8, 0x13, 0x31, 0xfc, 0x80, 0x90, 0xc6, 0x81, 0x2d, 0xb9, 0x80, 0xda, + 0xb3, 0x03, 0x9c, 0x3f, 0x68, 0x95, 0x97, 0xad, 0x40, 0xef, 0x91, 0x81, 0x85, 0x8d, 0xf6, 0x50, + 0x4a, 0xc3, 0x7a, 0xe8, 0xf6, 0x83, 0x70, 0x6e, 0x5a, 0xd3, 0x81, 0xbd, 0x06, 0xdb, 0x9a, 0xe3, + 0x60, 0x06, 0xe1, 0xdc, 0x50, 0x46, 0x7a, 0x16, 0xd6, 0x60, 0xc1, 0xb2, 0x3e, 0x5a, 0x5d, 0x49, + 0x4f, 0x49, 0x4f, 0x71, 0x0f, 0xf5, 0x72, 0xcb, 0x26, 0xb3, 0x24, 0x1d, 0x3e, 0x3b, 0xca, 0xe2, + 0xf7, 0xe8, 0x96, 0x9b, 0xbd, 0x8e, 0xcb, 0x2d, 0x3a, 0xb2, 0xdd, 0xc3, 0x92, 0x2f, 0x2b, 0x28, + 0x95, 0xd6, 0xdf, 0x1a, 0xc3, 0x86, 0xb3, 0x24, 0xed, 0x17, 0x43, 0xd4, 0x2e, 0x50, 0x6a, 0x9f, + 0x13, 0x90, 0x4a, 0x7a, 0x36, 0x40, 0xbf, 0x8f, 0xc2, 0x99, 0xf4, 0xf4, 0x1d, 0x39, 0x09, 0xe6, + 0x35, 0x57, 0x72, 0xc5, 0x3d, 0xac, 0xca, 0x1f, 0xd2, 0x57, 0xa5, 0xb6, 0x52, 0xc8, 0xba, 0x0c, + 0xcf, 0x66, 0x04, 0x53, 0xc7, 0x48, 0x7e, 0xee, 0xc0, 0x2f, 0xd2, 0x57, 0x1f, 0x10, 0xbb, 0x42, + 0x8a, 0xa6, 0xe4, 0x30, 0xf4, 0x5a, 0x4b, 0xa5, 0xca, 0xc5, 0xd6, 0x83, 0x63, 0x23, 0xdc, 0xed, + 0x04, 0xf5, 0x37, 0x52, 0xa9, 0x79, 0xab, 0xd2, 0x29, 0xe9, 0xe3, 0x7f, 0x5a, 0x6a, 0xc5, 0x6e, + 0x85, 0x05, 0x75, 0xf5, 0xfc, 0x67, 0x42, 0xee, 0x2e, 0xf5, 0x26, 0xfb, 0xcf, 0x2d, 0xce, 0x0f, + 0xff, 0x3a, 0xa5, 0xcb, 0x36, 0x70, 0x99, 0x7c, 0x7d, 0x1e, 0x41, 0xa1, 0x15, 0xaf, 0x45, 0xa6, + 0xad, 0xc8, 0x05, 0xd4, 0xd8, 0x2e, 0x0f, 0x16, 0x37, 0xd2, 0xfd, 0x73, 0xfb, 0x2f, 0x54, 0xfb, + 0xfb, 0x6b, 0xe7, 0xe8, 0x6d, 0x88, 0xbe, 0x52, 0xba, 0x59, 0x65, 0x17, 0x71, 0xd2, 0xc7, 0xad, + 0x81, 0xc5, 0x01, 0x36, 0x38, 0xfd, 0x13, 0x00, 0x00, 0xff, 0xff, 0x09, 0x49, 0xe6, 0xb8, 0x3b, + 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go 
b/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go new file mode 100644 index 000000000..842697738 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go @@ -0,0 +1,110 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/logging/type/log_severity.proto + +package ltype + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The severity of the event described in a log entry, expressed as one of the +// standard severity levels listed below. For your reference, the levels are +// assigned the listed numeric values. The effect of using numeric values other +// than those listed is undefined. +// +// You can filter for log entries by severity. For example, the following +// filter expression will match log entries with severities `INFO`, `NOTICE`, +// and `WARNING`: +// +// severity > DEBUG AND severity <= WARNING +// +// If you are writing log entries, you should map other severity encodings to +// one of these standard levels. For example, you might map all of Java's FINE, +// FINER, and FINEST levels to `LogSeverity.DEBUG`. You can preserve the +// original severity level in the log entry payload if you wish. +type LogSeverity int32 + +const ( + // (0) The log entry has no assigned severity level. + LogSeverity_DEFAULT LogSeverity = 0 + // (100) Debug or trace information. + LogSeverity_DEBUG LogSeverity = 100 + // (200) Routine information, such as ongoing status or performance. + LogSeverity_INFO LogSeverity = 200 + // (300) Normal but significant events, such as start up, shut down, or + // a configuration change. + LogSeverity_NOTICE LogSeverity = 300 + // (400) Warning events might cause problems. 
+ LogSeverity_WARNING LogSeverity = 400 + // (500) Error events are likely to cause problems. + LogSeverity_ERROR LogSeverity = 500 + // (600) Critical events cause more severe problems or outages. + LogSeverity_CRITICAL LogSeverity = 600 + // (700) A person must take an action immediately. + LogSeverity_ALERT LogSeverity = 700 + // (800) One or more systems are unusable. + LogSeverity_EMERGENCY LogSeverity = 800 +) + +var LogSeverity_name = map[int32]string{ + 0: "DEFAULT", + 100: "DEBUG", + 200: "INFO", + 300: "NOTICE", + 400: "WARNING", + 500: "ERROR", + 600: "CRITICAL", + 700: "ALERT", + 800: "EMERGENCY", +} +var LogSeverity_value = map[string]int32{ + "DEFAULT": 0, + "DEBUG": 100, + "INFO": 200, + "NOTICE": 300, + "WARNING": 400, + "ERROR": 500, + "CRITICAL": 600, + "ALERT": 700, + "EMERGENCY": 800, +} + +func (x LogSeverity) String() string { + return proto.EnumName(LogSeverity_name, int32(x)) +} +func (LogSeverity) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func init() { + proto.RegisterEnum("google.logging.type.LogSeverity", LogSeverity_name, LogSeverity_value) +} + +func init() { proto.RegisterFile("google/logging/type/log_severity.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xc9, 0x4f, 0x4f, 0xcf, 0xcc, 0x4b, 0xd7, 0x2f, 0xa9, 0x2c, 0x00, 0x73, + 0xe2, 0x8b, 0x53, 0xcb, 0x52, 0x8b, 0x32, 0x4b, 0x2a, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, + 0x84, 0x21, 0xea, 0xf4, 0xa0, 0xea, 0xf4, 0x40, 0xea, 0xa4, 0x64, 0xa0, 0x9a, 0x13, 0x0b, 0x32, + 0xf5, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0x5a, 0xb4, 0x9a, + 0x18, 0xb9, 0xb8, 0x7d, 0xf2, 0xd3, 0x83, 0xa1, 0x06, 0x09, 0x71, 0x73, 0xb1, 0xbb, 0xb8, 0xba, + 0x39, 0x86, 0xfa, 0x84, 0x08, 0x30, 0x08, 0x71, 0x72, 0xb1, 0xba, 0xb8, 0x3a, 0x85, 0xba, 0x0b, + 0xa4, 0x08, 
0x71, 0x72, 0xb1, 0x78, 0xfa, 0xb9, 0xf9, 0x0b, 0x9c, 0x60, 0x14, 0xe2, 0xe6, 0x62, + 0xf3, 0xf3, 0x0f, 0xf1, 0x74, 0x76, 0x15, 0x58, 0xc3, 0x24, 0xc4, 0xc3, 0xc5, 0x1e, 0xee, 0x18, + 0xe4, 0xe7, 0xe9, 0xe7, 0x2e, 0x30, 0x81, 0x59, 0x88, 0x8b, 0x8b, 0xd5, 0x35, 0x28, 0xc8, 0x3f, + 0x48, 0xe0, 0x0b, 0xb3, 0x10, 0x2f, 0x17, 0x87, 0x73, 0x90, 0x67, 0x88, 0xa7, 0xb3, 0xa3, 0x8f, + 0xc0, 0x0d, 0x16, 0x90, 0x94, 0xa3, 0x8f, 0x6b, 0x50, 0x88, 0xc0, 0x1e, 0x56, 0x21, 0x3e, 0x2e, + 0x4e, 0x57, 0x5f, 0xd7, 0x20, 0x77, 0x57, 0x3f, 0xe7, 0x48, 0x81, 0x05, 0x6c, 0x4e, 0xcd, 0x8c, + 0x5c, 0xe2, 0xc9, 0xf9, 0xb9, 0x7a, 0x58, 0x9c, 0xef, 0x24, 0x80, 0xe4, 0xba, 0x00, 0x90, 0x93, + 0x03, 0x18, 0xa3, 0x2c, 0xa0, 0x0a, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, + 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x1e, 0xd2, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0xa3, 0x04, 0x97, + 0x75, 0x0e, 0x88, 0x5c, 0xc5, 0x24, 0xe9, 0x0e, 0xd1, 0xea, 0x9c, 0x93, 0x5f, 0x9a, 0xa2, 0xe7, + 0x03, 0xb5, 0x29, 0xa4, 0xb2, 0x20, 0x35, 0x89, 0x0d, 0x6c, 0x80, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x1b, 0x91, 0x99, 0x37, 0x6e, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go new file mode 100644 index 000000000..73a0e1fa9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go @@ -0,0 +1,511 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/logging/v2/log_entry.proto + +/* +Package logging is a generated protocol buffer package. 
+ +It is generated from these files: + google/logging/v2/log_entry.proto + google/logging/v2/logging.proto + google/logging/v2/logging_config.proto + google/logging/v2/logging_metrics.proto + +It has these top-level messages: + LogEntry + LogEntryOperation + LogEntrySourceLocation + DeleteLogRequest + WriteLogEntriesRequest + WriteLogEntriesResponse + WriteLogEntriesPartialErrors + ListLogEntriesRequest + ListLogEntriesResponse + ListMonitoredResourceDescriptorsRequest + ListMonitoredResourceDescriptorsResponse + ListLogsRequest + ListLogsResponse + LogSink + ListSinksRequest + ListSinksResponse + GetSinkRequest + CreateSinkRequest + UpdateSinkRequest + DeleteSinkRequest + LogExclusion + ListExclusionsRequest + ListExclusionsResponse + GetExclusionRequest + CreateExclusionRequest + UpdateExclusionRequest + DeleteExclusionRequest + LogMetric + ListLogMetricsRequest + ListLogMetricsResponse + GetLogMetricRequest + CreateLogMetricRequest + UpdateLogMetricRequest + DeleteLogMetricRequest +*/ +package logging + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api3 "google.golang.org/genproto/googleapis/api/monitoredres" +import google_logging_type "google.golang.org/genproto/googleapis/logging/type" +import google_logging_type1 "google.golang.org/genproto/googleapis/logging/type" +import google_protobuf2 "github.com/golang/protobuf/ptypes/any" +import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An individual entry in a log. +type LogEntry struct { + // Required. The resource name of the log to which this log entry belongs: + // + // "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // + // `[LOG_ID]` must be URL-encoded within `log_name`. Example: + // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + // `[LOG_ID]` must be less than 512 characters long and can only include the + // following characters: upper and lower case alphanumeric characters, + // forward-slash, underscore, hyphen, and period. + // + // For backward compatibility, if `log_name` begins with a forward-slash, such + // as `/projects/...`, then the log entry is ingested as usual but the + // forward-slash is removed. Listing the log entry will not show the leading + // slash and filtering for a log name with a leading slash will never return + // any results. + LogName string `protobuf:"bytes,12,opt,name=log_name,json=logName" json:"log_name,omitempty"` + // Required. The monitored resource associated with this log entry. + // Example: a log entry that reports a database error would be + // associated with the monitored resource designating the particular + // database that reported the error. + Resource *google_api3.MonitoredResource `protobuf:"bytes,8,opt,name=resource" json:"resource,omitempty"` + // Optional. The log entry payload, which can be one of multiple types. + // + // Types that are valid to be assigned to Payload: + // *LogEntry_ProtoPayload + // *LogEntry_TextPayload + // *LogEntry_JsonPayload + Payload isLogEntry_Payload `protobuf_oneof:"payload"` + // Optional. 
The time the event described by the log entry occurred. + // This time is used to compute the log entry's age and to enforce + // the logs retention period. If this field is omitted in a new log + // entry, then Stackdriver Logging assigns it the current time. + // + // Incoming log entries should have timestamps that are no more than + // the [logs retention period](/logging/quota-policy) in the past, + // and no more than 24 hours in the future. + // See the `entries.write` API method for more information. + Timestamp *google_protobuf4.Timestamp `protobuf:"bytes,9,opt,name=timestamp" json:"timestamp,omitempty"` + // Output only. The time the log entry was received by Stackdriver Logging. + ReceiveTimestamp *google_protobuf4.Timestamp `protobuf:"bytes,24,opt,name=receive_timestamp,json=receiveTimestamp" json:"receive_timestamp,omitempty"` + // Optional. The severity of the log entry. The default value is + // `LogSeverity.DEFAULT`. + Severity google_logging_type1.LogSeverity `protobuf:"varint,10,opt,name=severity,enum=google.logging.type.LogSeverity" json:"severity,omitempty"` + // Optional. A unique identifier for the log entry. If you provide a value, + // then Stackdriver Logging considers other log entries in the same project, + // with the same `timestamp`, and with the same `insert_id` to be duplicates + // which can be removed. If omitted in new log entries, then Stackdriver + // Logging assigns its own unique identifier. The `insert_id` is also used + // to order log entries that have the same `timestamp` value. + InsertId string `protobuf:"bytes,4,opt,name=insert_id,json=insertId" json:"insert_id,omitempty"` + // Optional. Information about the HTTP request associated with this + // log entry, if applicable. + HttpRequest *google_logging_type.HttpRequest `protobuf:"bytes,7,opt,name=http_request,json=httpRequest" json:"http_request,omitempty"` + // Optional. 
A set of user-defined (key, value) data that provides additional + // information about the log entry. + Labels map[string]string `protobuf:"bytes,11,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. Information about an operation associated with the log entry, if + // applicable. + Operation *LogEntryOperation `protobuf:"bytes,15,opt,name=operation" json:"operation,omitempty"` + // Optional. Resource name of the trace associated with the log entry, if any. + // If it contains a relative resource name, the name is assumed to be relative + // to `//tracing.googleapis.com`. Example: + // `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824` + Trace string `protobuf:"bytes,22,opt,name=trace" json:"trace,omitempty"` + // Optional. Source code location information associated with the log entry, + // if any. + SourceLocation *LogEntrySourceLocation `protobuf:"bytes,23,opt,name=source_location,json=sourceLocation" json:"source_location,omitempty"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isLogEntry_Payload interface { + isLogEntry_Payload() +} + +type LogEntry_ProtoPayload struct { + ProtoPayload *google_protobuf2.Any `protobuf:"bytes,2,opt,name=proto_payload,json=protoPayload,oneof"` +} +type LogEntry_TextPayload struct { + TextPayload string `protobuf:"bytes,3,opt,name=text_payload,json=textPayload,oneof"` +} +type LogEntry_JsonPayload struct { + JsonPayload *google_protobuf3.Struct `protobuf:"bytes,6,opt,name=json_payload,json=jsonPayload,oneof"` +} + +func (*LogEntry_ProtoPayload) isLogEntry_Payload() {} +func (*LogEntry_TextPayload) isLogEntry_Payload() {} +func (*LogEntry_JsonPayload) isLogEntry_Payload() {} + +func (m *LogEntry) GetPayload() isLogEntry_Payload { + 
if m != nil { + return m.Payload + } + return nil +} + +func (m *LogEntry) GetLogName() string { + if m != nil { + return m.LogName + } + return "" +} + +func (m *LogEntry) GetResource() *google_api3.MonitoredResource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *LogEntry) GetProtoPayload() *google_protobuf2.Any { + if x, ok := m.GetPayload().(*LogEntry_ProtoPayload); ok { + return x.ProtoPayload + } + return nil +} + +func (m *LogEntry) GetTextPayload() string { + if x, ok := m.GetPayload().(*LogEntry_TextPayload); ok { + return x.TextPayload + } + return "" +} + +func (m *LogEntry) GetJsonPayload() *google_protobuf3.Struct { + if x, ok := m.GetPayload().(*LogEntry_JsonPayload); ok { + return x.JsonPayload + } + return nil +} + +func (m *LogEntry) GetTimestamp() *google_protobuf4.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *LogEntry) GetReceiveTimestamp() *google_protobuf4.Timestamp { + if m != nil { + return m.ReceiveTimestamp + } + return nil +} + +func (m *LogEntry) GetSeverity() google_logging_type1.LogSeverity { + if m != nil { + return m.Severity + } + return google_logging_type1.LogSeverity_DEFAULT +} + +func (m *LogEntry) GetInsertId() string { + if m != nil { + return m.InsertId + } + return "" +} + +func (m *LogEntry) GetHttpRequest() *google_logging_type.HttpRequest { + if m != nil { + return m.HttpRequest + } + return nil +} + +func (m *LogEntry) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *LogEntry) GetOperation() *LogEntryOperation { + if m != nil { + return m.Operation + } + return nil +} + +func (m *LogEntry) GetTrace() string { + if m != nil { + return m.Trace + } + return "" +} + +func (m *LogEntry) GetSourceLocation() *LogEntrySourceLocation { + if m != nil { + return m.SourceLocation + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LogEntry_OneofMarshaler, _LogEntry_OneofUnmarshaler, _LogEntry_OneofSizer, []interface{}{ + (*LogEntry_ProtoPayload)(nil), + (*LogEntry_TextPayload)(nil), + (*LogEntry_JsonPayload)(nil), + } +} + +func _LogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LogEntry) + // payload + switch x := m.Payload.(type) { + case *LogEntry_ProtoPayload: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ProtoPayload); err != nil { + return err + } + case *LogEntry_TextPayload: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.TextPayload) + case *LogEntry_JsonPayload: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonPayload); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LogEntry.Payload has unexpected type %T", x) + } + return nil +} + +func _LogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LogEntry) + switch tag { + case 2: // payload.proto_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Any) + err := b.DecodeMessage(msg) + m.Payload = &LogEntry_ProtoPayload{msg} + return true, err + case 3: // payload.text_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Payload = &LogEntry_TextPayload{x} + return true, err + case 6: // payload.json_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Struct) + err := b.DecodeMessage(msg) + m.Payload = &LogEntry_JsonPayload{msg} + return true, err + default: + return false, nil + } +} + +func _LogEntry_OneofSizer(msg proto.Message) (n int) { + m := 
msg.(*LogEntry) + // payload + switch x := m.Payload.(type) { + case *LogEntry_ProtoPayload: + s := proto.Size(x.ProtoPayload) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LogEntry_TextPayload: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.TextPayload))) + n += len(x.TextPayload) + case *LogEntry_JsonPayload: + s := proto.Size(x.JsonPayload) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Additional information about a potentially long-running operation with which +// a log entry is associated. +type LogEntryOperation struct { + // Optional. An arbitrary operation identifier. Log entries with the + // same identifier are assumed to be part of the same operation. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // Optional. An arbitrary producer identifier. The combination of + // `id` and `producer` must be globally unique. Examples for `producer`: + // `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`. + Producer string `protobuf:"bytes,2,opt,name=producer" json:"producer,omitempty"` + // Optional. Set this to True if this is the first log entry in the operation. + First bool `protobuf:"varint,3,opt,name=first" json:"first,omitempty"` + // Optional. Set this to True if this is the last log entry in the operation. 
+ Last bool `protobuf:"varint,4,opt,name=last" json:"last,omitempty"` +} + +func (m *LogEntryOperation) Reset() { *m = LogEntryOperation{} } +func (m *LogEntryOperation) String() string { return proto.CompactTextString(m) } +func (*LogEntryOperation) ProtoMessage() {} +func (*LogEntryOperation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *LogEntryOperation) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *LogEntryOperation) GetProducer() string { + if m != nil { + return m.Producer + } + return "" +} + +func (m *LogEntryOperation) GetFirst() bool { + if m != nil { + return m.First + } + return false +} + +func (m *LogEntryOperation) GetLast() bool { + if m != nil { + return m.Last + } + return false +} + +// Additional information about the source code location that produced the log +// entry. +type LogEntrySourceLocation struct { + // Optional. Source file name. Depending on the runtime environment, this + // might be a simple name or a fully-qualified name. + File string `protobuf:"bytes,1,opt,name=file" json:"file,omitempty"` + // Optional. Line within the source file. 1-based; 0 indicates no line number + // available. + Line int64 `protobuf:"varint,2,opt,name=line" json:"line,omitempty"` + // Optional. Human-readable name of the function or method being invoked, with + // optional context such as the class or package name. This information may be + // used in contexts such as the logs viewer, where a file and line number are + // less meaningful. The format can vary by language. For example: + // `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function` + // (Python). 
+ Function string `protobuf:"bytes,3,opt,name=function" json:"function,omitempty"` +} + +func (m *LogEntrySourceLocation) Reset() { *m = LogEntrySourceLocation{} } +func (m *LogEntrySourceLocation) String() string { return proto.CompactTextString(m) } +func (*LogEntrySourceLocation) ProtoMessage() {} +func (*LogEntrySourceLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *LogEntrySourceLocation) GetFile() string { + if m != nil { + return m.File + } + return "" +} + +func (m *LogEntrySourceLocation) GetLine() int64 { + if m != nil { + return m.Line + } + return 0 +} + +func (m *LogEntrySourceLocation) GetFunction() string { + if m != nil { + return m.Function + } + return "" +} + +func init() { + proto.RegisterType((*LogEntry)(nil), "google.logging.v2.LogEntry") + proto.RegisterType((*LogEntryOperation)(nil), "google.logging.v2.LogEntryOperation") + proto.RegisterType((*LogEntrySourceLocation)(nil), "google.logging.v2.LogEntrySourceLocation") +} + +func init() { proto.RegisterFile("google/logging/v2/log_entry.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 699 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x5f, 0x6f, 0xdb, 0x36, + 0x10, 0x8f, 0xec, 0xcc, 0x91, 0x69, 0xe7, 0x1f, 0x91, 0x25, 0x8a, 0x97, 0x61, 0x5e, 0x32, 0x6c, + 0xde, 0x8b, 0x0c, 0x78, 0x2f, 0xc9, 0x12, 0xa0, 0xa8, 0x83, 0x22, 0x29, 0xe0, 0xb6, 0x01, 0x53, + 0xf4, 0xa1, 0x28, 0x60, 0x30, 0x12, 0xad, 0xb0, 0x95, 0x49, 0x95, 0xa2, 0x8c, 0xfa, 0xa9, 0xdf, + 0xa7, 0x9f, 0xa8, 0x1f, 0xa5, 0x8f, 0x05, 0x8f, 0x94, 0xed, 0xda, 0x41, 0xfa, 0x76, 0x27, 0xfe, + 0xfe, 0xdc, 0x1d, 0x8f, 0x42, 0x7f, 0x26, 0x52, 0x26, 0x29, 0xeb, 0xa6, 0x32, 0x49, 0xb8, 0x48, + 0xba, 0x93, 0x9e, 0x09, 0x87, 0x4c, 0x68, 0x35, 0x0d, 0x33, 0x25, 0xb5, 0xc4, 0xbb, 0x16, 0x12, + 0x3a, 0x48, 0x38, 0xe9, 0xb5, 0x8e, 0x1c, 0x8b, 0x66, 0xbc, 0x4b, 0x85, 0x90, 0x9a, 0x6a, 0x2e, + 0x45, 0x6e, 0x09, 0xad, 0x93, 0x85, 0xd3, 
0xb1, 0x14, 0x5c, 0x4b, 0xc5, 0xe2, 0xa1, 0x62, 0xb9, + 0x2c, 0x54, 0xc4, 0x1c, 0xe8, 0xef, 0x25, 0x63, 0x3d, 0xcd, 0x58, 0xf7, 0x5e, 0xeb, 0x6c, 0xa8, + 0xd8, 0xc7, 0x82, 0xe5, 0xfa, 0x31, 0x9c, 0x29, 0x31, 0x67, 0x13, 0xa6, 0xb8, 0x76, 0x55, 0xb6, + 0x0e, 0x1d, 0x0e, 0xb2, 0xbb, 0x62, 0xd4, 0xa5, 0xa2, 0x3c, 0x3a, 0x5a, 0x3e, 0xca, 0xb5, 0x2a, + 0xa2, 0xd2, 0xe0, 0x8f, 0xe5, 0x53, 0xcd, 0xc7, 0x2c, 0xd7, 0x74, 0x9c, 0x59, 0xc0, 0xf1, 0xd7, + 0x1a, 0xf2, 0x07, 0x32, 0x79, 0x66, 0x46, 0x82, 0x0f, 0x91, 0x6f, 0xcc, 0x05, 0x1d, 0xb3, 0xa0, + 0xd9, 0xf6, 0x3a, 0x75, 0xb2, 0x91, 0xca, 0xe4, 0x25, 0x1d, 0x33, 0x7c, 0x86, 0xfc, 0xb2, 0xc7, + 0xc0, 0x6f, 0x7b, 0x9d, 0x46, 0xef, 0xf7, 0xd0, 0x8d, 0x8e, 0x66, 0x3c, 0x7c, 0x51, 0x4e, 0x82, + 0x38, 0x10, 0x99, 0xc1, 0xf1, 0x39, 0xda, 0x04, 0xaf, 0x61, 0x46, 0xa7, 0xa9, 0xa4, 0x71, 0x50, + 0x01, 0xfe, 0x5e, 0xc9, 0x2f, 0x6b, 0x0b, 0x9f, 0x8a, 0xe9, 0xf5, 0x1a, 0x69, 0x42, 0x7e, 0x63, + 0xb1, 0xf8, 0x04, 0x35, 0x35, 0xfb, 0xa4, 0x67, 0xdc, 0xaa, 0x29, 0xeb, 0x7a, 0x8d, 0x34, 0xcc, + 0xd7, 0x12, 0x74, 0x81, 0x9a, 0xef, 0x73, 0x29, 0x66, 0xa0, 0x1a, 0x18, 0x1c, 0xac, 0x18, 0xdc, + 0xc2, 0x68, 0x0c, 0xdb, 0xc0, 0x4b, 0xf6, 0x29, 0xaa, 0xcf, 0xa6, 0x12, 0xd4, 0x81, 0xda, 0x5a, + 0xa1, 0xbe, 0x2e, 0x11, 0x64, 0x0e, 0xc6, 0x57, 0x68, 0x57, 0xb1, 0x88, 0xf1, 0x09, 0x1b, 0xce, + 0x15, 0x82, 0x9f, 0x2a, 0xec, 0x38, 0xd2, 0xec, 0x0b, 0xbe, 0x40, 0x7e, 0x79, 0xe3, 0x01, 0x6a, + 0x7b, 0x9d, 0xad, 0x5e, 0x3b, 0x5c, 0x5a, 0x4c, 0xb3, 0x1a, 0xe1, 0x40, 0x26, 0xb7, 0x0e, 0x47, + 0x66, 0x0c, 0xfc, 0x1b, 0xaa, 0x73, 0x91, 0x33, 0xa5, 0x87, 0x3c, 0x0e, 0xd6, 0xe1, 0xde, 0x7c, + 0xfb, 0xe1, 0x79, 0x8c, 0x2f, 0x51, 0x73, 0x71, 0xf1, 0x82, 0x0d, 0x28, 0xef, 0x61, 0xf9, 0x6b, + 0xad, 0x33, 0x62, 0x71, 0xa4, 0x71, 0x3f, 0x4f, 0xf0, 0x13, 0x54, 0x4b, 0xe9, 0x1d, 0x4b, 0xf3, + 0xa0, 0xd1, 0xae, 0x76, 0x1a, 0xbd, 0x7f, 0xc2, 0x95, 0x67, 0x13, 0x96, 0x5b, 0x14, 0x0e, 0x00, + 0x09, 0x31, 0x71, 0x34, 0xdc, 0x47, 0x75, 0x99, 0x31, 0x05, 0x2f, 0x29, 0xd8, 
0x86, 0x12, 0xfe, + 0x7a, 0x44, 0xe3, 0x55, 0x89, 0x25, 0x73, 0x1a, 0xde, 0x43, 0xbf, 0x68, 0x45, 0x23, 0x16, 0xec, + 0x43, 0x8b, 0x36, 0xc1, 0x04, 0x6d, 0xdb, 0x3d, 0x1b, 0xa6, 0x32, 0xb2, 0xfa, 0x07, 0xa0, 0xff, + 0xef, 0x23, 0xfa, 0xb7, 0xc0, 0x18, 0x38, 0x02, 0xd9, 0xca, 0x7f, 0xc8, 0x5b, 0x67, 0xa8, 0xb1, + 0xd0, 0x04, 0xde, 0x41, 0xd5, 0x0f, 0x6c, 0x1a, 0x78, 0x60, 0x6b, 0x42, 0x53, 0xca, 0x84, 0xa6, + 0x05, 0x83, 0x55, 0xae, 0x13, 0x9b, 0xfc, 0x5f, 0x39, 0xf5, 0xfa, 0x75, 0xb4, 0xe1, 0xb6, 0xf0, + 0x98, 0xa3, 0xdd, 0x95, 0x7e, 0xf0, 0x16, 0xaa, 0xf0, 0xd8, 0x49, 0x55, 0x78, 0x8c, 0x5b, 0xc8, + 0xcf, 0x94, 0x8c, 0x8b, 0x88, 0x29, 0x27, 0x36, 0xcb, 0x8d, 0xcb, 0x88, 0xab, 0x5c, 0xc3, 0xd2, + 0xfb, 0xc4, 0x26, 0x18, 0xa3, 0xf5, 0x94, 0xe6, 0x1a, 0x2e, 0xda, 0x27, 0x10, 0x1f, 0xbf, 0x43, + 0xfb, 0x0f, 0xb7, 0x66, 0xd0, 0x23, 0x9e, 0x32, 0xe7, 0x08, 0x31, 0x28, 0x70, 0x61, 0x8b, 0xaf, + 0x12, 0x88, 0x4d, 0x1d, 0xa3, 0x42, 0x44, 0x30, 0xbf, 0xaa, 0xad, 0xa3, 0xcc, 0xfb, 0x9f, 0xd1, + 0xaf, 0x91, 0x1c, 0xaf, 0x8e, 0xb3, 0xbf, 0x59, 0x9a, 0xde, 0xc0, 0x93, 0xf5, 0xde, 0x9e, 0x3a, + 0x4c, 0x22, 0x53, 0x2a, 0x92, 0x50, 0xaa, 0xa4, 0x9b, 0x30, 0x01, 0x4f, 0xa0, 0x6b, 0x8f, 0x68, + 0xc6, 0xf3, 0x85, 0xff, 0xf1, 0xb9, 0x0b, 0xbf, 0x79, 0xde, 0x97, 0xca, 0xc1, 0x95, 0x65, 0x5f, + 0xa6, 0xb2, 0x88, 0xcd, 0x5d, 0x81, 0xcf, 0x9b, 0xde, 0x5d, 0x0d, 0x14, 0xfe, 0xfb, 0x1e, 0x00, + 0x00, 0xff, 0xff, 0xa9, 0x61, 0x44, 0xa8, 0xd0, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go new file mode 100644 index 000000000..c8d5f026f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go @@ -0,0 +1,781 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/logging/v2/logging.proto + +package logging + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api3 "google.golang.org/genproto/googleapis/api/monitoredres" +import _ "github.com/golang/protobuf/ptypes/duration" +import google_protobuf5 "github.com/golang/protobuf/ptypes/empty" +import _ "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The parameters to DeleteLog. +type DeleteLogRequest struct { + // Required. The resource name of the log to delete: + // + // "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // + // `[LOG_ID]` must be URL-encoded. For example, + // `"projects/my-project-id/logs/syslog"`, + // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + // For more information about log names, see + // [LogEntry][google.logging.v2.LogEntry]. + LogName string `protobuf:"bytes,1,opt,name=log_name,json=logName" json:"log_name,omitempty"` +} + +func (m *DeleteLogRequest) Reset() { *m = DeleteLogRequest{} } +func (m *DeleteLogRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteLogRequest) ProtoMessage() {} +func (*DeleteLogRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *DeleteLogRequest) GetLogName() string { + if m != nil { + return m.LogName + } + return "" +} + +// The parameters to WriteLogEntries. +type WriteLogEntriesRequest struct { + // Optional. 
A default log resource name that is assigned to all log entries + // in `entries` that do not specify a value for `log_name`: + // + // "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // + // `[LOG_ID]` must be URL-encoded. For example, + // `"projects/my-project-id/logs/syslog"` or + // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + // For more information about log names, see + // [LogEntry][google.logging.v2.LogEntry]. + LogName string `protobuf:"bytes,1,opt,name=log_name,json=logName" json:"log_name,omitempty"` + // Optional. A default monitored resource object that is assigned to all log + // entries in `entries` that do not specify a value for `resource`. Example: + // + // { "type": "gce_instance", + // "labels": { + // "zone": "us-central1-a", "instance_id": "00000000000000000000" }} + // + // See [LogEntry][google.logging.v2.LogEntry]. + Resource *google_api3.MonitoredResource `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` + // Optional. Default labels that are added to the `labels` field of all log + // entries in `entries`. If a log entry already has a label with the same key + // as a label in this parameter, then the log entry's label is not changed. + // See [LogEntry][google.logging.v2.LogEntry]. + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Required. The log entries to send to Stackdriver Logging. The order of log + // entries in this list does not matter. Values supplied in this method's + // `log_name`, `resource`, and `labels` fields are copied into those log + // entries in this list that do not include values for their corresponding + // fields. For more information, see the [LogEntry][google.logging.v2.LogEntry] type. 
+ // + // If the `timestamp` or `insert_id` fields are missing in log entries, then + // this method supplies the current time or a unique identifier, respectively. + // The supplied values are chosen so that, among the log entries that did not + // supply their own values, the entries earlier in the list will sort before + // the entries later in the list. See the `entries.list` method. + // + // Log entries with timestamps that are more than the + // [logs retention period](/logging/quota-policy) in the past or more than + // 24 hours in the future might be discarded. Discarding does not return + // an error. + // + // To improve throughput and to avoid exceeding the + // [quota limit](/logging/quota-policy) for calls to `entries.write`, + // you should try to include several log entries in this list, + // rather than calling this method for each individual log entry. + Entries []*LogEntry `protobuf:"bytes,4,rep,name=entries" json:"entries,omitempty"` + // Optional. Whether valid entries should be written even if some other + // entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any + // entry is not written, then the response status is the error associated + // with one of the failed entries and the response includes error details + // keyed by the entries' zero-based index in the `entries.write` method. 
+ PartialSuccess bool `protobuf:"varint,5,opt,name=partial_success,json=partialSuccess" json:"partial_success,omitempty"` +} + +func (m *WriteLogEntriesRequest) Reset() { *m = WriteLogEntriesRequest{} } +func (m *WriteLogEntriesRequest) String() string { return proto.CompactTextString(m) } +func (*WriteLogEntriesRequest) ProtoMessage() {} +func (*WriteLogEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *WriteLogEntriesRequest) GetLogName() string { + if m != nil { + return m.LogName + } + return "" +} + +func (m *WriteLogEntriesRequest) GetResource() *google_api3.MonitoredResource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *WriteLogEntriesRequest) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *WriteLogEntriesRequest) GetEntries() []*LogEntry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *WriteLogEntriesRequest) GetPartialSuccess() bool { + if m != nil { + return m.PartialSuccess + } + return false +} + +// Result returned from WriteLogEntries. +// empty +type WriteLogEntriesResponse struct { +} + +func (m *WriteLogEntriesResponse) Reset() { *m = WriteLogEntriesResponse{} } +func (m *WriteLogEntriesResponse) String() string { return proto.CompactTextString(m) } +func (*WriteLogEntriesResponse) ProtoMessage() {} +func (*WriteLogEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +// Error details for WriteLogEntries with partial success. +type WriteLogEntriesPartialErrors struct { + // When `WriteLogEntriesRequest.partial_success` is true, records the error + // status for entries that were not written due to a permanent error, keyed + // by the entry's zero-based index in `WriteLogEntriesRequest.entries`. + // + // Failed requests for which no entries are written will not include + // per-entry errors. 
+ LogEntryErrors map[int32]*google_rpc.Status `protobuf:"bytes,1,rep,name=log_entry_errors,json=logEntryErrors" json:"log_entry_errors,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *WriteLogEntriesPartialErrors) Reset() { *m = WriteLogEntriesPartialErrors{} } +func (m *WriteLogEntriesPartialErrors) String() string { return proto.CompactTextString(m) } +func (*WriteLogEntriesPartialErrors) ProtoMessage() {} +func (*WriteLogEntriesPartialErrors) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *WriteLogEntriesPartialErrors) GetLogEntryErrors() map[int32]*google_rpc.Status { + if m != nil { + return m.LogEntryErrors + } + return nil +} + +// The parameters to `ListLogEntries`. +type ListLogEntriesRequest struct { + // Deprecated. Use `resource_names` instead. One or more project identifiers + // or project numbers from which to retrieve log entries. Example: + // `"my-project-1A"`. If present, these project identifiers are converted to + // resource name format and added to the list of resources in + // `resource_names`. + ProjectIds []string `protobuf:"bytes,1,rep,name=project_ids,json=projectIds" json:"project_ids,omitempty"` + // Required. Names of one or more parent resources from which to + // retrieve log entries: + // + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + // + // Projects listed in the `project_ids` field are added to this list. + ResourceNames []string `protobuf:"bytes,8,rep,name=resource_names,json=resourceNames" json:"resource_names,omitempty"` + // Optional. A filter that chooses which log entries to return. See [Advanced + // Logs Filters](/logging/docs/view/advanced_filters). Only log entries that + // match the filter are returned. An empty filter matches all log entries in + // the resources listed in `resource_names`. 
Referencing a parent resource + // that is not listed in `resource_names` will cause the filter to return no + // results. + // The maximum length of the filter is 20000 characters. + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // Optional. How the results should be sorted. Presently, the only permitted + // values are `"timestamp asc"` (default) and `"timestamp desc"`. The first + // option returns entries in order of increasing values of + // `LogEntry.timestamp` (oldest first), and the second option returns entries + // in order of decreasing timestamps (newest first). Entries with equal + // timestamps are returned in order of their `insert_id` values. + OrderBy string `protobuf:"bytes,3,opt,name=order_by,json=orderBy" json:"order_by,omitempty"` + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `next_page_token` in the + // response indicates that more results might be available. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `page_token` must be the value of + // `next_page_token` from the previous response. The values of other method + // parameters should be identical to those in the previous call. 
+ PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListLogEntriesRequest) Reset() { *m = ListLogEntriesRequest{} } +func (m *ListLogEntriesRequest) String() string { return proto.CompactTextString(m) } +func (*ListLogEntriesRequest) ProtoMessage() {} +func (*ListLogEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *ListLogEntriesRequest) GetProjectIds() []string { + if m != nil { + return m.ProjectIds + } + return nil +} + +func (m *ListLogEntriesRequest) GetResourceNames() []string { + if m != nil { + return m.ResourceNames + } + return nil +} + +func (m *ListLogEntriesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListLogEntriesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListLogEntriesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListLogEntriesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Result returned from `ListLogEntries`. +type ListLogEntriesResponse struct { + // A list of log entries. If `entries` is empty, `nextPageToken` may still be + // returned, indicating that more entries may exist. See `nextPageToken` for + // more information. + Entries []*LogEntry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. + // + // If a value for `next_page_token` appears and the `entries` field is empty, + // it means that the search found no log entries so far but it did not have + // time to search all the possible log entries. Retry the method with this + // value for `page_token` to continue the search. 
Alternatively, consider + // speeding up the search by changing your filter to specify a single log name + // or resource type, or to narrow the time range of the search. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListLogEntriesResponse) Reset() { *m = ListLogEntriesResponse{} } +func (m *ListLogEntriesResponse) String() string { return proto.CompactTextString(m) } +func (*ListLogEntriesResponse) ProtoMessage() {} +func (*ListLogEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *ListLogEntriesResponse) GetEntries() []*LogEntry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListLogEntriesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The parameters to ListMonitoredResourceDescriptors +type ListMonitoredResourceDescriptorsRequest struct { + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. 
+ PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListMonitoredResourceDescriptorsRequest) Reset() { + *m = ListMonitoredResourceDescriptorsRequest{} +} +func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{6} +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Result returned from ListMonitoredResourceDescriptors. +type ListMonitoredResourceDescriptorsResponse struct { + // A list of resource descriptors. + ResourceDescriptors []*google_api3.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors" json:"resource_descriptors,omitempty"` + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListMonitoredResourceDescriptorsResponse) Reset() { + *m = ListMonitoredResourceDescriptorsResponse{} +} +func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{7} +} + +func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*google_api3.MonitoredResourceDescriptor { + if m != nil { + return m.ResourceDescriptors + } + return nil +} + +func (m *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The parameters to ListLogs. +type ListLogsRequest struct { + // Required. The resource name that owns the logs: + // + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListLogsRequest) Reset() { *m = ListLogsRequest{} } +func (m *ListLogsRequest) String() string { return proto.CompactTextString(m) } +func (*ListLogsRequest) ProtoMessage() {} +func (*ListLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *ListLogsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListLogsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListLogsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Result returned from ListLogs. +type ListLogsResponse struct { + // A list of log names. For example, + // `"projects/my-project/syslog"` or + // `"organizations/123/cloudresourcemanager.googleapis.com%2Factivity"`. + LogNames []string `protobuf:"bytes,3,rep,name=log_names,json=logNames" json:"log_names,omitempty"` + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListLogsResponse) Reset() { *m = ListLogsResponse{} } +func (m *ListLogsResponse) String() string { return proto.CompactTextString(m) } +func (*ListLogsResponse) ProtoMessage() {} +func (*ListLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *ListLogsResponse) GetLogNames() []string { + if m != nil { + return m.LogNames + } + return nil +} + +func (m *ListLogsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*DeleteLogRequest)(nil), "google.logging.v2.DeleteLogRequest") + proto.RegisterType((*WriteLogEntriesRequest)(nil), "google.logging.v2.WriteLogEntriesRequest") + proto.RegisterType((*WriteLogEntriesResponse)(nil), "google.logging.v2.WriteLogEntriesResponse") + proto.RegisterType((*WriteLogEntriesPartialErrors)(nil), "google.logging.v2.WriteLogEntriesPartialErrors") + proto.RegisterType((*ListLogEntriesRequest)(nil), "google.logging.v2.ListLogEntriesRequest") + proto.RegisterType((*ListLogEntriesResponse)(nil), "google.logging.v2.ListLogEntriesResponse") + proto.RegisterType((*ListMonitoredResourceDescriptorsRequest)(nil), "google.logging.v2.ListMonitoredResourceDescriptorsRequest") + proto.RegisterType((*ListMonitoredResourceDescriptorsResponse)(nil), "google.logging.v2.ListMonitoredResourceDescriptorsResponse") + proto.RegisterType((*ListLogsRequest)(nil), "google.logging.v2.ListLogsRequest") + proto.RegisterType((*ListLogsResponse)(nil), "google.logging.v2.ListLogsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for LoggingServiceV2 service + +type LoggingServiceV2Client interface { + // Deletes all the log entries in a log. + // The log reappears if it receives new entries. + // Log entries written shortly before the delete operation might not be + // deleted. + DeleteLog(ctx context.Context, in *DeleteLogRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) + // ## Log entry resources + // + // Writes log entries to Stackdriver Logging. This API method is the + // only way to send log entries to Stackdriver Logging. This method + // is used, directly or indirectly, by the Stackdriver Logging agent + // (fluentd) and all logging libraries configured to use Stackdriver + // Logging. + WriteLogEntries(ctx context.Context, in *WriteLogEntriesRequest, opts ...grpc.CallOption) (*WriteLogEntriesResponse, error) + // Lists log entries. Use this method to retrieve log entries from + // Stackdriver Logging. For ways to export log entries, see + // [Exporting Logs](/logging/docs/export). + ListLogEntries(ctx context.Context, in *ListLogEntriesRequest, opts ...grpc.CallOption) (*ListLogEntriesResponse, error) + // Lists the descriptors for monitored resource types used by Stackdriver + // Logging. + ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) + // Lists the logs in projects, organizations, folders, or billing accounts. + // Only logs that have entries are listed. 
+ ListLogs(ctx context.Context, in *ListLogsRequest, opts ...grpc.CallOption) (*ListLogsResponse, error) +} + +type loggingServiceV2Client struct { + cc *grpc.ClientConn +} + +func NewLoggingServiceV2Client(cc *grpc.ClientConn) LoggingServiceV2Client { + return &loggingServiceV2Client{cc} +} + +func (c *loggingServiceV2Client) DeleteLog(ctx context.Context, in *DeleteLogRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) + err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/DeleteLog", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *loggingServiceV2Client) WriteLogEntries(ctx context.Context, in *WriteLogEntriesRequest, opts ...grpc.CallOption) (*WriteLogEntriesResponse, error) { + out := new(WriteLogEntriesResponse) + err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/WriteLogEntries", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *loggingServiceV2Client) ListLogEntries(ctx context.Context, in *ListLogEntriesRequest, opts ...grpc.CallOption) (*ListLogEntriesResponse, error) { + out := new(ListLogEntriesResponse) + err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/ListLogEntries", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *loggingServiceV2Client) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + out := new(ListMonitoredResourceDescriptorsResponse) + err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *loggingServiceV2Client) ListLogs(ctx context.Context, in *ListLogsRequest, opts ...grpc.CallOption) (*ListLogsResponse, error) { + out := new(ListLogsResponse) + err := grpc.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/ListLogs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for LoggingServiceV2 service + +type LoggingServiceV2Server interface { + // Deletes all the log entries in a log. + // The log reappears if it receives new entries. + // Log entries written shortly before the delete operation might not be + // deleted. + DeleteLog(context.Context, *DeleteLogRequest) (*google_protobuf5.Empty, error) + // ## Log entry resources + // + // Writes log entries to Stackdriver Logging. This API method is the + // only way to send log entries to Stackdriver Logging. This method + // is used, directly or indirectly, by the Stackdriver Logging agent + // (fluentd) and all logging libraries configured to use Stackdriver + // Logging. + WriteLogEntries(context.Context, *WriteLogEntriesRequest) (*WriteLogEntriesResponse, error) + // Lists log entries. Use this method to retrieve log entries from + // Stackdriver Logging. For ways to export log entries, see + // [Exporting Logs](/logging/docs/export). + ListLogEntries(context.Context, *ListLogEntriesRequest) (*ListLogEntriesResponse, error) + // Lists the descriptors for monitored resource types used by Stackdriver + // Logging. + ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) + // Lists the logs in projects, organizations, folders, or billing accounts. + // Only logs that have entries are listed. 
+ ListLogs(context.Context, *ListLogsRequest) (*ListLogsResponse, error) +} + +func RegisterLoggingServiceV2Server(s *grpc.Server, srv LoggingServiceV2Server) { + s.RegisterService(&_LoggingServiceV2_serviceDesc, srv) +} + +func _LoggingServiceV2_DeleteLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoggingServiceV2Server).DeleteLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.LoggingServiceV2/DeleteLog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoggingServiceV2Server).DeleteLog(ctx, req.(*DeleteLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LoggingServiceV2_WriteLogEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteLogEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoggingServiceV2Server).WriteLogEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.LoggingServiceV2/WriteLogEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoggingServiceV2Server).WriteLogEntries(ctx, req.(*WriteLogEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LoggingServiceV2_ListLogEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLogEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoggingServiceV2Server).ListLogEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/google.logging.v2.LoggingServiceV2/ListLogEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoggingServiceV2Server).ListLogEntries(ctx, req.(*ListLogEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LoggingServiceV2_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMonitoredResourceDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoggingServiceV2Server).ListMonitoredResourceDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoggingServiceV2Server).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LoggingServiceV2_ListLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLogsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoggingServiceV2Server).ListLogs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.LoggingServiceV2/ListLogs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoggingServiceV2Server).ListLogs(ctx, req.(*ListLogsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LoggingServiceV2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.logging.v2.LoggingServiceV2", + HandlerType: (*LoggingServiceV2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteLog", + 
Handler: _LoggingServiceV2_DeleteLog_Handler, + }, + { + MethodName: "WriteLogEntries", + Handler: _LoggingServiceV2_WriteLogEntries_Handler, + }, + { + MethodName: "ListLogEntries", + Handler: _LoggingServiceV2_ListLogEntries_Handler, + }, + { + MethodName: "ListMonitoredResourceDescriptors", + Handler: _LoggingServiceV2_ListMonitoredResourceDescriptors_Handler, + }, + { + MethodName: "ListLogs", + Handler: _LoggingServiceV2_ListLogs_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/logging/v2/logging.proto", +} + +func init() { proto.RegisterFile("google/logging/v2/logging.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 975 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x06, 0xa5, 0xd8, 0x91, 0x46, 0x8d, 0xad, 0x6c, 0x62, 0x59, 0x91, 0x9c, 0x58, 0xa5, 0x9b, + 0x5a, 0x11, 0x10, 0x12, 0x55, 0x11, 0x20, 0x71, 0xd0, 0x8b, 0x13, 0xa3, 0x28, 0xe0, 0x14, 0x06, + 0xdd, 0x26, 0x40, 0x2e, 0x02, 0x25, 0x4d, 0x88, 0x6d, 0x28, 0x2e, 0xbb, 0xbb, 0x92, 0xab, 0x04, + 0xe9, 0x21, 0x87, 0xbe, 0x40, 0xdf, 0xa2, 0x87, 0xbe, 0x45, 0xaf, 0xbd, 0xf4, 0xd2, 0x43, 0x8f, + 0x79, 0x88, 0x1e, 0x0b, 0xee, 0x2e, 0x65, 0xea, 0x27, 0xb2, 0xdc, 0x9b, 0x76, 0xe6, 0xdb, 0x99, + 0xf9, 0x86, 0xdf, 0xcc, 0x0a, 0x76, 0x03, 0xc6, 0x82, 0x10, 0xdd, 0x90, 0x05, 0x01, 0x8d, 0x02, + 0x77, 0xd4, 0x4e, 0x7f, 0x3a, 0x31, 0x67, 0x92, 0x91, 0xeb, 0x1a, 0xe0, 0xa4, 0xd6, 0x51, 0xbb, + 0xb6, 0x63, 0xee, 0xf8, 0x31, 0x75, 0xfd, 0x28, 0x62, 0xd2, 0x97, 0x94, 0x45, 0x42, 0x5f, 0xa8, + 0xed, 0x65, 0xbc, 0x03, 0x16, 0x51, 0xc9, 0x38, 0xf6, 0x3b, 0x1c, 0x05, 0x1b, 0xf2, 0x1e, 0x1a, + 0xd0, 0xa7, 0x0b, 0xd3, 0x76, 0x30, 0x92, 0x7c, 0x6c, 0x20, 0x77, 0x0c, 0x44, 0x9d, 0xba, 0xc3, + 0x57, 0x6e, 0x7f, 0xc8, 0x55, 0x22, 0xe3, 0xaf, 0xcf, 0xfa, 0x71, 0x10, 0xcb, 0xf4, 0xf2, 0xee, + 0xac, 0x53, 0xd2, 0x01, 0x0a, 0xe9, 0x0f, 0x62, 0x03, 0xd8, 0x36, 0x00, 0x1e, 0xf7, 0x5c, 0x21, + 0x7d, 0x39, 
0x34, 0xe5, 0xdb, 0xf7, 0xa1, 0xfc, 0x14, 0x43, 0x94, 0x78, 0xcc, 0x02, 0x0f, 0x7f, + 0x1c, 0xa2, 0x90, 0xe4, 0x16, 0x14, 0x92, 0xea, 0x22, 0x7f, 0x80, 0x55, 0xab, 0x61, 0x35, 0x8b, + 0xde, 0xd5, 0x90, 0x05, 0xdf, 0xfa, 0x03, 0xb4, 0xff, 0xce, 0x41, 0xe5, 0x05, 0xa7, 0x0a, 0x7e, + 0x14, 0x49, 0x4e, 0x51, 0x5c, 0x7c, 0x8b, 0x3c, 0x82, 0x42, 0xda, 0x90, 0x6a, 0xae, 0x61, 0x35, + 0x4b, 0xed, 0xdb, 0x8e, 0xe9, 0xb3, 0x1f, 0x53, 0xe7, 0x59, 0xda, 0x36, 0xcf, 0x80, 0xbc, 0x09, + 0x9c, 0x3c, 0x83, 0xf5, 0xd0, 0xef, 0x62, 0x28, 0xaa, 0xf9, 0x46, 0xbe, 0x59, 0x6a, 0x3f, 0x70, + 0xe6, 0x3e, 0x90, 0xb3, 0xb8, 0x20, 0xe7, 0x58, 0xdd, 0x4b, 0x8c, 0x63, 0xcf, 0x04, 0x21, 0x0f, + 0xe0, 0x2a, 0x6a, 0x54, 0xf5, 0x8a, 0x8a, 0x57, 0x5f, 0x10, 0xcf, 0x84, 0x1a, 0x7b, 0x29, 0x96, + 0xec, 0xc3, 0x66, 0xec, 0x73, 0x49, 0xfd, 0xb0, 0x23, 0x86, 0xbd, 0x1e, 0x0a, 0x51, 0x5d, 0x6b, + 0x58, 0xcd, 0x82, 0xb7, 0x61, 0xcc, 0xa7, 0xda, 0x5a, 0x7b, 0x04, 0xa5, 0x4c, 0x5a, 0x52, 0x86, + 0xfc, 0x6b, 0x1c, 0x9b, 0x76, 0x24, 0x3f, 0xc9, 0x4d, 0x58, 0x1b, 0xf9, 0xe1, 0x50, 0xf7, 0xa1, + 0xe8, 0xe9, 0xc3, 0x41, 0xee, 0xa1, 0x65, 0xdf, 0x82, 0xed, 0x39, 0x22, 0x22, 0x66, 0x91, 0x40, + 0xfb, 0x83, 0x05, 0x3b, 0x33, 0xbe, 0x13, 0x9d, 0xf7, 0x88, 0x73, 0xc6, 0x05, 0x19, 0x40, 0x79, + 0xa2, 0xa7, 0x0e, 0x2a, 0x5b, 0xd5, 0x52, 0xfc, 0x9e, 0x5c, 0xdc, 0xaf, 0xa9, 0x50, 0x13, 0xf2, + 0xfa, 0xa8, 0xfb, 0xb0, 0x11, 0x4e, 0x19, 0x6b, 0xdf, 0xc3, 0x8d, 0x05, 0xb0, 0x2c, 0xdb, 0x35, + 0xcd, 0xb6, 0x99, 0x65, 0x5b, 0x6a, 0x93, 0xb4, 0x18, 0x1e, 0xf7, 0x9c, 0x53, 0x25, 0xc3, 0x6c, + 0x07, 0xfe, 0xb4, 0x60, 0xeb, 0x98, 0x0a, 0x39, 0xaf, 0xad, 0x5d, 0x28, 0xc5, 0x9c, 0xfd, 0x80, + 0x3d, 0xd9, 0xa1, 0x7d, 0x4d, 0xad, 0xe8, 0x81, 0x31, 0x7d, 0xd3, 0x17, 0xe4, 0x2e, 0x6c, 0xa4, + 0x92, 0x51, 0x0a, 0x14, 0xd5, 0x82, 0xc2, 0x5c, 0x4b, 0xad, 0x89, 0x0e, 0x05, 0xa9, 0xc0, 0xfa, + 0x2b, 0x1a, 0x4a, 0xe4, 0xa6, 0xfd, 0xe6, 0x94, 0x68, 0x97, 0xf1, 0x3e, 0xf2, 0x4e, 0x77, 0x5c, + 0xcd, 0x6b, 0xed, 0xaa, 0xf3, 0xe1, 0x98, 0xd4, 
0xa1, 0x18, 0xfb, 0x01, 0x76, 0x04, 0x7d, 0x83, + 0xd5, 0x2b, 0x8a, 0x5a, 0x21, 0x31, 0x9c, 0xd2, 0x37, 0x48, 0x6e, 0x03, 0x28, 0xa7, 0x64, 0xaf, + 0x31, 0x52, 0x92, 0x28, 0x7a, 0x0a, 0xfe, 0x5d, 0x62, 0xb0, 0xcf, 0xa0, 0x32, 0xcb, 0x47, 0x7f, + 0xd1, 0xac, 0x0e, 0xad, 0x4b, 0xe8, 0xf0, 0x73, 0xd8, 0x8c, 0xf0, 0x27, 0xd9, 0xc9, 0x24, 0xd5, + 0x44, 0xae, 0x25, 0xe6, 0x93, 0x49, 0x62, 0x84, 0xfd, 0x24, 0xf1, 0xdc, 0x60, 0x3d, 0x45, 0xd1, + 0xe3, 0x34, 0x96, 0x8c, 0x4f, 0x5a, 0x3b, 0xc5, 0xcf, 0x5a, 0xca, 0x2f, 0x37, 0xcb, 0xef, 0x77, + 0x0b, 0x9a, 0x17, 0xe7, 0x31, 0x94, 0x5f, 0xc2, 0xcd, 0xc9, 0x27, 0xea, 0x9f, 0xfb, 0x0d, 0xff, + 0xfd, 0xa5, 0x0b, 0xe1, 0x3c, 0x9e, 0x77, 0x83, 0xcf, 0xe7, 0xb8, 0x44, 0x5f, 0x36, 0xcd, 0x07, + 0x99, 0xf0, 0xaf, 0xc0, 0x7a, 0xec, 0x73, 0x8c, 0xa4, 0x99, 0x52, 0x73, 0x9a, 0xee, 0x4b, 0x6e, + 0x69, 0x5f, 0xf2, 0xb3, 0x7d, 0x79, 0x01, 0xe5, 0xf3, 0x34, 0x86, 0x7e, 0x1d, 0x8a, 0xe9, 0x7a, + 0xd4, 0xbb, 0xac, 0xe8, 0x15, 0xcc, 0x7e, 0x5c, 0xb9, 0xfe, 0xf6, 0x3f, 0x6b, 0x50, 0x3e, 0xd6, + 0x02, 0x39, 0x45, 0x3e, 0xa2, 0x3d, 0x7c, 0xde, 0x26, 0x67, 0x50, 0x9c, 0xac, 0x70, 0xb2, 0xb7, + 0x40, 0x47, 0xb3, 0x0b, 0xbe, 0x56, 0x49, 0x41, 0xe9, 0x7b, 0xe1, 0x1c, 0x25, 0x8f, 0x89, 0x7d, + 0xff, 0xfd, 0x5f, 0x1f, 0x7e, 0xcd, 0xed, 0xb7, 0xee, 0xba, 0xa3, 0x76, 0x17, 0xa5, 0xff, 0x85, + 0xfb, 0x36, 0xad, 0xf9, 0x2b, 0x33, 0x6c, 0xc2, 0x6d, 0x25, 0x4f, 0x97, 0x70, 0x5b, 0xef, 0xc8, + 0x2f, 0x16, 0x6c, 0xce, 0xec, 0x12, 0x72, 0x6f, 0xe5, 0xfd, 0x5c, 0x6b, 0xad, 0x02, 0x35, 0x1b, + 0x70, 0x47, 0x55, 0x56, 0xb1, 0xaf, 0x27, 0x4f, 0xa7, 0x99, 0x86, 0x83, 0xb3, 0x04, 0x7c, 0x60, + 0xb5, 0xc8, 0x7b, 0x0b, 0x36, 0xa6, 0x07, 0x8d, 0x34, 0x17, 0xcd, 0xd3, 0xa2, 0xdd, 0x52, 0xbb, + 0xb7, 0x02, 0xd2, 0x54, 0x51, 0x57, 0x55, 0x6c, 0xd9, 0xe5, 0x6c, 0x15, 0x21, 0x15, 0x32, 0x29, + 0xe2, 0x0f, 0x0b, 0x1a, 0x17, 0x0d, 0x03, 0x39, 0xf8, 0x48, 0xb2, 0x15, 0x26, 0xb5, 0xf6, 0xf8, + 0x7f, 0xdd, 0x35, 0xa5, 0x37, 0x55, 0xe9, 0x36, 0x69, 0x24, 0xa5, 0x0f, 0x96, 0x95, 
0x38, 0x86, + 0x42, 0x2a, 0x5e, 0x62, 0x7f, 0xbc, 0x37, 0x93, 0xb2, 0xf6, 0x96, 0x62, 0x4c, 0xfa, 0xcf, 0x54, + 0xfa, 0x3b, 0x64, 0x27, 0x49, 0xff, 0x56, 0x8f, 0x58, 0x46, 0x52, 0xef, 0x94, 0xa6, 0x0e, 0x7f, + 0x86, 0xad, 0x1e, 0x1b, 0xcc, 0xc7, 0x3b, 0xfc, 0xc4, 0x88, 0xfe, 0x24, 0xd1, 0xeb, 0x89, 0xf5, + 0xf2, 0xa1, 0x81, 0x04, 0x2c, 0xf4, 0xa3, 0xc0, 0x61, 0x3c, 0x70, 0x03, 0x8c, 0x94, 0x9a, 0x5d, + 0xed, 0xf2, 0x63, 0x2a, 0x32, 0x7f, 0xb7, 0x1e, 0x9b, 0x9f, 0xff, 0x5a, 0xd6, 0x6f, 0xb9, 0xed, + 0xaf, 0xf5, 0xed, 0x27, 0x21, 0x1b, 0xf6, 0x1d, 0x13, 0xda, 0x79, 0xde, 0xee, 0xae, 0xab, 0x08, + 0x5f, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe2, 0xc4, 0xaa, 0x91, 0x26, 0x0a, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go new file mode 100644 index 000000000..f74490812 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go @@ -0,0 +1,1203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/logging/v2/logging_config.proto + +package logging + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf5 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf6 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Available log entry formats. Log entries can be written to Stackdriver +// Logging in either format and can be exported in either format. +// Version 2 is the preferred format. 
+type LogSink_VersionFormat int32 + +const ( + // An unspecified format version that will default to V2. + LogSink_VERSION_FORMAT_UNSPECIFIED LogSink_VersionFormat = 0 + // `LogEntry` version 2 format. + LogSink_V2 LogSink_VersionFormat = 1 + // `LogEntry` version 1 format. + LogSink_V1 LogSink_VersionFormat = 2 +) + +var LogSink_VersionFormat_name = map[int32]string{ + 0: "VERSION_FORMAT_UNSPECIFIED", + 1: "V2", + 2: "V1", +} +var LogSink_VersionFormat_value = map[string]int32{ + "VERSION_FORMAT_UNSPECIFIED": 0, + "V2": 1, + "V1": 2, +} + +func (x LogSink_VersionFormat) String() string { + return proto.EnumName(LogSink_VersionFormat_name, int32(x)) +} +func (LogSink_VersionFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +// Describes a sink used to export log entries to one of the following +// destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a +// Cloud Pub/Sub topic. A logs filter controls which log entries are +// exported. The sink must be created within a project, organization, billing +// account, or folder. +type LogSink struct { + // Required. The client-assigned sink identifier, unique within the + // project. Example: `"my-syslog-errors-to-pubsub"`. Sink identifiers are + // limited to 100 characters and can include only the following characters: + // upper and lower-case alphanumeric characters, underscores, hyphens, and + // periods. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Required. The export destination: + // + // "storage.googleapis.com/[GCS_BUCKET]" + // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" + // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" + // + // The sink's `writer_identity`, set when the sink is created, must + // have permission to write to the destination or else the log + // entries are not exported. 
For more information, see + // [Exporting Logs With Sinks](/logging/docs/api/tasks/exporting-logs). + Destination string `protobuf:"bytes,3,opt,name=destination" json:"destination,omitempty"` + // Optional. + // An [advanced logs filter](/logging/docs/view/advanced_filters). The only + // exported log entries are those that are in the resource owning the sink and + // that match the filter. The filter must use the log entry format specified + // by the `output_version_format` parameter. For example, in the v2 format: + // + // logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR + Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + // Deprecated. The log entry format to use for this sink's exported log + // entries. The v2 format is used by default and cannot be changed. + OutputVersionFormat LogSink_VersionFormat `protobuf:"varint,6,opt,name=output_version_format,json=outputVersionFormat,enum=google.logging.v2.LogSink_VersionFormat" json:"output_version_format,omitempty"` + // Output only. An IAM identity—a service account or group—under + // which Stackdriver Logging writes the exported log entries to the sink's + // destination. This field is set by + // [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create) + // and + // [sinks.update](/logging/docs/api/reference/rest/v2/projects.sinks/update), + // based on the setting of `unique_writer_identity` in those methods. + // + // Until you grant this identity write-access to the destination, log entry + // exports from this sink will fail. For more information, + // see [Granting access for a + // resource](/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). + // Consult the destination service's documentation to determine the + // appropriate IAM roles to assign to the identity. 
+ WriterIdentity string `protobuf:"bytes,8,opt,name=writer_identity,json=writerIdentity" json:"writer_identity,omitempty"` + // Optional. This field applies only to sinks owned by organizations and + // folders. If the field is false, the default, only the logs owned by the + // sink's parent resource are available for export. If the field is true, then + // logs from all the projects, folders, and billing accounts contained in the + // sink's parent resource are also available for export. Whether a particular + // log entry from the children is exported depends on the sink's filter + // expression. For example, if this field is true, then the filter + // `resource.type=gce_instance` would export all Compute Engine VM instance + // log entries from all projects in the sink's parent. To only export entries + // from certain child projects, filter on the project part of the log name: + // + // logName:("projects/test-project1/" OR "projects/test-project2/") AND + // resource.type=gce_instance + IncludeChildren bool `protobuf:"varint,9,opt,name=include_children,json=includeChildren" json:"include_children,omitempty"` + // Optional. The time at which this sink will begin exporting log entries. + // Log entries are exported only if their timestamp is not earlier than the + // start time. The default value of this field is the time the sink is + // created or updated. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,10,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Optional. The time at which this sink will stop exporting log entries. Log + // entries are exported only if their timestamp is earlier than the end time. + // If this field is not supplied, there is no end time. If both a start time + // and an end time are provided, then the end time must be later than the + // start time. 
+ EndTime *google_protobuf4.Timestamp `protobuf:"bytes,11,opt,name=end_time,json=endTime" json:"end_time,omitempty"` +} + +func (m *LogSink) Reset() { *m = LogSink{} } +func (m *LogSink) String() string { return proto.CompactTextString(m) } +func (*LogSink) ProtoMessage() {} +func (*LogSink) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *LogSink) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LogSink) GetDestination() string { + if m != nil { + return m.Destination + } + return "" +} + +func (m *LogSink) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *LogSink) GetOutputVersionFormat() LogSink_VersionFormat { + if m != nil { + return m.OutputVersionFormat + } + return LogSink_VERSION_FORMAT_UNSPECIFIED +} + +func (m *LogSink) GetWriterIdentity() string { + if m != nil { + return m.WriterIdentity + } + return "" +} + +func (m *LogSink) GetIncludeChildren() bool { + if m != nil { + return m.IncludeChildren + } + return false +} + +func (m *LogSink) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *LogSink) GetEndTime() *google_protobuf4.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +// The parameters to `ListSinks`. +type ListSinksRequest struct { + // Required. The parent resource whose sinks are to be listed: + // + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. 
+ PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListSinksRequest) Reset() { *m = ListSinksRequest{} } +func (m *ListSinksRequest) String() string { return proto.CompactTextString(m) } +func (*ListSinksRequest) ProtoMessage() {} +func (*ListSinksRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ListSinksRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListSinksRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListSinksRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Result returned from `ListSinks`. +type ListSinksResponse struct { + // A list of sinks. + Sinks []*LogSink `protobuf:"bytes,1,rep,name=sinks" json:"sinks,omitempty"` + // If there might be more results than appear in this response, then + // `nextPageToken` is included. To get the next set of results, call the same + // method again using the value of `nextPageToken` as `pageToken`. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListSinksResponse) Reset() { *m = ListSinksResponse{} } +func (m *ListSinksResponse) String() string { return proto.CompactTextString(m) } +func (*ListSinksResponse) ProtoMessage() {} +func (*ListSinksResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *ListSinksResponse) GetSinks() []*LogSink { + if m != nil { + return m.Sinks + } + return nil +} + +func (m *ListSinksResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The parameters to `GetSink`. +type GetSinkRequest struct { + // Required. The resource name of the sink: + // + // "projects/[PROJECT_ID]/sinks/[SINK_ID]" + // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + // "folders/[FOLDER_ID]/sinks/[SINK_ID]" + // + // Example: `"projects/my-project-id/sinks/my-sink-id"`. + SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName" json:"sink_name,omitempty"` +} + +func (m *GetSinkRequest) Reset() { *m = GetSinkRequest{} } +func (m *GetSinkRequest) String() string { return proto.CompactTextString(m) } +func (*GetSinkRequest) ProtoMessage() {} +func (*GetSinkRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *GetSinkRequest) GetSinkName() string { + if m != nil { + return m.SinkName + } + return "" +} + +// The parameters to `CreateSink`. +type CreateSinkRequest struct { + // Required. The resource in which to create the sink: + // + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + // + // Examples: `"projects/my-logging-project"`, `"organizations/123456789"`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. 
The new sink, whose `name` parameter is a sink identifier that + // is not already in use. + Sink *LogSink `protobuf:"bytes,2,opt,name=sink" json:"sink,omitempty"` + // Optional. Determines the kind of IAM identity returned as `writer_identity` + // in the new sink. If this value is omitted or set to false, and if the + // sink's parent is a project, then the value returned as `writer_identity` is + // the same group or service account used by Stackdriver Logging before the + // addition of writer identities to this API. The sink's destination must be + // in the same project as the sink itself. + // + // If this field is set to true, or if the sink is owned by a non-project + // resource such as an organization, then the value of `writer_identity` will + // be a unique service account used only for exports from the new sink. For + // more information, see `writer_identity` in [LogSink][google.logging.v2.LogSink]. + UniqueWriterIdentity bool `protobuf:"varint,3,opt,name=unique_writer_identity,json=uniqueWriterIdentity" json:"unique_writer_identity,omitempty"` +} + +func (m *CreateSinkRequest) Reset() { *m = CreateSinkRequest{} } +func (m *CreateSinkRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSinkRequest) ProtoMessage() {} +func (*CreateSinkRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *CreateSinkRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateSinkRequest) GetSink() *LogSink { + if m != nil { + return m.Sink + } + return nil +} + +func (m *CreateSinkRequest) GetUniqueWriterIdentity() bool { + if m != nil { + return m.UniqueWriterIdentity + } + return false +} + +// The parameters to `UpdateSink`. +type UpdateSinkRequest struct { + // Required. 
The full resource name of the sink to update, including the + // parent resource and the sink identifier: + // + // "projects/[PROJECT_ID]/sinks/[SINK_ID]" + // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + // "folders/[FOLDER_ID]/sinks/[SINK_ID]" + // + // Example: `"projects/my-project-id/sinks/my-sink-id"`. + SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName" json:"sink_name,omitempty"` + // Required. The updated sink, whose name is the same identifier that appears + // as part of `sink_name`. + Sink *LogSink `protobuf:"bytes,2,opt,name=sink" json:"sink,omitempty"` + // Optional. See + // [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create) + // for a description of this field. When updating a sink, the effect of this + // field on the value of `writer_identity` in the updated sink depends on both + // the old and new values of this field: + // + // + If the old and new values of this field are both false or both true, + // then there is no change to the sink's `writer_identity`. + // + If the old value is false and the new value is true, then + // `writer_identity` is changed to a unique service account. + // + It is an error if the old value is true and the new value is + // set to false or defaulted to false. 
+ UniqueWriterIdentity bool `protobuf:"varint,3,opt,name=unique_writer_identity,json=uniqueWriterIdentity" json:"unique_writer_identity,omitempty"` +} + +func (m *UpdateSinkRequest) Reset() { *m = UpdateSinkRequest{} } +func (m *UpdateSinkRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateSinkRequest) ProtoMessage() {} +func (*UpdateSinkRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *UpdateSinkRequest) GetSinkName() string { + if m != nil { + return m.SinkName + } + return "" +} + +func (m *UpdateSinkRequest) GetSink() *LogSink { + if m != nil { + return m.Sink + } + return nil +} + +func (m *UpdateSinkRequest) GetUniqueWriterIdentity() bool { + if m != nil { + return m.UniqueWriterIdentity + } + return false +} + +// The parameters to `DeleteSink`. +type DeleteSinkRequest struct { + // Required. The full resource name of the sink to delete, including the + // parent resource and the sink identifier: + // + // "projects/[PROJECT_ID]/sinks/[SINK_ID]" + // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + // "folders/[FOLDER_ID]/sinks/[SINK_ID]" + // + // Example: `"projects/my-project-id/sinks/my-sink-id"`. + SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName" json:"sink_name,omitempty"` +} + +func (m *DeleteSinkRequest) Reset() { *m = DeleteSinkRequest{} } +func (m *DeleteSinkRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSinkRequest) ProtoMessage() {} +func (*DeleteSinkRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *DeleteSinkRequest) GetSinkName() string { + if m != nil { + return m.SinkName + } + return "" +} + +// Specifies a set of log entries that are not to be stored in Stackdriver +// Logging. If your project receives a large volume of logs, you might be able +// to use exclusions to reduce your chargeable logs. 
Exclusions are processed +// after log sinks, so you can export log entries before they are excluded. +// Audit log entries and log entries from Amazon Web Services are never +// excluded. +type LogExclusion struct { + // Required. A client-assigned identifier, such as + // `"load-balancer-exclusion"`. Identifiers are limited to 100 characters and + // can include only letters, digits, underscores, hyphens, and periods. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. A description of this exclusion. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Required. + // An [advanced logs filter](/logging/docs/view/advanced_filters) + // that matches the log entries to be excluded. By using the + // [sample function](/logging/docs/view/advanced_filters#sample), + // you can exclude less than 100% of the matching log entries. + // For example, the following filter matches 99% of low-severity log + // entries from load balancers: + // + // "resource.type=http_load_balancer severity=ERROR" + // + // The maximum length of the filter is 20000 characters. + Filter string `protobuf:"bytes,3,opt,name=filter" json:"filter,omitempty"` + // Optional. The metric descriptor associated with the logs-based metric. + // If unspecified, it uses a default metric descriptor with a DELTA metric + // kind, INT64 value type, with no labels and a unit of "1". Such a metric + // counts the number of log entries matching the `filter` expression. + // + // The `name`, `type`, and `description` fields in the `metric_descriptor` + // are output only, and is constructed using the `name` and `description` + // field in the LogMetric. + // + // To create a logs-based metric that records a distribution of log values, a + // DELTA metric kind with a DISTRIBUTION value type must be used along with + // a `value_extractor` expression in the LogMetric. 
+ // + // Each label in the metric descriptor must have a matching label + // name as the key and an extractor expression as the value in the + // `label_extractors` map. + // + // The `metric_kind` and `value_type` fields in the `metric_descriptor` cannot + // be updated once initially configured. New labels can be added in the + // `metric_descriptor`, but existing labels cannot be modified except for + // their description. + MetricDescriptor *google_api5.MetricDescriptor `protobuf:"bytes,5,opt,name=metric_descriptor,json=metricDescriptor" json:"metric_descriptor,omitempty"` + // Optional. A `value_extractor` is required when using a distribution + // logs-based metric to extract the values to record from a log entry. + // Two functions are supported for value extraction: `EXTRACT(field)` or + // `REGEXP_EXTRACT(field, regex)`. The argument are: + // 1. field: The name of the log entry field from which the value is to be + // extracted. + // 2. regex: A regular expression using the Google RE2 syntax + // (https://github.com/google/re2/wiki/Syntax) with a single capture + // group to extract data from the specified log entry field. The value + // of the field is converted to a string before applying the regex. + // It is an error to specify a regex that does not include exactly one + // capture group. + // + // The result of the extraction must be convertible to a double type, as the + // distribution always records double values. If either the extraction or + // the conversion to double fails, then those values are not recorded in the + // distribution. + // + // Example: `REGEXP_EXTRACT(jsonPayload.request, ".*quantity=(\d+).*")` + ValueExtractor string `protobuf:"bytes,6,opt,name=value_extractor,json=valueExtractor" json:"value_extractor,omitempty"` + // Optional. A map from a label key string to an extractor expression which is + // used to extract data from a log entry field and assign as the label value. 
+ // Each label key specified in the LabelDescriptor must have an associated + // extractor expression in this map. The syntax of the extractor expression + // is the same as for the `value_extractor` field. + // + // The extracted value is converted to the type defined in the label + // descriptor. If the either the extraction or the type conversion fails, + // the label will have a default value. The default value for a string + // label is an empty string, for an integer label its 0, and for a boolean + // label its `false`. + // + // Note that there are upper bounds on the maximum number of labels and the + // number of active time series that are allowed in a project. + LabelExtractors map[string]string `protobuf:"bytes,7,rep,name=label_extractors,json=labelExtractors" json:"label_extractors,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. The `bucket_options` are required when the logs-based metric is + // using a DISTRIBUTION value type and it describes the bucket boundaries + // used to create a histogram of the extracted values. + BucketOptions *google_api4.Distribution_BucketOptions `protobuf:"bytes,8,opt,name=bucket_options,json=bucketOptions" json:"bucket_options,omitempty"` + // Deprecated. The API version that created or updated this metric. + // The v2 format is used by default and cannot be changed. 
+ Version LogMetric_ApiVersion `protobuf:"varint,4,opt,name=version,enum=google.logging.v2.LogMetric_ApiVersion" json:"version,omitempty"` +} + +func (m *LogMetric) Reset() { *m = LogMetric{} } +func (m *LogMetric) String() string { return proto.CompactTextString(m) } +func (*LogMetric) ProtoMessage() {} +func (*LogMetric) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *LogMetric) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LogMetric) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *LogMetric) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *LogMetric) GetMetricDescriptor() *google_api5.MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +func (m *LogMetric) GetValueExtractor() string { + if m != nil { + return m.ValueExtractor + } + return "" +} + +func (m *LogMetric) GetLabelExtractors() map[string]string { + if m != nil { + return m.LabelExtractors + } + return nil +} + +func (m *LogMetric) GetBucketOptions() *google_api4.Distribution_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *LogMetric) GetVersion() LogMetric_ApiVersion { + if m != nil { + return m.Version + } + return LogMetric_V2 +} + +// The parameters to ListLogMetrics. +type ListLogMetricsRequest struct { + // Required. The name of the project containing the metrics: + // + // "projects/[PROJECT_ID]" + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. 
The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListLogMetricsRequest) Reset() { *m = ListLogMetricsRequest{} } +func (m *ListLogMetricsRequest) String() string { return proto.CompactTextString(m) } +func (*ListLogMetricsRequest) ProtoMessage() {} +func (*ListLogMetricsRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *ListLogMetricsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListLogMetricsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListLogMetricsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Result returned from ListLogMetrics. +type ListLogMetricsResponse struct { + // A list of logs-based metrics. + Metrics []*LogMetric `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"` + // If there might be more results than appear in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListLogMetricsResponse) Reset() { *m = ListLogMetricsResponse{} } +func (m *ListLogMetricsResponse) String() string { return proto.CompactTextString(m) } +func (*ListLogMetricsResponse) ProtoMessage() {} +func (*ListLogMetricsResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } + +func (m *ListLogMetricsResponse) GetMetrics() []*LogMetric { + if m != nil { + return m.Metrics + } + return nil +} + +func (m *ListLogMetricsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The parameters to GetLogMetric. +type GetLogMetricRequest struct { + // The resource name of the desired metric: + // + // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName" json:"metric_name,omitempty"` +} + +func (m *GetLogMetricRequest) Reset() { *m = GetLogMetricRequest{} } +func (m *GetLogMetricRequest) String() string { return proto.CompactTextString(m) } +func (*GetLogMetricRequest) ProtoMessage() {} +func (*GetLogMetricRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{3} } + +func (m *GetLogMetricRequest) GetMetricName() string { + if m != nil { + return m.MetricName + } + return "" +} + +// The parameters to CreateLogMetric. +type CreateLogMetricRequest struct { + // The resource name of the project in which to create the metric: + // + // "projects/[PROJECT_ID]" + // + // The new metric must be provided in the request. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The new logs-based metric, which must not have an identifier that + // already exists. 
+ Metric *LogMetric `protobuf:"bytes,2,opt,name=metric" json:"metric,omitempty"` +} + +func (m *CreateLogMetricRequest) Reset() { *m = CreateLogMetricRequest{} } +func (m *CreateLogMetricRequest) String() string { return proto.CompactTextString(m) } +func (*CreateLogMetricRequest) ProtoMessage() {} +func (*CreateLogMetricRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{4} } + +func (m *CreateLogMetricRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateLogMetricRequest) GetMetric() *LogMetric { + if m != nil { + return m.Metric + } + return nil +} + +// The parameters to UpdateLogMetric. +type UpdateLogMetricRequest struct { + // The resource name of the metric to update: + // + // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + // + // The updated metric must be provided in the request and it's + // `name` field must be the same as `[METRIC_ID]` If the metric + // does not exist in `[PROJECT_ID]`, then a new metric is created. + MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName" json:"metric_name,omitempty"` + // The updated metric. + Metric *LogMetric `protobuf:"bytes,2,opt,name=metric" json:"metric,omitempty"` +} + +func (m *UpdateLogMetricRequest) Reset() { *m = UpdateLogMetricRequest{} } +func (m *UpdateLogMetricRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateLogMetricRequest) ProtoMessage() {} +func (*UpdateLogMetricRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{5} } + +func (m *UpdateLogMetricRequest) GetMetricName() string { + if m != nil { + return m.MetricName + } + return "" +} + +func (m *UpdateLogMetricRequest) GetMetric() *LogMetric { + if m != nil { + return m.Metric + } + return nil +} + +// The parameters to DeleteLogMetric. 
+type DeleteLogMetricRequest struct { + // The resource name of the metric to delete: + // + // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName" json:"metric_name,omitempty"` +} + +func (m *DeleteLogMetricRequest) Reset() { *m = DeleteLogMetricRequest{} } +func (m *DeleteLogMetricRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteLogMetricRequest) ProtoMessage() {} +func (*DeleteLogMetricRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{6} } + +func (m *DeleteLogMetricRequest) GetMetricName() string { + if m != nil { + return m.MetricName + } + return "" +} + +func init() { + proto.RegisterType((*LogMetric)(nil), "google.logging.v2.LogMetric") + proto.RegisterType((*ListLogMetricsRequest)(nil), "google.logging.v2.ListLogMetricsRequest") + proto.RegisterType((*ListLogMetricsResponse)(nil), "google.logging.v2.ListLogMetricsResponse") + proto.RegisterType((*GetLogMetricRequest)(nil), "google.logging.v2.GetLogMetricRequest") + proto.RegisterType((*CreateLogMetricRequest)(nil), "google.logging.v2.CreateLogMetricRequest") + proto.RegisterType((*UpdateLogMetricRequest)(nil), "google.logging.v2.UpdateLogMetricRequest") + proto.RegisterType((*DeleteLogMetricRequest)(nil), "google.logging.v2.DeleteLogMetricRequest") + proto.RegisterEnum("google.logging.v2.LogMetric_ApiVersion", LogMetric_ApiVersion_name, LogMetric_ApiVersion_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for MetricsServiceV2 service + +type MetricsServiceV2Client interface { + // Lists logs-based metrics. 
+ ListLogMetrics(ctx context.Context, in *ListLogMetricsRequest, opts ...grpc.CallOption) (*ListLogMetricsResponse, error) + // Gets a logs-based metric. + GetLogMetric(ctx context.Context, in *GetLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) + // Creates a logs-based metric. + CreateLogMetric(ctx context.Context, in *CreateLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) + // Creates or updates a logs-based metric. + UpdateLogMetric(ctx context.Context, in *UpdateLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) + // Deletes a logs-based metric. + DeleteLogMetric(ctx context.Context, in *DeleteLogMetricRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) +} + +type metricsServiceV2Client struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceV2Client(cc *grpc.ClientConn) MetricsServiceV2Client { + return &metricsServiceV2Client{cc} +} + +func (c *metricsServiceV2Client) ListLogMetrics(ctx context.Context, in *ListLogMetricsRequest, opts ...grpc.CallOption) (*ListLogMetricsResponse, error) { + out := new(ListLogMetricsResponse) + err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/ListLogMetrics", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricsServiceV2Client) GetLogMetric(ctx context.Context, in *GetLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) { + out := new(LogMetric) + err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/GetLogMetric", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricsServiceV2Client) CreateLogMetric(ctx context.Context, in *CreateLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) { + out := new(LogMetric) + err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/CreateLogMetric", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricsServiceV2Client) UpdateLogMetric(ctx context.Context, in *UpdateLogMetricRequest, opts ...grpc.CallOption) (*LogMetric, error) { + out := new(LogMetric) + err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/UpdateLogMetric", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricsServiceV2Client) DeleteLogMetric(ctx context.Context, in *DeleteLogMetricRequest, opts ...grpc.CallOption) (*google_protobuf5.Empty, error) { + out := new(google_protobuf5.Empty) + err := grpc.Invoke(ctx, "/google.logging.v2.MetricsServiceV2/DeleteLogMetric", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for MetricsServiceV2 service + +type MetricsServiceV2Server interface { + // Lists logs-based metrics. + ListLogMetrics(context.Context, *ListLogMetricsRequest) (*ListLogMetricsResponse, error) + // Gets a logs-based metric. + GetLogMetric(context.Context, *GetLogMetricRequest) (*LogMetric, error) + // Creates a logs-based metric. + CreateLogMetric(context.Context, *CreateLogMetricRequest) (*LogMetric, error) + // Creates or updates a logs-based metric. + UpdateLogMetric(context.Context, *UpdateLogMetricRequest) (*LogMetric, error) + // Deletes a logs-based metric. 
+ DeleteLogMetric(context.Context, *DeleteLogMetricRequest) (*google_protobuf5.Empty, error) +} + +func RegisterMetricsServiceV2Server(s *grpc.Server, srv MetricsServiceV2Server) { + s.RegisterService(&_MetricsServiceV2_serviceDesc, srv) +} + +func _MetricsServiceV2_ListLogMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLogMetricsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceV2Server).ListLogMetrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.MetricsServiceV2/ListLogMetrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceV2Server).ListLogMetrics(ctx, req.(*ListLogMetricsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricsServiceV2_GetLogMetric_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetLogMetricRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceV2Server).GetLogMetric(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.MetricsServiceV2/GetLogMetric", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceV2Server).GetLogMetric(ctx, req.(*GetLogMetricRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricsServiceV2_CreateLogMetric_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateLogMetricRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceV2Server).CreateLogMetric(ctx, in) + } 
+ info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.MetricsServiceV2/CreateLogMetric", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceV2Server).CreateLogMetric(ctx, req.(*CreateLogMetricRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricsServiceV2_UpdateLogMetric_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateLogMetricRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceV2Server).UpdateLogMetric(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.MetricsServiceV2/UpdateLogMetric", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceV2Server).UpdateLogMetric(ctx, req.(*UpdateLogMetricRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricsServiceV2_DeleteLogMetric_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteLogMetricRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceV2Server).DeleteLogMetric(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.logging.v2.MetricsServiceV2/DeleteLogMetric", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceV2Server).DeleteLogMetric(ctx, req.(*DeleteLogMetricRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricsServiceV2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.logging.v2.MetricsServiceV2", + HandlerType: (*MetricsServiceV2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListLogMetrics", + Handler: 
_MetricsServiceV2_ListLogMetrics_Handler, + }, + { + MethodName: "GetLogMetric", + Handler: _MetricsServiceV2_GetLogMetric_Handler, + }, + { + MethodName: "CreateLogMetric", + Handler: _MetricsServiceV2_CreateLogMetric_Handler, + }, + { + MethodName: "UpdateLogMetric", + Handler: _MetricsServiceV2_UpdateLogMetric_Handler, + }, + { + MethodName: "DeleteLogMetric", + Handler: _MetricsServiceV2_DeleteLogMetric_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/logging/v2/logging_metrics.proto", +} + +func init() { proto.RegisterFile("google/logging/v2/logging_metrics.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 835 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xc1, 0x6e, 0xdb, 0x46, + 0x10, 0xed, 0xca, 0xb6, 0x1c, 0x8f, 0x1b, 0x4b, 0xd9, 0x24, 0x8a, 0xa0, 0x28, 0x88, 0xca, 0x83, + 0xa4, 0xf8, 0x40, 0x36, 0x6c, 0x61, 0xa4, 0x29, 0x7a, 0x88, 0x62, 0x23, 0x28, 0xa0, 0xb4, 0x06, + 0xd3, 0xea, 0x50, 0x14, 0x20, 0x28, 0x69, 0x44, 0x6c, 0x45, 0x71, 0x59, 0xee, 0x4a, 0xb0, 0x53, + 0xe4, 0x52, 0xa4, 0xa7, 0x02, 0x3d, 0xb4, 0x1f, 0xd0, 0x7b, 0x7f, 0xa7, 0xfd, 0x84, 0x7e, 0x44, + 0x8f, 0x05, 0x97, 0x4b, 0x99, 0x91, 0x18, 0xcb, 0xf0, 0xc9, 0xbb, 0x33, 0x6f, 0xf6, 0xbd, 0x99, + 0x79, 0xb2, 0x04, 0x1d, 0x9f, 0x73, 0x3f, 0x40, 0x2b, 0xe0, 0xbe, 0xcf, 0x42, 0xdf, 0x5a, 0xd8, + 0xd9, 0xd1, 0x9d, 0xa1, 0x8c, 0xd9, 0x48, 0x98, 0x51, 0xcc, 0x25, 0xa7, 0xb7, 0x52, 0xa0, 0xa9, + 0xb3, 0xe6, 0xc2, 0x6e, 0x34, 0x75, 0xad, 0x17, 0x31, 0xcb, 0x0b, 0x43, 0x2e, 0x3d, 0xc9, 0x78, + 0xa8, 0x0b, 0x1a, 0x0f, 0x72, 0xd9, 0x31, 0x13, 0x32, 0x66, 0xc3, 0x79, 0x92, 0xd7, 0xe9, 0x7b, + 0xb9, 0x74, 0xca, 0xa4, 0x13, 0xf7, 0x75, 0x42, 0xdd, 0x86, 0xf3, 0x89, 0x85, 0xb3, 0x48, 0x9e, + 0xeb, 0x64, 0x6b, 0x35, 0x39, 0x61, 0x18, 0x8c, 0xdd, 0x99, 0x27, 0xa6, 0x29, 0xc2, 0xf8, 0x73, + 0x1b, 0xf6, 0xfa, 0xdc, 0x7f, 0xa9, 0x9e, 0xa4, 0x14, 0xb6, 0x43, 0x6f, 0x86, 0x75, 0xd2, 0x22, + 0xdd, 0x3d, 0x47, 0x9d, 
0x69, 0x0b, 0xf6, 0xc7, 0x28, 0x46, 0x31, 0x8b, 0x12, 0x39, 0xf5, 0x92, + 0x4a, 0xe5, 0x43, 0xb4, 0x06, 0xe5, 0x09, 0x0b, 0x24, 0xc6, 0xf5, 0x2d, 0x95, 0xd4, 0x37, 0xfa, + 0x25, 0xdc, 0x4a, 0xa5, 0xba, 0x19, 0x9a, 0xc7, 0xf5, 0x9d, 0x16, 0xe9, 0xee, 0xdb, 0x4d, 0x53, + 0xcf, 0xc7, 0x8b, 0x98, 0x99, 0x92, 0x1f, 0x2f, 0x31, 0x4e, 0x75, 0xb6, 0x12, 0xa1, 0x1d, 0xa8, + 0x2c, 0xbc, 0x60, 0x8e, 0x2e, 0x9e, 0xc9, 0xd8, 0x1b, 0x25, 0x0f, 0x95, 0x15, 0xd7, 0x81, 0x0a, + 0x9f, 0x64, 0x51, 0xfa, 0x3d, 0x54, 0x03, 0x6f, 0x88, 0xc1, 0x05, 0x50, 0xd4, 0x77, 0x5b, 0x5b, + 0xdd, 0x7d, 0xfb, 0xb1, 0xb9, 0xb6, 0x12, 0x73, 0xd9, 0xb9, 0xd9, 0x4f, 0x8a, 0x96, 0xcf, 0x88, + 0x93, 0x50, 0xc6, 0xe7, 0x4e, 0x25, 0x78, 0x37, 0x4a, 0x5f, 0xc2, 0xc1, 0x70, 0x3e, 0x9a, 0xa2, + 0x74, 0xb9, 0x6a, 0x5d, 0xd4, 0x6f, 0xa8, 0x76, 0xda, 0xf9, 0x76, 0x8e, 0xf3, 0xdb, 0xeb, 0x29, + 0xf8, 0xd7, 0x29, 0xda, 0xb9, 0x39, 0xcc, 0x5f, 0xe9, 0x33, 0xd8, 0x5d, 0x60, 0x2c, 0x92, 0xb1, + 0x6e, 0xb7, 0x48, 0xf7, 0xc0, 0xee, 0x5c, 0xaa, 0xf1, 0x59, 0xc4, 0x06, 0x29, 0xdc, 0xc9, 0xea, + 0x1a, 0x3d, 0xb8, 0x53, 0x24, 0x9d, 0x56, 0x61, 0x6b, 0x8a, 0xe7, 0x7a, 0x91, 0xc9, 0x91, 0xde, + 0x81, 0x1d, 0x35, 0x2b, 0xbd, 0xc1, 0xf4, 0xf2, 0xb4, 0xf4, 0x84, 0x18, 0x4d, 0x80, 0x8b, 0xa7, + 0x69, 0x19, 0x4a, 0x03, 0xbb, 0xfa, 0x81, 0xfa, 0xfb, 0xb8, 0x4a, 0x8c, 0x29, 0xdc, 0xed, 0x33, + 0x21, 0x97, 0x32, 0x84, 0x83, 0x3f, 0xce, 0x51, 0xc8, 0x64, 0xed, 0x91, 0x17, 0x63, 0x28, 0x35, + 0x8b, 0xbe, 0xd1, 0x07, 0x00, 0x91, 0xe7, 0xa3, 0x2b, 0xf9, 0x14, 0x33, 0xbf, 0xec, 0x25, 0x91, + 0x6f, 0x92, 0x00, 0xbd, 0x0f, 0xea, 0xe2, 0x0a, 0xf6, 0x1a, 0x95, 0x61, 0x76, 0x9c, 0x1b, 0x49, + 0xe0, 0x15, 0x7b, 0x8d, 0xc6, 0x19, 0xd4, 0x56, 0xc9, 0x44, 0xc4, 0x43, 0x81, 0xf4, 0x08, 0x76, + 0xf5, 0x27, 0xac, 0x4e, 0xd4, 0x3e, 0x9b, 0x97, 0xcd, 0xca, 0xc9, 0xc0, 0xb4, 0x0d, 0x95, 0x10, + 0xcf, 0xa4, 0xbb, 0x26, 0xe9, 0x66, 0x12, 0x3e, 0xcd, 0x64, 0x19, 0x47, 0x70, 0xfb, 0x05, 0x5e, + 0x10, 0x67, 0x4d, 0x3e, 0x84, 0x7d, 0xed, 0xe1, 0xdc, 0x07, 
0x03, 0xd2, 0xd0, 0x57, 0xde, 0x0c, + 0x8d, 0x09, 0xd4, 0x9e, 0xc7, 0xe8, 0x49, 0x5c, 0x2b, 0x7d, 0xdf, 0x7c, 0x3e, 0x85, 0x72, 0x5a, + 0xaf, 0x84, 0x6c, 0x6a, 0x44, 0x63, 0x0d, 0x0e, 0xb5, 0x6f, 0xa3, 0x71, 0x11, 0xcf, 0x26, 0x89, + 0xd7, 0x24, 0xfc, 0x0c, 0x6a, 0xc7, 0x18, 0xe0, 0x35, 0x08, 0xed, 0x7f, 0x76, 0xa0, 0xaa, 0xf7, + 0xf7, 0x0a, 0xe3, 0x05, 0x1b, 0xe1, 0xc0, 0xa6, 0xbf, 0x11, 0x38, 0x78, 0x77, 0xb7, 0xb4, 0x5b, + 0x24, 0xa4, 0xc8, 0x6b, 0x8d, 0x47, 0x57, 0x40, 0xa6, 0x46, 0x31, 0x3a, 0x3f, 0xff, 0xfd, 0xef, + 0x1f, 0xa5, 0x8f, 0xe8, 0xc3, 0xe4, 0x9f, 0xf3, 0x4f, 0xe9, 0xcc, 0xbf, 0x88, 0x62, 0xfe, 0x03, + 0x8e, 0xa4, 0xb0, 0x0e, 0xdf, 0x58, 0x99, 0x33, 0xde, 0x12, 0xf8, 0x30, 0xbf, 0x72, 0xda, 0x2e, + 0x20, 0x29, 0xf0, 0x44, 0xe3, 0xd2, 0xf9, 0x19, 0xa6, 0xe2, 0xef, 0xd2, 0xb6, 0xe2, 0xcf, 0x0d, + 0x2a, 0x27, 0x22, 0xd3, 0x60, 0x1d, 0xbe, 0xa1, 0xbf, 0x12, 0xa8, 0xac, 0x38, 0x88, 0x16, 0xb5, + 0x5b, 0xec, 0xb2, 0x0d, 0x62, 0x2c, 0x25, 0xe6, 0x91, 0xb1, 0x69, 0x18, 0x4f, 0xf5, 0xd6, 0xe9, + 0xef, 0x04, 0x2a, 0x2b, 0x3e, 0x2b, 0x54, 0x53, 0xec, 0xc5, 0x0d, 0x6a, 0x8e, 0x94, 0x9a, 0x8f, + 0x1b, 0x57, 0x1c, 0xcd, 0x52, 0xd4, 0x5b, 0x02, 0x95, 0x15, 0x2f, 0x16, 0x8a, 0x2a, 0xf6, 0x6b, + 0xa3, 0x96, 0x41, 0xb3, 0xaf, 0x41, 0xf3, 0x24, 0xf9, 0x8e, 0xcc, 0x36, 0x75, 0x78, 0x45, 0x39, + 0xbd, 0x5f, 0x08, 0xdc, 0x1d, 0xf1, 0xd9, 0x3a, 0x71, 0xef, 0x76, 0x3f, 0x3d, 0x6b, 0x2f, 0x9e, + 0x26, 0x3c, 0xa7, 0xe4, 0xbb, 0x27, 0x1a, 0xe9, 0xf3, 0xc0, 0x0b, 0x7d, 0x93, 0xc7, 0xbe, 0xe5, + 0x63, 0xa8, 0x54, 0x58, 0x69, 0xca, 0x8b, 0x98, 0xc8, 0xfd, 0x98, 0xf8, 0x5c, 0x1f, 0xff, 0x23, + 0xe4, 0xaf, 0xd2, 0xbd, 0x17, 0x69, 0xf5, 0xf3, 0x80, 0xcf, 0xc7, 0xa6, 0x66, 0x30, 0x07, 0xf6, + 0xb0, 0xac, 0x5e, 0xf8, 0xe4, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x07, 0x4b, 0xa6, 0x8d, + 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go b/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go new file mode 
100644 index 000000000..4f947feab --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go @@ -0,0 +1,596 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/longrunning/operations.proto + +/* +Package longrunning is a generated protocol buffer package. + +It is generated from these files: + google/longrunning/operations.proto + +It has these top-level messages: + Operation + GetOperationRequest + ListOperationsRequest + ListOperationsResponse + CancelOperationRequest + DeleteOperationRequest +*/ +package longrunning + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// This resource represents a long-running operation that is the result of a +// network API call. +type Operation struct { + // The server-assigned name, which is only unique within the same service that + // originally returns it. If you use the default HTTP mapping, the + // `name` should have the format of `operations/some/unique/name`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Service-specific metadata associated with the operation. 
It typically + // contains progress information and common metadata such as create time. + // Some services might not provide such metadata. Any method that returns a + // long-running operation should document the metadata type, if any. + Metadata *google_protobuf1.Any `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"` + // If the value is `false`, it means the operation is still in progress. + // If true, the operation is completed, and either `error` or `response` is + // available. + Done bool `protobuf:"varint,3,opt,name=done" json:"done,omitempty"` + // The operation result, which can be either an `error` or a valid `response`. + // If `done` == `false`, neither `error` nor `response` is set. + // If `done` == `true`, exactly one of `error` or `response` is set. + // + // Types that are valid to be assigned to Result: + // *Operation_Error + // *Operation_Response + Result isOperation_Result `protobuf_oneof:"result"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isOperation_Result interface { + isOperation_Result() +} + +type Operation_Error struct { + Error *google_rpc.Status `protobuf:"bytes,4,opt,name=error,oneof"` +} +type Operation_Response struct { + Response *google_protobuf1.Any `protobuf:"bytes,5,opt,name=response,oneof"` +} + +func (*Operation_Error) isOperation_Result() {} +func (*Operation_Response) isOperation_Result() {} + +func (m *Operation) GetResult() isOperation_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *Operation) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Operation) GetMetadata() *google_protobuf1.Any { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Operation) GetDone() bool { + if m != nil { + return m.Done + } + return 
false +} + +func (m *Operation) GetError() *google_rpc.Status { + if x, ok := m.GetResult().(*Operation_Error); ok { + return x.Error + } + return nil +} + +func (m *Operation) GetResponse() *google_protobuf1.Any { + if x, ok := m.GetResult().(*Operation_Response); ok { + return x.Response + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Operation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Operation_OneofMarshaler, _Operation_OneofUnmarshaler, _Operation_OneofSizer, []interface{}{ + (*Operation_Error)(nil), + (*Operation_Response)(nil), + } +} + +func _Operation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Operation) + // result + switch x := m.Result.(type) { + case *Operation_Error: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Error); err != nil { + return err + } + case *Operation_Response: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Operation.Result has unexpected type %T", x) + } + return nil +} + +func _Operation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Operation) + switch tag { + case 4: // result.error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_rpc.Status) + err := b.DecodeMessage(msg) + m.Result = &Operation_Error{msg} + return true, err + case 5: // result.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Any) + err := b.DecodeMessage(msg) + m.Result = &Operation_Response{msg} + return true, err + default: + return false, nil + } +} + +func _Operation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Operation) + // 
result + switch x := m.Result.(type) { + case *Operation_Error: + s := proto.Size(x.Error) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Operation_Response: + s := proto.Size(x.Response) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +type GetOperationRequest struct { + // The name of the operation resource. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetOperationRequest) Reset() { *m = GetOperationRequest{} } +func (m *GetOperationRequest) String() string { return proto.CompactTextString(m) } +func (*GetOperationRequest) ProtoMessage() {} +func (*GetOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GetOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +type ListOperationsRequest struct { + // The name of the operation collection. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // The standard list filter. + Filter string `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` + // The standard list page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The standard list page token. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListOperationsRequest) Reset() { *m = ListOperationsRequest{} } +func (m *ListOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListOperationsRequest) ProtoMessage() {} +func (*ListOperationsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListOperationsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListOperationsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListOperationsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListOperationsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +type ListOperationsResponse struct { + // A list of operations that matches the specified filter in the request. + Operations []*Operation `protobuf:"bytes,1,rep,name=operations" json:"operations,omitempty"` + // The standard List next-page token. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListOperationsResponse) Reset() { *m = ListOperationsResponse{} } +func (m *ListOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListOperationsResponse) ProtoMessage() {} +func (*ListOperationsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListOperationsResponse) GetOperations() []*Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListOperationsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. +type CancelOperationRequest struct { + // The name of the operation resource to be cancelled. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *CancelOperationRequest) Reset() { *m = CancelOperationRequest{} } +func (m *CancelOperationRequest) String() string { return proto.CompactTextString(m) } +func (*CancelOperationRequest) ProtoMessage() {} +func (*CancelOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *CancelOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +type DeleteOperationRequest struct { + // The name of the operation resource to be deleted. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteOperationRequest) Reset() { *m = DeleteOperationRequest{} } +func (m *DeleteOperationRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteOperationRequest) ProtoMessage() {} +func (*DeleteOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *DeleteOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*Operation)(nil), "google.longrunning.Operation") + proto.RegisterType((*GetOperationRequest)(nil), "google.longrunning.GetOperationRequest") + proto.RegisterType((*ListOperationsRequest)(nil), "google.longrunning.ListOperationsRequest") + proto.RegisterType((*ListOperationsResponse)(nil), "google.longrunning.ListOperationsResponse") + proto.RegisterType((*CancelOperationRequest)(nil), "google.longrunning.CancelOperationRequest") + proto.RegisterType((*DeleteOperationRequest)(nil), "google.longrunning.DeleteOperationRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Operations service + +type OperationsClient interface { + // Lists operations that match the specified filter in the request. If the + // server doesn't support this method, it returns `UNIMPLEMENTED`. + // + // NOTE: the `name` binding below allows API services to override the binding + // to use different resource name schemes, such as `users/*/operations`. + ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) + // Gets the latest state of a long-running operation. 
Clients can use this + // method to poll the operation result at intervals as recommended by the API + // service. + GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) + // Deletes a long-running operation. This method indicates that the client is + // no longer interested in the operation result. It does not cancel the + // operation. If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not + // guaranteed. If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, + // corresponding to `Code.CANCELLED`. + CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) +} + +type operationsClient struct { + cc *grpc.ClientConn +} + +func NewOperationsClient(cc *grpc.ClientConn) OperationsClient { + return &operationsClient{cc} +} + +func (c *operationsClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) { + out := new(ListOperationsResponse) + err := grpc.Invoke(ctx, "/google.longrunning.Operations/ListOperations", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *operationsClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := grpc.Invoke(ctx, "/google.longrunning.Operations/GetOperation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *operationsClient) DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.longrunning.Operations/DeleteOperation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *operationsClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.longrunning.Operations/CancelOperation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Operations service + +type OperationsServer interface { + // Lists operations that match the specified filter in the request. If the + // server doesn't support this method, it returns `UNIMPLEMENTED`. + // + // NOTE: the `name` binding below allows API services to override the binding + // to use different resource name schemes, such as `users/*/operations`. + ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) + // Gets the latest state of a long-running operation. Clients can use this + // method to poll the operation result at intervals as recommended by the API + // service. + GetOperation(context.Context, *GetOperationRequest) (*Operation, error) + // Deletes a long-running operation. This method indicates that the client is + // no longer interested in the operation result. It does not cancel the + // operation. 
If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + DeleteOperation(context.Context, *DeleteOperationRequest) (*google_protobuf2.Empty, error) + // Starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not + // guaranteed. If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, + // corresponding to `Code.CANCELLED`. + CancelOperation(context.Context, *CancelOperationRequest) (*google_protobuf2.Empty, error) +} + +func RegisterOperationsServer(s *grpc.Server, srv OperationsServer) { + s.RegisterService(&_Operations_serviceDesc, srv) +} + +func _Operations_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationsServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).ListOperations(ctx, req.(*ListOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Operations_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(GetOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationsServer).GetOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/GetOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).GetOperation(ctx, req.(*GetOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Operations_DeleteOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationsServer).DeleteOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/DeleteOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).DeleteOperation(ctx, req.(*DeleteOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Operations_CancelOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationsServer).CancelOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/CancelOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).CancelOperation(ctx, req.(*CancelOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Operations_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.longrunning.Operations", + HandlerType: 
(*OperationsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListOperations", + Handler: _Operations_ListOperations_Handler, + }, + { + MethodName: "GetOperation", + Handler: _Operations_GetOperation_Handler, + }, + { + MethodName: "DeleteOperation", + Handler: _Operations_DeleteOperation_Handler, + }, + { + MethodName: "CancelOperation", + Handler: _Operations_CancelOperation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/longrunning/operations.proto", +} + +func init() { proto.RegisterFile("google/longrunning/operations.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 586 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xad, 0xd3, 0x24, 0x4a, 0xa6, 0x40, 0xa4, 0x85, 0xba, 0xc6, 0x25, 0x22, 0x32, 0x08, 0x52, + 0xab, 0xb2, 0x21, 0xdc, 0x8a, 0x72, 0x20, 0x80, 0xda, 0x43, 0x25, 0x22, 0x97, 0x13, 0x97, 0x6a, + 0x9b, 0x4e, 0x2d, 0x0b, 0x67, 0xd7, 0xac, 0x37, 0xd0, 0x16, 0x55, 0x11, 0x1c, 0x38, 0x71, 0xe3, + 0x2f, 0xf8, 0x19, 0x0e, 0xfc, 0x02, 0x1f, 0x82, 0xbc, 0x76, 0x62, 0x93, 0x3a, 0x28, 0xb7, 0xf5, + 0xcc, 0x9b, 0x79, 0xf3, 0xde, 0xce, 0x1a, 0x1e, 0xf8, 0x9c, 0xfb, 0x21, 0xba, 0x21, 0x67, 0xbe, + 0x98, 0x30, 0x16, 0x30, 0xdf, 0xe5, 0x11, 0x0a, 0x2a, 0x03, 0xce, 0x62, 0x27, 0x12, 0x5c, 0x72, + 0x42, 0x52, 0x90, 0x53, 0x00, 0x99, 0xf7, 0xb2, 0x42, 0x1a, 0x05, 0x2e, 0x65, 0x8c, 0xcb, 0x62, + 0x85, 0x79, 0x37, 0xcb, 0xaa, 0xaf, 0x93, 0xc9, 0x99, 0x4b, 0xd9, 0x45, 0x96, 0xda, 0x5e, 0x4c, + 0xe1, 0x38, 0x92, 0xb3, 0xe4, 0x56, 0x96, 0x14, 0xd1, 0xc8, 0x8d, 0x25, 0x95, 0x93, 0xac, 0xa1, + 0xf5, 0x4b, 0x83, 0xe6, 0x9b, 0xd9, 0x5c, 0x84, 0x40, 0x95, 0xd1, 0x31, 0x1a, 0x5a, 0x47, 0xeb, + 0x36, 0x3d, 0x75, 0x26, 0x4f, 0xa0, 0x31, 0x46, 0x49, 0x4f, 0xa9, 0xa4, 0x46, 0xa5, 0xa3, 0x75, + 0x37, 0x7a, 0x77, 0x9c, 0x6c, 0xee, 0x19, 0x95, 0xf3, 0x82, 0x5d, 0x78, 0x73, 0x54, 0xd2, 0xe5, + 0x94, 0x33, 0x34, 0xd6, 0x3b, 0x5a, 0xb7, 0xe1, 
0xa9, 0x33, 0xb1, 0xa1, 0x86, 0x42, 0x70, 0x61, + 0x54, 0x55, 0x0b, 0x32, 0x6b, 0x21, 0xa2, 0x91, 0x73, 0xa4, 0x06, 0x3a, 0x58, 0xf3, 0x52, 0x08, + 0xe9, 0x41, 0x43, 0x60, 0x1c, 0x71, 0x16, 0xa3, 0x51, 0x5b, 0xce, 0x78, 0xb0, 0xe6, 0xcd, 0x71, + 0x83, 0x06, 0xd4, 0x05, 0xc6, 0x93, 0x50, 0x5a, 0x3b, 0x70, 0x7b, 0x1f, 0xe5, 0x5c, 0x93, 0x87, + 0x1f, 0x26, 0x18, 0xcb, 0x32, 0x69, 0xd6, 0x14, 0x36, 0x0f, 0x83, 0x38, 0xc7, 0xc6, 0x8b, 0xe0, + 0x6a, 0xc1, 0x07, 0x1d, 0xea, 0x67, 0x41, 0x28, 0x51, 0x64, 0x2d, 0xb2, 0x2f, 0xb2, 0x0d, 0xcd, + 0x88, 0xfa, 0x78, 0x1c, 0x07, 0x97, 0xa8, 0x0c, 0xaa, 0x79, 0x8d, 0x24, 0x70, 0x14, 0x5c, 0x22, + 0x69, 0x03, 0xa8, 0xa4, 0xe4, 0xef, 0x91, 0x29, 0x43, 0x9a, 0x9e, 0x82, 0xbf, 0x4d, 0x02, 0xd6, + 0x14, 0xf4, 0xc5, 0x01, 0x52, 0x3d, 0xa4, 0x0f, 0x90, 0xaf, 0x8b, 0xa1, 0x75, 0xd6, 0xbb, 0x1b, + 0xbd, 0xb6, 0x73, 0x7d, 0x5f, 0x9c, 0x5c, 0x68, 0xa1, 0x80, 0x3c, 0x82, 0x16, 0xc3, 0x73, 0x79, + 0x5c, 0x20, 0xaf, 0x28, 0xf2, 0x9b, 0x49, 0x78, 0x38, 0x1f, 0x60, 0x17, 0xf4, 0x97, 0x94, 0x8d, + 0x30, 0x5c, 0xc9, 0xaf, 0x5d, 0xd0, 0x5f, 0x61, 0x88, 0x12, 0x57, 0x41, 0xf7, 0xbe, 0x57, 0x01, + 0x72, 0x65, 0xe4, 0x9b, 0x06, 0xb7, 0xfe, 0x15, 0x4b, 0x76, 0xca, 0x04, 0x95, 0xde, 0x88, 0x69, + 0xaf, 0x02, 0x4d, 0xbd, 0xb3, 0xda, 0x5f, 0x7f, 0xff, 0xf9, 0x51, 0xd9, 0x22, 0x9b, 0xee, 0xc7, + 0xa7, 0xee, 0xe7, 0x64, 0x96, 0x7e, 0x6e, 0xcd, 0x15, 0x39, 0x87, 0x1b, 0xc5, 0x05, 0x21, 0x8f, + 0xcb, 0x5a, 0x97, 0xac, 0x90, 0xf9, 0x7f, 0xff, 0xad, 0x8e, 0xa2, 0x35, 0x89, 0x51, 0x46, 0xeb, + 0xda, 0xf6, 0x15, 0xf9, 0x04, 0xad, 0x05, 0xff, 0x48, 0xa9, 0xae, 0x72, 0x93, 0x4d, 0xfd, 0xda, + 0x2b, 0x78, 0x9d, 0x3c, 0xf1, 0x19, 0xb1, 0xbd, 0x9c, 0xf8, 0x8b, 0x06, 0xad, 0x85, 0x7b, 0x2e, + 0x67, 0x2e, 0x5f, 0x86, 0xa5, 0xcc, 0xb6, 0x62, 0x7e, 0x68, 0xdd, 0x5f, 0xc6, 0xbc, 0x37, 0x52, + 0x0d, 0xf7, 0x34, 0x7b, 0x30, 0x05, 0x7d, 0xc4, 0xc7, 0x25, 0xa4, 0x83, 0x56, 0x7e, 0x87, 0xc3, + 0xa4, 0xff, 0x50, 0x7b, 0xd7, 0xcf, 0x60, 0x3e, 0x0f, 0x29, 0xf3, 0x1d, 0x2e, 0x7c, 
0xd7, 0x47, + 0xa6, 0xd8, 0xdd, 0x34, 0x45, 0xa3, 0x20, 0x2e, 0xfe, 0x5d, 0x9f, 0x17, 0xce, 0x3f, 0x2b, 0x64, + 0x3f, 0xad, 0x3f, 0xe4, 0xcc, 0xf7, 0xd2, 0xe0, 0x49, 0x5d, 0x95, 0x3f, 0xfb, 0x1b, 0x00, 0x00, + 0xff, 0xff, 0x69, 0x4c, 0xa6, 0x3e, 0x9b, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go new file mode 100644 index 000000000..5a4310750 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go @@ -0,0 +1,701 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/common.proto + +/* +Package monitoring is a generated protocol buffer package. + +It is generated from these files: + google/monitoring/v3/common.proto + google/monitoring/v3/group.proto + google/monitoring/v3/group_service.proto + google/monitoring/v3/metric.proto + google/monitoring/v3/metric_service.proto + +It has these top-level messages: + TypedValue + TimeInterval + Aggregation + Group + ListGroupsRequest + ListGroupsResponse + GetGroupRequest + CreateGroupRequest + UpdateGroupRequest + DeleteGroupRequest + ListGroupMembersRequest + ListGroupMembersResponse + Point + TimeSeries + ListMonitoredResourceDescriptorsRequest + ListMonitoredResourceDescriptorsResponse + GetMonitoredResourceDescriptorRequest + ListMetricDescriptorsRequest + ListMetricDescriptorsResponse + GetMetricDescriptorRequest + CreateMetricDescriptorRequest + DeleteMetricDescriptorRequest + ListTimeSeriesRequest + ListTimeSeriesResponse + CreateTimeSeriesRequest + CreateTimeSeriesError +*/ +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_api2 "google.golang.org/genproto/googleapis/api/distribution" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports 
to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The Aligner describes how to bring the data points in a single +// time series into temporal alignment. +type Aggregation_Aligner int32 + +const ( + // No alignment. Raw data is returned. Not valid if cross-time + // series reduction is requested. The value type of the result is + // the same as the value type of the input. + Aggregation_ALIGN_NONE Aggregation_Aligner = 0 + // Align and convert to delta metric type. This alignment is valid + // for cumulative metrics and delta metrics. Aligning an existing + // delta metric to a delta metric requires that the alignment + // period be increased. The value type of the result is the same + // as the value type of the input. + Aggregation_ALIGN_DELTA Aggregation_Aligner = 1 + // Align and convert to a rate. This alignment is valid for + // cumulative metrics and delta metrics with numeric values. The output is a + // gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_RATE Aggregation_Aligner = 2 + // Align by interpolating between adjacent points around the + // period boundary. This alignment is valid for gauge + // metrics with numeric values. The value type of the result is the same + // as the value type of the input. + Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3 + // Align by shifting the oldest data point before the period + // boundary to the boundary. This alignment is valid for gauge + // metrics. The value type of the result is the same as the + // value type of the input. 
+ Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4 + // Align time series via aggregation. The resulting data point in + // the alignment period is the minimum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // values. The value type of the result is the same as the value + // type of the input. + Aggregation_ALIGN_MIN Aggregation_Aligner = 10 + // Align time series via aggregation. The resulting data point in + // the alignment period is the maximum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // values. The value type of the result is the same as the value + // type of the input. + Aggregation_ALIGN_MAX Aggregation_Aligner = 11 + // Align time series via aggregation. The resulting data point in + // the alignment period is the average or arithmetic mean of all + // data points in the period. This alignment is valid for gauge and delta + // metrics with numeric values. The value type of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_MEAN Aggregation_Aligner = 12 + // Align time series via aggregation. The resulting data point in + // the alignment period is the count of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // or Boolean values. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_ALIGN_COUNT Aggregation_Aligner = 13 + // Align time series via aggregation. The resulting data point in + // the alignment period is the sum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // and distribution values. The value type of the output is the + // same as the value type of the input. + Aggregation_ALIGN_SUM Aggregation_Aligner = 14 + // Align time series via aggregation. 
The resulting data point in + // the alignment period is the standard deviation of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with numeric values. The value type of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15 + // Align time series via aggregation. The resulting data point in + // the alignment period is the count of True-valued data points in the + // period. This alignment is valid for gauge metrics with + // Boolean values. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16 + // Align time series via aggregation. The resulting data point in + // the alignment period is the fraction of True-valued data points in the + // period. This alignment is valid for gauge metrics with Boolean values. + // The output value is in the range [0, 1] and has value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 99th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 95th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19 + // Align time series via aggregation. 
The resulting data point in + // the alignment period is the 50th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 5th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21 +) + +var Aggregation_Aligner_name = map[int32]string{ + 0: "ALIGN_NONE", + 1: "ALIGN_DELTA", + 2: "ALIGN_RATE", + 3: "ALIGN_INTERPOLATE", + 4: "ALIGN_NEXT_OLDER", + 10: "ALIGN_MIN", + 11: "ALIGN_MAX", + 12: "ALIGN_MEAN", + 13: "ALIGN_COUNT", + 14: "ALIGN_SUM", + 15: "ALIGN_STDDEV", + 16: "ALIGN_COUNT_TRUE", + 17: "ALIGN_FRACTION_TRUE", + 18: "ALIGN_PERCENTILE_99", + 19: "ALIGN_PERCENTILE_95", + 20: "ALIGN_PERCENTILE_50", + 21: "ALIGN_PERCENTILE_05", +} +var Aggregation_Aligner_value = map[string]int32{ + "ALIGN_NONE": 0, + "ALIGN_DELTA": 1, + "ALIGN_RATE": 2, + "ALIGN_INTERPOLATE": 3, + "ALIGN_NEXT_OLDER": 4, + "ALIGN_MIN": 10, + "ALIGN_MAX": 11, + "ALIGN_MEAN": 12, + "ALIGN_COUNT": 13, + "ALIGN_SUM": 14, + "ALIGN_STDDEV": 15, + "ALIGN_COUNT_TRUE": 16, + "ALIGN_FRACTION_TRUE": 17, + "ALIGN_PERCENTILE_99": 18, + "ALIGN_PERCENTILE_95": 19, + "ALIGN_PERCENTILE_50": 20, + "ALIGN_PERCENTILE_05": 21, +} + +func (x Aggregation_Aligner) String() string { + return proto.EnumName(Aggregation_Aligner_name, int32(x)) +} +func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// A Reducer describes how to aggregate data points from multiple +// time series into a single time 
series. +type Aggregation_Reducer int32 + +const ( + // No cross-time series reduction. The output of the aligner is + // returned. + Aggregation_REDUCE_NONE Aggregation_Reducer = 0 + // Reduce by computing the mean across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric or distribution values. The value type of the + // output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_MEAN Aggregation_Reducer = 1 + // Reduce by computing the minimum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric values. The value type of the output + // is the same as the value type of the input. + Aggregation_REDUCE_MIN Aggregation_Reducer = 2 + // Reduce by computing the maximum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric values. The value type of the output + // is the same as the value type of the input. + Aggregation_REDUCE_MAX Aggregation_Reducer = 3 + // Reduce by computing the sum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric and distribution values. The value type of + // the output is the same as the value type of the input. + Aggregation_REDUCE_SUM Aggregation_Reducer = 4 + // Reduce by computing the standard deviation across time series + // for each alignment period. This reducer is valid for delta + // and gauge metrics with numeric or distribution values. The value type of + // the output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5 + // Reduce by computing the count of data points across time series + // for each alignment period. This reducer is valid for delta + // and gauge metrics of numeric, Boolean, distribution, and string value + // type. 
The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_REDUCE_COUNT Aggregation_Reducer = 6 + // Reduce by computing the count of True-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The value type of + // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 + // Reduce by computing the fraction of True-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The output value is in the + // range [0, 1] and has value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8 + // Reduce by computing 99th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9 + // Reduce by computing 95th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10 + // Reduce by computing 50th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11 + // Reduce by computing 5th percentile of data points across time series + // for each alignment period. 
This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12 +) + +var Aggregation_Reducer_name = map[int32]string{ + 0: "REDUCE_NONE", + 1: "REDUCE_MEAN", + 2: "REDUCE_MIN", + 3: "REDUCE_MAX", + 4: "REDUCE_SUM", + 5: "REDUCE_STDDEV", + 6: "REDUCE_COUNT", + 7: "REDUCE_COUNT_TRUE", + 8: "REDUCE_FRACTION_TRUE", + 9: "REDUCE_PERCENTILE_99", + 10: "REDUCE_PERCENTILE_95", + 11: "REDUCE_PERCENTILE_50", + 12: "REDUCE_PERCENTILE_05", +} +var Aggregation_Reducer_value = map[string]int32{ + "REDUCE_NONE": 0, + "REDUCE_MEAN": 1, + "REDUCE_MIN": 2, + "REDUCE_MAX": 3, + "REDUCE_SUM": 4, + "REDUCE_STDDEV": 5, + "REDUCE_COUNT": 6, + "REDUCE_COUNT_TRUE": 7, + "REDUCE_FRACTION_TRUE": 8, + "REDUCE_PERCENTILE_99": 9, + "REDUCE_PERCENTILE_95": 10, + "REDUCE_PERCENTILE_50": 11, + "REDUCE_PERCENTILE_05": 12, +} + +func (x Aggregation_Reducer) String() string { + return proto.EnumName(Aggregation_Reducer_name, int32(x)) +} +func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +// A single strongly-typed value. +type TypedValue struct { + // The typed value field. 
+ // + // Types that are valid to be assigned to Value: + // *TypedValue_BoolValue + // *TypedValue_Int64Value + // *TypedValue_DoubleValue + // *TypedValue_StringValue + // *TypedValue_DistributionValue + Value isTypedValue_Value `protobuf_oneof:"value"` +} + +func (m *TypedValue) Reset() { *m = TypedValue{} } +func (m *TypedValue) String() string { return proto.CompactTextString(m) } +func (*TypedValue) ProtoMessage() {} +func (*TypedValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isTypedValue_Value interface { + isTypedValue_Value() +} + +type TypedValue_BoolValue struct { + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,oneof"` +} +type TypedValue_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,oneof"` +} +type TypedValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,oneof"` +} +type TypedValue_StringValue struct { + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,oneof"` +} +type TypedValue_DistributionValue struct { + DistributionValue *google_api2.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,oneof"` +} + +func (*TypedValue_BoolValue) isTypedValue_Value() {} +func (*TypedValue_Int64Value) isTypedValue_Value() {} +func (*TypedValue_DoubleValue) isTypedValue_Value() {} +func (*TypedValue_StringValue) isTypedValue_Value() {} +func (*TypedValue_DistributionValue) isTypedValue_Value() {} + +func (m *TypedValue) GetValue() isTypedValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *TypedValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*TypedValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *TypedValue) GetInt64Value() int64 { + if x, ok := m.GetValue().(*TypedValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *TypedValue) GetDoubleValue() float64 { + if x, 
ok := m.GetValue().(*TypedValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *TypedValue) GetStringValue() string { + if x, ok := m.GetValue().(*TypedValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *TypedValue) GetDistributionValue() *google_api2.Distribution { + if x, ok := m.GetValue().(*TypedValue_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TypedValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TypedValue_OneofMarshaler, _TypedValue_OneofUnmarshaler, _TypedValue_OneofSizer, []interface{}{ + (*TypedValue_BoolValue)(nil), + (*TypedValue_Int64Value)(nil), + (*TypedValue_DoubleValue)(nil), + (*TypedValue_StringValue)(nil), + (*TypedValue_DistributionValue)(nil), + } +} + +func _TypedValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TypedValue) + // value + switch x := m.Value.(type) { + case *TypedValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *TypedValue_Int64Value: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Int64Value)) + case *TypedValue_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *TypedValue_StringValue: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *TypedValue_DistributionValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DistributionValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TypedValue.Value has unexpected type %T", x) + } + return nil +} + +func _TypedValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + 
m := msg.(*TypedValue) + switch tag { + case 1: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &TypedValue_BoolValue{x != 0} + return true, err + case 2: // value.int64_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &TypedValue_Int64Value{int64(x)} + return true, err + case 3: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &TypedValue_DoubleValue{math.Float64frombits(x)} + return true, err + case 4: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &TypedValue_StringValue{x} + return true, err + case 5: // value.distribution_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_api2.Distribution) + err := b.DecodeMessage(msg) + m.Value = &TypedValue_DistributionValue{msg} + return true, err + default: + return false, nil + } +} + +func _TypedValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TypedValue) + // value + switch x := m.Value.(type) { + case *TypedValue_BoolValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *TypedValue_Int64Value: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Int64Value)) + case *TypedValue_DoubleValue: + n += proto.SizeVarint(3<<3 | proto.WireFixed64) + n += 8 + case *TypedValue_StringValue: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *TypedValue_DistributionValue: + s := proto.Size(x.DistributionValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", 
x)) + } + return n +} + +// A time interval extending just after a start time through an end time. +// If the start time is the same as the end time, then the interval +// represents a single point in time. +type TimeInterval struct { + // Required. The end of the time interval. + EndTime *google_protobuf2.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Optional. The beginning of the time interval. The default value + // for the start time is the end time. The start time must not be + // later than the end time. + StartTime *google_protobuf2.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"` +} + +func (m *TimeInterval) Reset() { *m = TimeInterval{} } +func (m *TimeInterval) String() string { return proto.CompactTextString(m) } +func (*TimeInterval) ProtoMessage() {} +func (*TimeInterval) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *TimeInterval) GetEndTime() *google_protobuf2.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *TimeInterval) GetStartTime() *google_protobuf2.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +// Describes how to combine multiple time series to provide different views of +// the data. Aggregation consists of an alignment step on individual time +// series (`per_series_aligner`) followed by an optional reduction of the data +// across different time series (`cross_series_reducer`). For more details, see +// [Aggregation](/monitoring/api/learn_more#aggregation). +type Aggregation struct { + // The alignment period for per-[time series][google.monitoring.v3.TimeSeries] + // alignment. If present, `alignmentPeriod` must be at least 60 + // seconds. After per-time series alignment, each time series will + // contain data points only on the period boundaries. If + // `perSeriesAligner` is not specified or equals `ALIGN_NONE`, then + // this field is ignored. 
If `perSeriesAligner` is specified and + // does not equal `ALIGN_NONE`, then this field must be defined; + // otherwise an error is returned. + AlignmentPeriod *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod" json:"alignment_period,omitempty"` + // The approach to be used to align individual time series. Not all + // alignment functions may be applied to all time series, depending + // on the metric type and value type of the original time + // series. Alignment may change the metric type or the value type of + // the time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. If `crossSeriesReducer` is specified, then + // `perSeriesAligner` must be specified and not equal `ALIGN_NONE` + // and `alignmentPeriod` must be specified; otherwise, an error is + // returned. + PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"` + // The approach to be used to combine time series. Not all reducer + // functions may be applied to all time series, depending on the + // metric type and the value type of the original time + // series. Reduction may change the metric type of value type of the + // time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. If `crossSeriesReducer` is specified, then + // `perSeriesAligner` must be specified and not equal `ALIGN_NONE` + // and `alignmentPeriod` must be specified; otherwise, an error is + // returned. + CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"` + // The set of fields to preserve when `crossSeriesReducer` is + // specified. 
The `groupByFields` determine how the time series are + // partitioned into subsets prior to applying the aggregation + // function. Each subset contains time series that have the same + // value for each of the grouping fields. Each individual time + // series is a member of exactly one subset. The + // `crossSeriesReducer` is applied to each subset of time series. + // It is not possible to reduce across different resource types, so + // this field implicitly contains `resource.type`. Fields not + // specified in `groupByFields` are aggregated away. If + // `groupByFields` is not specified and all the time series have + // the same resource type, then the time series are aggregated into + // a single output time series. If `crossSeriesReducer` is not + // defined, this field is ignored. + GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields" json:"group_by_fields,omitempty"` +} + +func (m *Aggregation) Reset() { *m = Aggregation{} } +func (m *Aggregation) String() string { return proto.CompactTextString(m) } +func (*Aggregation) ProtoMessage() {} +func (*Aggregation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Aggregation) GetAlignmentPeriod() *google_protobuf3.Duration { + if m != nil { + return m.AlignmentPeriod + } + return nil +} + +func (m *Aggregation) GetPerSeriesAligner() Aggregation_Aligner { + if m != nil { + return m.PerSeriesAligner + } + return Aggregation_ALIGN_NONE +} + +func (m *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer { + if m != nil { + return m.CrossSeriesReducer + } + return Aggregation_REDUCE_NONE +} + +func (m *Aggregation) GetGroupByFields() []string { + if m != nil { + return m.GroupByFields + } + return nil +} + +func init() { + proto.RegisterType((*TypedValue)(nil), "google.monitoring.v3.TypedValue") + proto.RegisterType((*TimeInterval)(nil), "google.monitoring.v3.TimeInterval") + proto.RegisterType((*Aggregation)(nil), "google.monitoring.v3.Aggregation") 
+ proto.RegisterEnum("google.monitoring.v3.Aggregation_Aligner", Aggregation_Aligner_name, Aggregation_Aligner_value) + proto.RegisterEnum("google.monitoring.v3.Aggregation_Reducer", Aggregation_Reducer_name, Aggregation_Reducer_value) +} + +func init() { proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 780 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x6a, 0xe3, 0x46, + 0x14, 0xb6, 0xec, 0x64, 0x1d, 0x1f, 0x39, 0xf1, 0x64, 0xd6, 0x4b, 0xdd, 0x40, 0xbb, 0x5e, 0x17, + 0x8a, 0x7b, 0x23, 0x87, 0xa4, 0x2e, 0x84, 0x42, 0x41, 0xb1, 0xb5, 0x1b, 0x83, 0x23, 0x9b, 0x59, + 0x25, 0x0d, 0xed, 0x85, 0x90, 0xa3, 0x59, 0x21, 0x90, 0x35, 0x62, 0x24, 0x19, 0x72, 0xd7, 0xab, + 0xbe, 0x43, 0x5f, 0x61, 0x1f, 0xa9, 0x0f, 0xd1, 0x67, 0x28, 0x9a, 0x19, 0xad, 0x94, 0xd6, 0xa5, + 0x7b, 0xf9, 0xfd, 0x9c, 0x6f, 0x74, 0xbe, 0x91, 0x6c, 0x78, 0x13, 0x30, 0x16, 0x44, 0x74, 0xb2, + 0x65, 0x71, 0x98, 0x31, 0x1e, 0xc6, 0xc1, 0x64, 0x77, 0x39, 0x79, 0x64, 0xdb, 0x2d, 0x8b, 0x8d, + 0x84, 0xb3, 0x8c, 0xe1, 0xbe, 0xb4, 0x18, 0x95, 0xc5, 0xd8, 0x5d, 0x9e, 0x7d, 0xa5, 0x06, 0xbd, + 0x24, 0x9c, 0xf8, 0x61, 0x9a, 0xf1, 0x70, 0x93, 0x67, 0x61, 0x39, 0x74, 0xf6, 0xb5, 0x92, 0x05, + 0xda, 0xe4, 0x1f, 0x26, 0x7e, 0xce, 0xbd, 0x9a, 0xfe, 0xfa, 0x9f, 0x7a, 0x16, 0x6e, 0x69, 0x9a, + 0x79, 0xdb, 0x44, 0x1a, 0x46, 0x7f, 0x69, 0x00, 0xce, 0x53, 0x42, 0xfd, 0x7b, 0x2f, 0xca, 0x29, + 0x7e, 0x0d, 0xb0, 0x61, 0x2c, 0x72, 0x77, 0x05, 0x1a, 0x68, 0x43, 0x6d, 0x7c, 0x74, 0xd3, 0x20, + 0x9d, 0x82, 0x93, 0x86, 0x37, 0xa0, 0x87, 0x71, 0xf6, 0xc3, 0xf7, 0xca, 0xd1, 0x1c, 0x6a, 0xe3, + 0xd6, 0x4d, 0x83, 0x80, 0x20, 0xa5, 0xe5, 0x1b, 0xe8, 0xfa, 0x2c, 0xdf, 0x44, 0x54, 0x79, 0x5a, + 0x43, 0x6d, 0xac, 0xdd, 0x34, 0x88, 0x2e, 0xd9, 0x4f, 0xa6, 0x62, 0x99, 0x38, 0x50, 0xa6, 0x83, + 0xa1, 0x36, 0xee, 0x14, 0x26, 0xc9, 0x4a, 0xd3, 0x02, 0x70, 0x7d, 0x67, 0x65, 0x3d, 0x1c, 0x6a, + 0x63, 0xfd, 0x62, 
0x60, 0xa8, 0xbe, 0xbc, 0x24, 0x34, 0xe6, 0x35, 0xd7, 0x4d, 0x83, 0x9c, 0xd6, + 0xa7, 0x44, 0xd4, 0x75, 0x1b, 0x0e, 0xc5, 0xf4, 0xe8, 0x37, 0x0d, 0xba, 0x4e, 0xb8, 0xa5, 0x8b, + 0x38, 0xa3, 0x7c, 0xe7, 0x45, 0x78, 0x0a, 0x47, 0x34, 0xf6, 0xdd, 0xa2, 0x18, 0xb1, 0x8e, 0x7e, + 0x71, 0x56, 0x46, 0x97, 0xad, 0x19, 0x4e, 0xd9, 0x1a, 0x69, 0xd3, 0xd8, 0x2f, 0x10, 0xbe, 0x02, + 0x48, 0x33, 0x8f, 0x67, 0x72, 0x50, 0xfb, 0xdf, 0xc1, 0x8e, 0x70, 0x17, 0x78, 0xf4, 0xb1, 0x0d, + 0xba, 0x19, 0x04, 0x9c, 0x06, 0xe2, 0xaa, 0xf0, 0x1c, 0x90, 0x17, 0x85, 0x41, 0xbc, 0xa5, 0x71, + 0xe6, 0x26, 0x94, 0x87, 0xcc, 0x57, 0x81, 0x5f, 0xfe, 0x2b, 0x70, 0xae, 0xee, 0x97, 0xf4, 0x3e, + 0x8d, 0xac, 0xc5, 0x04, 0xfe, 0x19, 0x70, 0x42, 0xb9, 0x9b, 0x52, 0x1e, 0xd2, 0xd4, 0x15, 0x2a, + 0xe5, 0x62, 0xa3, 0x93, 0x8b, 0xef, 0x8c, 0x7d, 0x2f, 0x97, 0x51, 0x7b, 0x08, 0xc3, 0x94, 0x03, + 0x04, 0x25, 0x94, 0xbf, 0x17, 0x19, 0x8a, 0xc1, 0xbf, 0x42, 0xff, 0x91, 0xb3, 0x34, 0x2d, 0xa3, + 0x39, 0xf5, 0xf3, 0x47, 0xca, 0xc5, 0x95, 0x7d, 0x56, 0x34, 0x91, 0x03, 0x04, 0x8b, 0x18, 0x19, + 0xae, 0x38, 0xfc, 0x2d, 0xf4, 0x02, 0xce, 0xf2, 0xc4, 0xdd, 0x3c, 0xb9, 0x1f, 0x42, 0x1a, 0xf9, + 0xe9, 0xe0, 0x70, 0xd8, 0x1a, 0x77, 0xc8, 0xb1, 0xa0, 0xaf, 0x9f, 0xde, 0x0a, 0x72, 0xf4, 0x67, + 0x13, 0xda, 0xe5, 0x03, 0x9d, 0x00, 0x98, 0xcb, 0xc5, 0x3b, 0xdb, 0xb5, 0x57, 0xb6, 0x85, 0x1a, + 0xb8, 0x07, 0xba, 0xc4, 0x73, 0x6b, 0xe9, 0x98, 0x48, 0xab, 0x0c, 0xc4, 0x74, 0x2c, 0xd4, 0xc4, + 0xaf, 0xe0, 0x54, 0xe2, 0x85, 0xed, 0x58, 0x64, 0xbd, 0x5a, 0x16, 0x74, 0x0b, 0xf7, 0x01, 0xa9, + 0x1c, 0xeb, 0xc1, 0x71, 0x57, 0xcb, 0xb9, 0x45, 0xd0, 0x01, 0x3e, 0x86, 0x8e, 0x64, 0x6f, 0x17, + 0x36, 0x82, 0x1a, 0x34, 0x1f, 0x90, 0x5e, 0x45, 0xdf, 0x5a, 0xa6, 0x8d, 0xba, 0xd5, 0xd9, 0xb3, + 0xd5, 0x9d, 0xed, 0xa0, 0xe3, 0xca, 0xff, 0xfe, 0xee, 0x16, 0x9d, 0x60, 0x04, 0x5d, 0x05, 0x9d, + 0xf9, 0xdc, 0xba, 0x47, 0xbd, 0xea, 0x54, 0x31, 0xe1, 0x3a, 0xe4, 0xce, 0x42, 0x08, 0x7f, 0x01, + 0x2f, 0x25, 0xfb, 0x96, 0x98, 0x33, 0x67, 0xb1, 0xb2, 
0xa5, 0x70, 0x5a, 0x09, 0x6b, 0x8b, 0xcc, + 0x2c, 0xdb, 0x59, 0x2c, 0x2d, 0xf7, 0xea, 0x0a, 0xe1, 0xfd, 0xc2, 0x14, 0xbd, 0xdc, 0x2b, 0x4c, + 0xcf, 0x51, 0x7f, 0xaf, 0x70, 0x3e, 0x45, 0xaf, 0x46, 0x7f, 0x34, 0xa1, 0x5d, 0x5e, 0x48, 0x0f, + 0x74, 0x62, 0xcd, 0xef, 0x66, 0x56, 0xad, 0x5d, 0x45, 0x88, 0x95, 0x45, 0xbb, 0x25, 0xb1, 0xb0, + 0x51, 0xb3, 0x8e, 0xcd, 0x07, 0xd4, 0xaa, 0xe1, 0xa2, 0x82, 0x03, 0x7c, 0x0a, 0xc7, 0x25, 0x96, + 0x1d, 0x1c, 0x16, 0xad, 0x28, 0x4a, 0xd6, 0xf6, 0xa2, 0xb8, 0xa2, 0x3a, 0x23, 0xb7, 0x6f, 0xe3, + 0x01, 0xf4, 0x15, 0xfd, 0xbc, 0x97, 0xa3, 0x9a, 0xf2, 0xbc, 0x98, 0xce, 0x7f, 0x28, 0x53, 0x04, + 0xfb, 0x95, 0xe9, 0x39, 0xd2, 0xf7, 0x2b, 0xe7, 0x53, 0xd4, 0xbd, 0xfe, 0x5d, 0x83, 0xc1, 0x23, + 0xdb, 0xee, 0x7d, 0xcb, 0xaf, 0xf5, 0x99, 0xf8, 0x05, 0x5f, 0x17, 0x5f, 0xe7, 0x5a, 0xfb, 0xe5, + 0x27, 0x65, 0x0a, 0x58, 0xe4, 0xc5, 0x81, 0xc1, 0x78, 0x30, 0x09, 0x68, 0x2c, 0xbe, 0xdd, 0x89, + 0x94, 0xbc, 0x24, 0x4c, 0x9f, 0xff, 0x09, 0xfc, 0x58, 0xa1, 0x8f, 0xcd, 0xb3, 0x77, 0x32, 0x60, + 0x16, 0xb1, 0xdc, 0x37, 0x6e, 0xab, 0xb3, 0xee, 0x2f, 0x37, 0x2f, 0x44, 0xce, 0xe5, 0xdf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x53, 0x47, 0x59, 0x88, 0x4b, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go new file mode 100644 index 000000000..6a34e344a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/group.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The description of a dynamic collection of monitored resources. 
Each group +// has a filter that is matched against monitored resources and their associated +// metadata. If a group's filter matches an available monitored resource, then +// that resource is a member of that group. Groups can contain any number of +// monitored resources, and each monitored resource can be a member of any +// number of groups. +// +// Groups can be nested in parent-child hierarchies. The `parentName` field +// identifies an optional parent for each group. If a group has a parent, then +// the only monitored resources available to be matched by the group's filter +// are the resources contained in the parent group. In other words, a group +// contains the monitored resources that match its filter and the filters of all +// the group's ancestors. A group without a parent can contain any monitored +// resource. +// +// For example, consider an infrastructure running a set of instances with two +// user-defined tags: `"environment"` and `"role"`. A parent group has a filter, +// `environment="production"`. A child of that parent group has a filter, +// `role="transcoder"`. The parent group contains all instances in the +// production environment, regardless of their roles. The child group contains +// instances that have the transcoder role *and* are in the production +// environment. +// +// The monitored resources contained in a group can change at any moment, +// depending on what resources exist and what filters are associated with the +// group and its ancestors. +type Group struct { + // Output only. The name of this group. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + // When creating a group, this field is ignored and a new name is created + // consisting of the project specified in the call to `CreateGroup` + // and a unique `{group_id}` that is generated automatically. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A user-assigned name for this group, used only for display purposes. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // The name of the group's parent, if it has one. + // The format is `"projects/{project_id_or_number}/groups/{group_id}"`. + // For groups with no parent, `parentName` is the empty string, `""`. + ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName" json:"parent_name,omitempty"` + // The filter used to determine which monitored resources belong to this group. + Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + // If true, the members of this group are considered to be a cluster. + // The system can perform additional analysis on groups that are clusters. + IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster" json:"is_cluster,omitempty"` +} + +func (m *Group) Reset() { *m = Group{} } +func (m *Group) String() string { return proto.CompactTextString(m) } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Group) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Group) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Group) GetParentName() string { + if m != nil { + return m.ParentName + } + return "" +} + +func (m *Group) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *Group) GetIsCluster() bool { + if m != nil { + return m.IsCluster + } + return false +} + +func init() { + proto.RegisterType((*Group)(nil), "google.monitoring.v3.Group") +} + +func init() { proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 248 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0x03, 0x31, + 0x10, 0x86, 0x49, 0xb5, 0x8b, 0x9d, 0x7a, 0x0a, 0x22, 0x8b, 0x20, 0xae, 0x9e, 0x7a, 0x4a, 0x0e, + 0x7b, 0x14, 0x3c, 0xb4, 0x87, 0x9e, 0x94, 0xd2, 0x83, 0x07, 0x2f, 0x25, 0xb6, 0x31, 0x04, 0xb2, + 0x99, 0x90, 0xec, 0x2e, 0xf8, 0x00, 0xbe, 0x82, 0x0f, 0xe1, 0x53, 0xca, 0x4e, 0x16, 0x16, 0xc1, + 0x5b, 0xe6, 0xff, 0x3e, 0x26, 0x33, 0x03, 0x95, 0x41, 0x34, 0x4e, 0xcb, 0x06, 0xbd, 0x6d, 0x31, + 0x5a, 0x6f, 0x64, 0x5f, 0x4b, 0x13, 0xb1, 0x0b, 0x22, 0x44, 0x6c, 0x91, 0x5f, 0x65, 0x43, 0x4c, + 0x86, 0xe8, 0xeb, 0x87, 0x6f, 0x06, 0xf3, 0xed, 0x60, 0x71, 0x0e, 0xe7, 0x5e, 0x35, 0xba, 0x64, + 0x15, 0x5b, 0x2d, 0xf6, 0xf4, 0xe6, 0xf7, 0x70, 0x79, 0xb2, 0x29, 0x38, 0xf5, 0x79, 0x20, 0x36, + 0x23, 0xb6, 0x1c, 0xb3, 0x97, 0x41, 0xb9, 0x83, 0x65, 0x50, 0x51, 0xfb, 0x36, 0x1b, 0x67, 0x64, + 0x40, 0x8e, 0x48, 0xb8, 0x86, 0xe2, 0xc3, 0xba, 0x56, 0xc7, 0x72, 0x4e, 0x6c, 0xac, 0xf8, 0x2d, + 0x80, 0x4d, 0x87, 0xa3, 0xeb, 0xd2, 0xc0, 0x8a, 0x8a, 0xad, 0x2e, 0xf6, 0x0b, 0x9b, 0x36, 0x39, + 0x58, 0x7f, 0x31, 0x28, 0x8f, 0xd8, 0x88, 0xff, 0xa6, 0x5e, 0x03, 0x8d, 0xbc, 0x1b, 0xf6, 0xda, + 0xb1, 0xb7, 0xa7, 0xd1, 0x31, 0xe8, 0x94, 0x37, 0x02, 0xa3, 0x91, 0x46, 0x7b, 0xda, 0x5a, 0x66, + 0xa4, 0x82, 0x4d, 0x7f, 0x4f, 0xf3, 0x38, 0x55, 0x3f, 0xb3, 0x9b, 0x6d, 0x6e, 0xb0, 0x71, 0xd8, + 0x9d, 0xc4, 0xf3, 0xf4, 0xd5, 0x6b, 0xfd, 0x5e, 0x50, 0x9f, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, + 0x0c, 0x22, 0x2a, 0x57, 0x61, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go new file mode 100644 index 000000000..f5f847824 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go @@ -0,0 +1,753 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/group_service.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api4 "google.golang.org/genproto/googleapis/api/monitoredres" +import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The `ListGroup` request. +type ListGroupsRequest struct { + // The project whose groups are to be listed. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,7,opt,name=name" json:"name,omitempty"` + // An optional filter consisting of a single group name. The filters limit the + // groups returned based on their parent-child relationship with the specified + // group. If no filter is specified, all groups are returned. + // + // Types that are valid to be assigned to Filter: + // *ListGroupsRequest_ChildrenOfGroup + // *ListGroupsRequest_AncestorsOfGroup + // *ListGroupsRequest_DescendantsOfGroup + Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListGroupsRequest) Reset() { *m = ListGroupsRequest{} } +func (m *ListGroupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupsRequest) ProtoMessage() {} +func (*ListGroupsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +type isListGroupsRequest_Filter interface { + isListGroupsRequest_Filter() +} + +type ListGroupsRequest_ChildrenOfGroup struct { + ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,oneof"` +} +type ListGroupsRequest_AncestorsOfGroup struct { + AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,oneof"` +} +type ListGroupsRequest_DescendantsOfGroup struct { + DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,oneof"` +} + +func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {} +func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {} +func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {} + +func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ListGroupsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListGroupsRequest) GetChildrenOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok { + return x.ChildrenOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetAncestorsOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok { + return x.AncestorsOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetDescendantsOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok { + return x.DescendantsOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetPageSize() 
int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ListGroupsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ListGroupsRequest_OneofMarshaler, _ListGroupsRequest_OneofUnmarshaler, _ListGroupsRequest_OneofSizer, []interface{}{ + (*ListGroupsRequest_ChildrenOfGroup)(nil), + (*ListGroupsRequest_AncestorsOfGroup)(nil), + (*ListGroupsRequest_DescendantsOfGroup)(nil), + } +} + +func _ListGroupsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ListGroupsRequest) + // filter + switch x := m.Filter.(type) { + case *ListGroupsRequest_ChildrenOfGroup: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ChildrenOfGroup) + case *ListGroupsRequest_AncestorsOfGroup: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AncestorsOfGroup) + case *ListGroupsRequest_DescendantsOfGroup: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.DescendantsOfGroup) + case nil: + default: + return fmt.Errorf("ListGroupsRequest.Filter has unexpected type %T", x) + } + return nil +} + +func _ListGroupsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ListGroupsRequest) + switch tag { + case 2: // filter.children_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_ChildrenOfGroup{x} + return true, err + case 3: // filter.ancestors_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_AncestorsOfGroup{x} + return true, err + case 
4: // filter.descendants_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_DescendantsOfGroup{x} + return true, err + default: + return false, nil + } +} + +func _ListGroupsRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ListGroupsRequest) + // filter + switch x := m.Filter.(type) { + case *ListGroupsRequest_ChildrenOfGroup: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ChildrenOfGroup))) + n += len(x.ChildrenOfGroup) + case *ListGroupsRequest_AncestorsOfGroup: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AncestorsOfGroup))) + n += len(x.AncestorsOfGroup) + case *ListGroupsRequest_DescendantsOfGroup: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.DescendantsOfGroup))) + n += len(x.DescendantsOfGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The `ListGroups` response. +type ListGroupsResponse struct { + // The groups that match the specified filters. + Group []*Group `protobuf:"bytes,1,rep,name=group" json:"group,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListGroupsResponse) Reset() { *m = ListGroupsResponse{} } +func (m *ListGroupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupsResponse) ProtoMessage() {} +func (*ListGroupsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ListGroupsResponse) GetGroup() []*Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *ListGroupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetGroup` request. +type GetGroupRequest struct { + // The group to retrieve. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} } +func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) } +func (*GetGroupRequest) ProtoMessage() {} +func (*GetGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *GetGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateGroup` request. +type CreateGroupRequest struct { + // The project in which to create the group. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // A group definition. It is an error to define the `name` field because + // the system assigns the name. + Group *Group `protobuf:"bytes,2,opt,name=group" json:"group,omitempty"` + // If true, validate this request but do not create the group. 
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly" json:"validate_only,omitempty"` +} + +func (m *CreateGroupRequest) Reset() { *m = CreateGroupRequest{} } +func (m *CreateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*CreateGroupRequest) ProtoMessage() {} +func (*CreateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *CreateGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateGroupRequest) GetGroup() *Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *CreateGroupRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// The `UpdateGroup` request. +type UpdateGroupRequest struct { + // The new definition of the group. All fields of the existing group, + // excepting `name`, are replaced with the corresponding fields of this group. + Group *Group `protobuf:"bytes,2,opt,name=group" json:"group,omitempty"` + // If true, validate this request but do not update the existing group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly" json:"validate_only,omitempty"` +} + +func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} } +func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateGroupRequest) ProtoMessage() {} +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *UpdateGroupRequest) GetGroup() *Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *UpdateGroupRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// The `DeleteGroup` request. You can only delete a group if it has no children. +type DeleteGroupRequest struct { + // The group to delete. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. 
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteGroupRequest) Reset() { *m = DeleteGroupRequest{} } +func (m *DeleteGroupRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteGroupRequest) ProtoMessage() {} +func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *DeleteGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListGroupMembers` request. +type ListGroupMembersRequest struct { + // The group whose members are listed. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + Name string `protobuf:"bytes,7,opt,name=name" json:"name,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // An optional [list filter](/monitoring/api/learn_more#filtering) describing + // the members to be returned. The filter may reference the type, labels, and + // metadata of monitored resources that comprise the group. + // For example, to return only resources representing Compute Engine VM + // instances, use this filter: + // + // resource.type = "gce_instance" + Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + // An optional time interval for which results should be returned. Only + // members that were part of the group during the specified interval are + // included in the response. If no interval is provided then the group + // membership over the last minute is returned. 
+ Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval" json:"interval,omitempty"` +} + +func (m *ListGroupMembersRequest) Reset() { *m = ListGroupMembersRequest{} } +func (m *ListGroupMembersRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupMembersRequest) ProtoMessage() {} +func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *ListGroupMembersRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListGroupMembersRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupMembersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListGroupMembersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListGroupMembersRequest) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +// The `ListGroupMembers` response. +type ListGroupMembersResponse struct { + // A set of monitored resources in the group. + Members []*google_api4.MonitoredResource `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"` + // If there are more results than have been returned, then this field is + // set to a non-empty value. To see the additional results, use that value as + // `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` + // The total number of elements matching this request. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` +} + +func (m *ListGroupMembersResponse) Reset() { *m = ListGroupMembersResponse{} } +func (m *ListGroupMembersResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupMembersResponse) ProtoMessage() {} +func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *ListGroupMembersResponse) GetMembers() []*google_api4.MonitoredResource { + if m != nil { + return m.Members + } + return nil +} + +func (m *ListGroupMembersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListGroupMembersResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +func init() { + proto.RegisterType((*ListGroupsRequest)(nil), "google.monitoring.v3.ListGroupsRequest") + proto.RegisterType((*ListGroupsResponse)(nil), "google.monitoring.v3.ListGroupsResponse") + proto.RegisterType((*GetGroupRequest)(nil), "google.monitoring.v3.GetGroupRequest") + proto.RegisterType((*CreateGroupRequest)(nil), "google.monitoring.v3.CreateGroupRequest") + proto.RegisterType((*UpdateGroupRequest)(nil), "google.monitoring.v3.UpdateGroupRequest") + proto.RegisterType((*DeleteGroupRequest)(nil), "google.monitoring.v3.DeleteGroupRequest") + proto.RegisterType((*ListGroupMembersRequest)(nil), "google.monitoring.v3.ListGroupMembersRequest") + proto.RegisterType((*ListGroupMembersResponse)(nil), "google.monitoring.v3.ListGroupMembersResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for GroupService service + +type GroupServiceClient interface { + // Lists the existing groups. + ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Creates a new group. + CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Deletes an existing group. + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) +} + +type groupServiceClient struct { + cc *grpc.ClientConn +} + +func NewGroupServiceClient(cc *grpc.ClientConn) GroupServiceClient { + return &groupServiceClient{cc} +} + +func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) { + out := new(ListGroupsResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := grpc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := grpc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := grpc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) { + out := new(ListGroupMembersResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for GroupService service + +type GroupServiceServer interface { + // Lists the existing groups. + ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(context.Context, *GetGroupRequest) (*Group, error) + // Creates a new group. + CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) + // Deletes an existing group. 
+ DeleteGroup(context.Context, *DeleteGroupRequest) (*google_protobuf4.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) +} + +func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) { + s.RegisterService(&_GroupService_serviceDesc, srv) +} + +func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).GetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/GetGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(GroupServiceServer).CreateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/CreateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).UpdateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).DeleteGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupMembersRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(GroupServiceServer).ListGroupMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _GroupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.GroupService", + HandlerType: (*GroupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListGroups", + Handler: _GroupService_ListGroups_Handler, + }, + { + MethodName: "GetGroup", + Handler: _GroupService_GetGroup_Handler, + }, + { + MethodName: "CreateGroup", + Handler: _GroupService_CreateGroup_Handler, + }, + { + MethodName: "UpdateGroup", + Handler: _GroupService_UpdateGroup_Handler, + }, + { + MethodName: "DeleteGroup", + Handler: _GroupService_DeleteGroup_Handler, + }, + { + MethodName: "ListGroupMembers", + Handler: _GroupService_ListGroupMembers_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/group_service.proto", +} + +func init() { proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 818 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x2e, 0x2d, 0x59, 0x96, 0x46, 0x36, 0x6c, 0x2f, 0x0c, 0x57, 0xa0, 0x7f, 0xa0, 0xd2, 0x3f, + 0x15, 0xdc, 0x9a, 0x44, 0xa5, 0x43, 0x81, 0x16, 0xf5, 0xc1, 0x6e, 0xe1, 0x16, 0xa8, 0x61, 0x83, + 0x76, 0x7b, 0xe8, 0x45, 0xa0, 0xa5, 0x11, 0xcb, 0x96, 0xdc, 0x65, 0xc9, 0x95, 0x5a, 0xbb, 0x30, + 0x90, 0x04, 0xc8, 0x21, 0x40, 0x6e, 0xb9, 0xe5, 0x96, 0x6b, 0x5e, 0x21, 0xa7, 0x3c, 0x43, 0x5e, + 0x21, 0xef, 0x91, 0x80, 0xcb, 0x5d, 0x89, 0xfa, 0xb5, 0x2f, 0xb9, 0x91, 0xbb, 0xdf, 0xec, 0xf7, + 0xcd, 0xec, 0x37, 0xb3, 
0x50, 0x73, 0x19, 0x73, 0x7d, 0xb4, 0x02, 0x46, 0x3d, 0xce, 0x22, 0x8f, + 0xba, 0x56, 0xaf, 0x61, 0xb9, 0x11, 0xeb, 0x86, 0xcd, 0x18, 0xa3, 0x9e, 0xd7, 0x42, 0x33, 0x8c, + 0x18, 0x67, 0x64, 0x2d, 0x45, 0x9a, 0x03, 0xa4, 0xd9, 0x6b, 0xe8, 0x9b, 0x32, 0xde, 0x09, 0x3d, + 0xcb, 0xa1, 0x94, 0x71, 0x87, 0x7b, 0x8c, 0xc6, 0x69, 0x8c, 0xbe, 0x93, 0xd9, 0x95, 0x71, 0xd8, + 0x6e, 0x46, 0x18, 0xb3, 0x6e, 0xa4, 0x0e, 0xd6, 0xbf, 0x98, 0x28, 0xa1, 0xc5, 0x82, 0x80, 0x51, + 0x09, 0xa9, 0x4e, 0x57, 0x29, 0x11, 0x1b, 0x12, 0x21, 0xfe, 0xae, 0xbb, 0x1d, 0x0b, 0x83, 0x90, + 0xdf, 0xa4, 0x9b, 0xc6, 0x07, 0x0d, 0x56, 0x7f, 0xf5, 0x62, 0x7e, 0x9a, 0x04, 0xc4, 0x36, 0xfe, + 0xd3, 0xc5, 0x98, 0x13, 0x02, 0x79, 0xea, 0x04, 0x58, 0x59, 0xa8, 0x6a, 0xb5, 0x92, 0x2d, 0xbe, + 0xc9, 0xd7, 0xb0, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0x11, 0xd2, 0x26, 0xeb, 0x34, 0x05, 0x43, 0x65, + 0x2e, 0x01, 0xfc, 0xfc, 0x99, 0xbd, 0xac, 0xb6, 0xce, 0x3b, 0xe2, 0x24, 0x62, 0x02, 0x71, 0x68, + 0x0b, 0x63, 0xce, 0xa2, 0x78, 0x00, 0xcf, 0x49, 0xf8, 0x4a, 0x7f, 0x4f, 0xe1, 0xeb, 0xb0, 0xd6, + 0xc6, 0xb8, 0x85, 0xb4, 0xed, 0x50, 0x9e, 0x89, 0xc8, 0xcb, 0x08, 0x92, 0xd9, 0x55, 0x31, 0x1b, + 0x50, 0x0a, 0x1d, 0x17, 0x9b, 0xb1, 0x77, 0x8b, 0x95, 0xf9, 0xaa, 0x56, 0x9b, 0xb7, 0x8b, 0xc9, + 0xc2, 0xa5, 0x77, 0x8b, 0x64, 0x0b, 0x40, 0x6c, 0x72, 0xf6, 0x37, 0xd2, 0x4a, 0x41, 0x24, 0x22, + 0xe0, 0x57, 0xc9, 0xc2, 0x71, 0x11, 0x0a, 0x1d, 0xcf, 0xe7, 0x18, 0x19, 0x0c, 0x48, 0xb6, 0x00, + 0x71, 0xc8, 0x68, 0x8c, 0xe4, 0x1b, 0x98, 0x4f, 0x05, 0x68, 0xd5, 0x5c, 0xad, 0x5c, 0xdf, 0x30, + 0x27, 0x5d, 0xb1, 0x29, 0x82, 0xec, 0x14, 0x49, 0xf6, 0x61, 0x99, 0xe2, 0x7f, 0xbc, 0x99, 0xa1, + 0x15, 0xe5, 0xb1, 0x97, 0x92, 0xe5, 0x0b, 0x45, 0x6d, 0xec, 0xc1, 0xf2, 0x29, 0xa6, 0x7c, 0xa3, + 0xf5, 0xce, 0x0d, 0xea, 0x6d, 0x3c, 0xd2, 0x80, 0x9c, 0x44, 0xe8, 0x70, 0x9c, 0x08, 0xcd, 0x67, + 0xae, 0xa6, 0x2f, 0x36, 0xe1, 0x7b, 0x98, 0xd8, 0x1d, 0x58, 0xea, 0x39, 0xbe, 0xd7, 0x76, 0x38, + 0x36, 0x19, 0xf5, 0x6f, 0x04, 0x75, 0xd1, 0x5e, 0x54, 0x8b, 
0xe7, 0xd4, 0xbf, 0x31, 0x7c, 0x20, + 0xbf, 0x85, 0xed, 0x51, 0x05, 0x9f, 0x8a, 0xad, 0x06, 0xe4, 0x47, 0xf4, 0x71, 0x4a, 0xbe, 0xd9, + 0xd2, 0xbc, 0xd5, 0xe0, 0xf3, 0xfe, 0x9d, 0x9d, 0x61, 0x70, 0x8d, 0xd1, 0x4c, 0xeb, 0x0e, 0x19, + 0x25, 0x37, 0xd3, 0x28, 0xf9, 0x11, 0xa3, 0x90, 0x75, 0x65, 0x14, 0xe1, 0xb0, 0x92, 0x2d, 0xff, + 0xc8, 0x11, 0x14, 0x3d, 0xca, 0x31, 0xea, 0x39, 0xbe, 0x70, 0x57, 0xb9, 0x6e, 0x4c, 0x2e, 0xc4, + 0x95, 0x17, 0xe0, 0x2f, 0x12, 0x69, 0xf7, 0x63, 0x8c, 0x97, 0x1a, 0x54, 0xc6, 0x73, 0x90, 0xee, + 0xfb, 0x16, 0x16, 0x82, 0x74, 0x49, 0xfa, 0x6f, 0x4b, 0x9d, 0xed, 0x84, 0x9e, 0x79, 0xa6, 0xc6, + 0x85, 0x2d, 0xa7, 0x85, 0xad, 0xd0, 0x0f, 0xf5, 0x60, 0x92, 0x34, 0x67, 0xdc, 0xf1, 0xb3, 0x25, + 0x29, 0x89, 0x95, 0xa4, 0x26, 0xf5, 0x37, 0x05, 0x58, 0x14, 0xc2, 0x2e, 0xd3, 0x39, 0x47, 0x9e, + 0x6a, 0x00, 0x83, 0x2e, 0x21, 0x5f, 0x4e, 0x4e, 0x75, 0x6c, 0x90, 0xe8, 0xb5, 0xfb, 0x81, 0x69, + 0xca, 0xc6, 0xee, 0x93, 0x77, 0xef, 0x5f, 0xcc, 0x6d, 0x93, 0xcd, 0x64, 0x7c, 0xfd, 0x9f, 0x5c, + 0xdb, 0x0f, 0x61, 0xc4, 0xfe, 0xc2, 0x16, 0x8f, 0xad, 0x83, 0xbb, 0x74, 0xa0, 0xc5, 0xa4, 0x07, + 0x45, 0xd5, 0x3b, 0x64, 0x6f, 0x8a, 0xf1, 0x86, 0x7b, 0x4b, 0x9f, 0xe5, 0x4f, 0x63, 0x5f, 0xb0, + 0x56, 0xc9, 0xf6, 0x24, 0x56, 0x49, 0x6a, 0x1d, 0xdc, 0x91, 0xc7, 0x1a, 0x94, 0x33, 0xcd, 0x48, + 0xa6, 0xe4, 0x35, 0xde, 0xaf, 0xb3, 0xe9, 0xbf, 0x12, 0xf4, 0x7b, 0xc6, 0xcc, 0xa4, 0xbf, 0x93, + 0x4d, 0xf4, 0x4c, 0x83, 0x72, 0xa6, 0x1d, 0xa7, 0x69, 0x18, 0xef, 0xd8, 0xd9, 0x1a, 0x1a, 0x42, + 0xc3, 0xa1, 0xbe, 0x2b, 0x34, 0xa4, 0x0f, 0xc7, 0xd4, 0x42, 0x28, 0x2d, 0xff, 0x42, 0x39, 0xd3, + 0xab, 0xd3, 0xa4, 0x8c, 0xb7, 0xb3, 0xbe, 0xae, 0x90, 0xea, 0x35, 0x32, 0x7f, 0x4a, 0x5e, 0x23, + 0x75, 0x11, 0x07, 0xf7, 0x5d, 0xc4, 0x2b, 0x0d, 0x56, 0x46, 0xdb, 0x86, 0x1c, 0xde, 0xe3, 0xb2, + 0xe1, 0x11, 0xa1, 0x9b, 0x0f, 0x85, 0x4b, 0x6b, 0x9a, 0x42, 0x5b, 0x8d, 0xec, 0xcf, 0xd6, 0x66, + 0xc9, 0x26, 0x3c, 0x7e, 0xae, 0x41, 0xa5, 0xc5, 0x82, 0x89, 0x2c, 0xc7, 0xab, 0xd9, 0xbe, 0xba, + 
0x48, 0x8a, 0x70, 0xa1, 0xfd, 0x71, 0x24, 0xa1, 0x2e, 0xf3, 0x1d, 0xea, 0x9a, 0x2c, 0x72, 0x2d, + 0x17, 0xa9, 0x28, 0x91, 0x95, 0x6e, 0x39, 0xa1, 0x17, 0x0f, 0xbf, 0xf1, 0xdf, 0x0f, 0xfe, 0x5e, + 0xcf, 0xe9, 0xa7, 0xe9, 0x01, 0x27, 0x3e, 0xeb, 0xb6, 0xd5, 0x80, 0x48, 0x18, 0x7f, 0x6f, 0x5c, + 0x17, 0xc4, 0x39, 0x8d, 0x8f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xec, 0x77, 0x3f, 0xd0, 0x08, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go new file mode 100644 index 000000000..9bcaf05b2 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/metric.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_api5 "google.golang.org/genproto/googleapis/api/metric" +import google_api4 "google.golang.org/genproto/googleapis/api/monitoredres" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A single data point in a time series. +type Point struct { + // The time interval to which the data point applies. For GAUGE metrics, only + // the end time of the interval is used. For DELTA metrics, the start and end + // time should specify a non-zero interval, with subsequent points specifying + // contiguous and non-overlapping intervals. For CUMULATIVE metrics, the + // start and end time should specify a non-zero interval, with subsequent + // points specifying the same start time and increasing end times, until an + // event resets the cumulative value to zero and sets a new start time for the + // following points. + Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval" json:"interval,omitempty"` + // The value of the data point. 
+ Value *TypedValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *Point) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *Point) GetValue() *TypedValue { + if m != nil { + return m.Value + } + return nil +} + +// A collection of data points that describes the time-varying values +// of a metric. A time series is identified by a combination of a +// fully-specified monitored resource and a fully-specified metric. +// This type is used for both listing and creating time series. +type TimeSeries struct { + // The associated metric. A fully-specified metric used to identify the time + // series. + Metric *google_api5.Metric `protobuf:"bytes,1,opt,name=metric" json:"metric,omitempty"` + // The associated resource. A fully-specified monitored resource used to + // identify the time series. + Resource *google_api4.MonitoredResource `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` + // The metric kind of the time series. When listing time series, this metric + // kind might be different from the metric kind of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the metric kind of the associated metric. If the associated + // metric's descriptor must be auto-created, then this field specifies the + // metric kind of the new descriptor and must be either `GAUGE` (the default) + // or `CUMULATIVE`. 
+ MetricKind google_api5.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The value type of the time series. When listing time series, this value + // type might be different from the value type of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the type of the data in the `points` field. + ValueType google_api5.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The data points of this time series. When listing time series, the order of + // the points is specified by the list method. + // + // When creating a time series, this field must contain exactly one point and + // the point's type must be the same as the value type of the associated + // metric. If the associated metric's descriptor must be auto-created, then + // the value type of the descriptor is determined by the point's type, which + // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`. 
+ Points []*Point `protobuf:"bytes,5,rep,name=points" json:"points,omitempty"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *TimeSeries) GetMetric() *google_api5.Metric { + if m != nil { + return m.Metric + } + return nil +} + +func (m *TimeSeries) GetResource() *google_api4.MonitoredResource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *TimeSeries) GetMetricKind() google_api5.MetricDescriptor_MetricKind { + if m != nil { + return m.MetricKind + } + return google_api5.MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (m *TimeSeries) GetValueType() google_api5.MetricDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return google_api5.MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +func init() { + proto.RegisterType((*Point)(nil), "google.monitoring.v3.Point") + proto.RegisterType((*TimeSeries)(nil), "google.monitoring.v3.TimeSeries") +} + +func init() { proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 384 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x51, 0x4b, 0xeb, 0x30, + 0x14, 0xc7, 0xe9, 0x76, 0x37, 0x76, 0x53, 0xb8, 0x0f, 0xe1, 0xc2, 0x2d, 0xbb, 0x08, 0x75, 0x82, + 0x0e, 0x1f, 0x5a, 0x58, 0x41, 0x10, 0x61, 0x0f, 0x53, 0x51, 0x11, 0x61, 0x44, 0xd9, 0x83, 0x2f, + 0xa3, 0xb6, 0xa1, 0x04, 0xdb, 0x9c, 0x90, 0x66, 0x85, 0x3d, 0xf9, 0xe6, 0x87, 0xf1, 0x1b, 0xf8, + 0xed, 0xa4, 0x49, 0xba, 0x39, 0xac, 0xbe, 0xa5, 0x3d, 0xbf, 0xff, 0xff, 0x9f, 0x73, 0x4e, 0xd0, + 0x7e, 0x06, 0x90, 0xe5, 0x34, 0x2c, 0x80, 0x33, 0x05, 0x92, 0xf1, 0x2c, 0xac, 0xa2, 0xb0, 0xa0, + 0x4a, 0xb2, 0x24, 0x10, 
0x12, 0x14, 0xe0, 0xbf, 0x06, 0x09, 0xb6, 0x48, 0x50, 0x45, 0xc3, 0x7f, + 0x56, 0x18, 0x0b, 0xb6, 0x83, 0x0f, 0x0f, 0x3e, 0x17, 0x8c, 0x84, 0xa6, 0x4b, 0x49, 0x4b, 0x58, + 0xc9, 0x84, 0x5a, 0xa8, 0x3d, 0x36, 0x81, 0xa2, 0x00, 0x6e, 0x90, 0xd1, 0x0b, 0xea, 0xcd, 0x81, + 0x71, 0x85, 0xa7, 0x68, 0xc0, 0xb8, 0xa2, 0xb2, 0x8a, 0x73, 0xcf, 0xf1, 0x9d, 0xb1, 0x3b, 0x19, + 0x05, 0x6d, 0x57, 0x0a, 0x1e, 0x58, 0x41, 0x6f, 0x2c, 0x49, 0x36, 0x1a, 0x7c, 0x82, 0x7a, 0x55, + 0x9c, 0xaf, 0xa8, 0xd7, 0xd1, 0x62, 0xff, 0x1b, 0xf1, 0x5a, 0xd0, 0x74, 0x51, 0x73, 0xc4, 0xe0, + 0xa3, 0xf7, 0x0e, 0x42, 0xb5, 0xe5, 0x3d, 0x95, 0x8c, 0x96, 0xf8, 0x18, 0xf5, 0x4d, 0x9f, 0xf6, + 0x12, 0xb8, 0xf1, 0x89, 0x05, 0x0b, 0xee, 0x74, 0x85, 0x58, 0x02, 0x9f, 0xa2, 0x41, 0xd3, 0xb0, + 0x4d, 0xdd, 0xdb, 0xa1, 0x9b, 0xb1, 0x10, 0x0b, 0x91, 0x0d, 0x8e, 0xaf, 0x91, 0x6b, 0x4c, 0x96, + 0xcf, 0x8c, 0xa7, 0x5e, 0xd7, 0x77, 0xc6, 0x7f, 0x26, 0x47, 0x5f, 0xb3, 0x2e, 0x68, 0x99, 0x48, + 0x26, 0x14, 0x48, 0xfb, 0xe3, 0x96, 0xf1, 0x94, 0xa0, 0x62, 0x73, 0xc6, 0x97, 0x08, 0xe9, 0x46, + 0x96, 0x6a, 0x2d, 0xa8, 0xf7, 0x4b, 0x1b, 0x1d, 0xfe, 0x68, 0xa4, 0xdb, 0xaf, 0x07, 0x41, 0x7e, + 0x57, 0xcd, 0x11, 0x47, 0xa8, 0x2f, 0xea, 0x3d, 0x94, 0x5e, 0xcf, 0xef, 0x8e, 0xdd, 0xc9, 0xff, + 0xf6, 0xf9, 0xe9, 0x5d, 0x11, 0x8b, 0xce, 0x5e, 0x1d, 0xe4, 0x25, 0x50, 0xb4, 0xa2, 0x33, 0xd7, + 0x04, 0xcf, 0xeb, 0x35, 0xcf, 0x9d, 0xc7, 0xa9, 0x85, 0x32, 0xc8, 0x63, 0x9e, 0x05, 0x20, 0xb3, + 0x30, 0xa3, 0x5c, 0x3f, 0x82, 0xd0, 0x94, 0x62, 0xc1, 0xca, 0xdd, 0xa7, 0x72, 0xb6, 0xfd, 0x7a, + 0xeb, 0x0c, 0xaf, 0x8c, 0xc1, 0x79, 0x0e, 0xab, 0xb4, 0x19, 0x6e, 0x9d, 0xb5, 0x88, 0x9e, 0xfa, + 0xda, 0x27, 0xfa, 0x08, 0x00, 0x00, 0xff, 0xff, 0xd5, 0xab, 0x3f, 0xd9, 0xe8, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go new file mode 100644 index 000000000..269d30ed8 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go @@ -0,0 +1,928 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/metric_service.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_api5 "google.golang.org/genproto/googleapis/api/metric" +import google_api4 "google.golang.org/genproto/googleapis/api/monitoredres" +import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Controls which fields are returned by `ListTimeSeries`. +type ListTimeSeriesRequest_TimeSeriesView int32 + +const ( + // Returns the identity of the metric(s), the time series, + // and the time series data. + ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0 + // Returns the identity of the metric and the time series resource, + // but not the time series data. + ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1 +) + +var ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{ + 0: "FULL", + 1: "HEADERS", +} +var ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{ + "FULL": 0, + "HEADERS": 1, +} + +func (x ListTimeSeriesRequest_TimeSeriesView) String() string { + return proto.EnumName(ListTimeSeriesRequest_TimeSeriesView_name, int32(x)) +} +func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { + return fileDescriptor4, []int{8, 0} +} + +// The `ListMonitoredResourceDescriptors` request. +type ListMonitoredResourceDescriptorsRequest struct { + // The project on which to execute the request. 
The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,5,opt,name=name" json:"name,omitempty"` + // An optional [filter](/monitoring/api/v3/filters) describing + // the descriptors to be returned. The filter can reference + // the descriptor's type and labels. For example, the + // following filter returns only Google Compute Engine descriptors + // that have an `id` label: + // + // resource.type = starts_with("gce_") AND resource.label:id + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListMonitoredResourceDescriptorsRequest) Reset() { + *m = ListMonitoredResourceDescriptorsRequest{} +} +func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor4, []int{0} +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The 
`ListMonitoredResourcDescriptors` response. +type ListMonitoredResourceDescriptorsResponse struct { + // The monitored resource descriptors that are available to this project + // and that match `filter`, if present. + ResourceDescriptors []*google_api4.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors" json:"resource_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListMonitoredResourceDescriptorsResponse) Reset() { + *m = ListMonitoredResourceDescriptorsResponse{} +} +func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor4, []int{1} +} + +func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*google_api4.MonitoredResourceDescriptor { + if m != nil { + return m.ResourceDescriptors + } + return nil +} + +func (m *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetMonitoredResourceDescriptor` request. +type GetMonitoredResourceDescriptorRequest struct { + // The monitored resource descriptor to get. The format is + // `"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}"`. + // The `{resource_type}` is a predefined type, such as + // `cloudsql_database`. 
+ Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *GetMonitoredResourceDescriptorRequest) Reset() { *m = GetMonitoredResourceDescriptorRequest{} } +func (m *GetMonitoredResourceDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} +func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor4, []int{2} +} + +func (m *GetMonitoredResourceDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListMetricDescriptors` request. +type ListMetricDescriptorsRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,5,opt,name=name" json:"name,omitempty"` + // If this field is empty, all custom and + // system-defined metric descriptors are returned. + // Otherwise, the [filter](/monitoring/api/v3/filters) + // specifies which metric descriptors are to be + // returned. For example, the following filter matches all + // [custom metrics](/monitoring/custom-metrics): + // + // metric.type = starts_with("custom.googleapis.com/") + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListMetricDescriptorsRequest) Reset() { *m = ListMetricDescriptorsRequest{} } +func (m *ListMetricDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMetricDescriptorsRequest) ProtoMessage() {} +func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } + +func (m *ListMetricDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListMetricDescriptorsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListMetricDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMetricDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListMetricDescriptors` response. +type ListMetricDescriptorsResponse struct { + // The metric descriptors that are available to the project + // and that match the value of `filter`, if present. + MetricDescriptors []*google_api5.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors" json:"metric_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListMetricDescriptorsResponse) Reset() { *m = ListMetricDescriptorsResponse{} } +func (m *ListMetricDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMetricDescriptorsResponse) ProtoMessage() {} +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } + +func (m *ListMetricDescriptorsResponse) GetMetricDescriptors() []*google_api5.MetricDescriptor { + if m != nil { + return m.MetricDescriptors + } + return nil +} + +func (m *ListMetricDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetMetricDescriptor` request. +type GetMetricDescriptorRequest struct { + // The metric descriptor on which to execute the request. The format is + // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`. + // An example value of `{metric_id}` is + // `"compute.googleapis.com/instance/disk/read_bytes_count"`. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *GetMetricDescriptorRequest) Reset() { *m = GetMetricDescriptorRequest{} } +func (m *GetMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetMetricDescriptorRequest) ProtoMessage() {} +func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } + +func (m *GetMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateMetricDescriptor` request. +type CreateMetricDescriptorRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // The new [custom metric](/monitoring/custom-metrics) + // descriptor. 
+ MetricDescriptor *google_api5.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor" json:"metric_descriptor,omitempty"` +} + +func (m *CreateMetricDescriptorRequest) Reset() { *m = CreateMetricDescriptorRequest{} } +func (m *CreateMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*CreateMetricDescriptorRequest) ProtoMessage() {} +func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } + +func (m *CreateMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateMetricDescriptorRequest) GetMetricDescriptor() *google_api5.MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +// The `DeleteMetricDescriptor` request. +type DeleteMetricDescriptorRequest struct { + // The metric descriptor on which to execute the request. The format is + // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`. + // An example of `{metric_id}` is: + // `"custom.googleapis.com/my_test_metric"`. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteMetricDescriptorRequest) Reset() { *m = DeleteMetricDescriptorRequest{} } +func (m *DeleteMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteMetricDescriptorRequest) ProtoMessage() {} +func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } + +func (m *DeleteMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListTimeSeries` request. +type ListTimeSeriesRequest struct { + // The project on which to execute the request. The format is + // "projects/{project_id_or_number}". + Name string `protobuf:"bytes,10,opt,name=name" json:"name,omitempty"` + // A [monitoring filter](/monitoring/api/v3/filters) that specifies which time + // series should be returned. 
The filter must specify a single metric type, + // and can additionally specify metric labels and other information. For + // example: + // + // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + // metric.label.instance_name = "my-instance-name" + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + // The time interval for which results should be returned. Only time series + // that contain data points in the specified interval are included + // in the response. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval" json:"interval,omitempty"` + // By default, the raw time series data is returned. + // Use this field to combine multiple time series for different + // views of the data. + Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation" json:"aggregation,omitempty"` + // Specifies the order in which the points of the time series should + // be returned. By default, results are not ordered. Currently, + // this field must be left blank. + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy" json:"order_by,omitempty"` + // Specifies which information is returned about the time series. + View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"` + // A positive number that is the maximum number of results to return. + // When `view` field sets to `FULL`, it limits the number of `Points` server + // will return; if `view` field is `HEADERS`, it limits the number of + // `TimeSeries` server will return. + PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListTimeSeriesRequest) Reset() { *m = ListTimeSeriesRequest{} } +func (m *ListTimeSeriesRequest) String() string { return proto.CompactTextString(m) } +func (*ListTimeSeriesRequest) ProtoMessage() {} +func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} } + +func (m *ListTimeSeriesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListTimeSeriesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListTimeSeriesRequest) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *ListTimeSeriesRequest) GetAggregation() *Aggregation { + if m != nil { + return m.Aggregation + } + return nil +} + +func (m *ListTimeSeriesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView { + if m != nil { + return m.View + } + return ListTimeSeriesRequest_FULL +} + +func (m *ListTimeSeriesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTimeSeriesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListTimeSeries` response. +type ListTimeSeriesResponse struct { + // One or more time series that match the filter included in the request. + TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries" json:"time_series,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListTimeSeriesResponse) Reset() { *m = ListTimeSeriesResponse{} } +func (m *ListTimeSeriesResponse) String() string { return proto.CompactTextString(m) } +func (*ListTimeSeriesResponse) ProtoMessage() {} +func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} } + +func (m *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +func (m *ListTimeSeriesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `CreateTimeSeries` request. +type CreateTimeSeriesRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + // The new data to be added to a list of time series. + // Adds at most one data point to each of several time series. The new data + // point must be more recent than any other point in its time series. Each + // `TimeSeries` value must fully specify a unique time series by supplying + // all label values for the metric and the monitored resource. 
+ TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries" json:"time_series,omitempty"` +} + +func (m *CreateTimeSeriesRequest) Reset() { *m = CreateTimeSeriesRequest{} } +func (m *CreateTimeSeriesRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesRequest) ProtoMessage() {} +func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{10} } + +func (m *CreateTimeSeriesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +// Describes the result of a failed request to write data to a time series. +type CreateTimeSeriesError struct { + // The time series, including the `Metric`, `MonitoredResource`, + // and `Point`s (including timestamp and value) that resulted + // in the error. This field provides all of the context that + // would be needed to retry the operation. + TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries" json:"time_series,omitempty"` + // The status of the requested write operation. 
+ Status *google_rpc.Status `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` +} + +func (m *CreateTimeSeriesError) Reset() { *m = CreateTimeSeriesError{} } +func (m *CreateTimeSeriesError) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesError) ProtoMessage() {} +func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{11} } + +func (m *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +func (m *CreateTimeSeriesError) GetStatus() *google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterType((*ListMonitoredResourceDescriptorsRequest)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsRequest") + proto.RegisterType((*ListMonitoredResourceDescriptorsResponse)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsResponse") + proto.RegisterType((*GetMonitoredResourceDescriptorRequest)(nil), "google.monitoring.v3.GetMonitoredResourceDescriptorRequest") + proto.RegisterType((*ListMetricDescriptorsRequest)(nil), "google.monitoring.v3.ListMetricDescriptorsRequest") + proto.RegisterType((*ListMetricDescriptorsResponse)(nil), "google.monitoring.v3.ListMetricDescriptorsResponse") + proto.RegisterType((*GetMetricDescriptorRequest)(nil), "google.monitoring.v3.GetMetricDescriptorRequest") + proto.RegisterType((*CreateMetricDescriptorRequest)(nil), "google.monitoring.v3.CreateMetricDescriptorRequest") + proto.RegisterType((*DeleteMetricDescriptorRequest)(nil), "google.monitoring.v3.DeleteMetricDescriptorRequest") + proto.RegisterType((*ListTimeSeriesRequest)(nil), "google.monitoring.v3.ListTimeSeriesRequest") + proto.RegisterType((*ListTimeSeriesResponse)(nil), "google.monitoring.v3.ListTimeSeriesResponse") + proto.RegisterType((*CreateTimeSeriesRequest)(nil), "google.monitoring.v3.CreateTimeSeriesRequest") + proto.RegisterType((*CreateTimeSeriesError)(nil), 
"google.monitoring.v3.CreateTimeSeriesError") + proto.RegisterEnum("google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView", ListTimeSeriesRequest_TimeSeriesView_name, ListTimeSeriesRequest_TimeSeriesView_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for MetricService service + +type MetricServiceClient interface { + // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. + ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. This method does not require a Stackdriver account. + GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*google_api4.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. This method does not require a Stackdriver account. + ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. This method does not require a Stackdriver account. + GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*google_api5.MetricDescriptor, error) + // Creates a new metric descriptor. + // User-created metric descriptors define + // [custom metrics](/monitoring/custom-metrics). + CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*google_api5.MetricDescriptor, error) + // Deletes a metric descriptor. 
Only user-created + // [custom metrics](/monitoring/custom-metrics) can be deleted. + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Lists time series that match a filter. This method does not require a Stackdriver account. + ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) +} + +type metricServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricServiceClient(cc *grpc.ClientConn) MetricServiceClient { + return &metricServiceClient{cc} +} + +func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + out := new(ListMonitoredResourceDescriptorsResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*google_api4.MonitoredResourceDescriptor, error) { + out := new(google_api4.MonitoredResourceDescriptor) + err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { + out := new(ListMetricDescriptorsResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*google_api5.MetricDescriptor, error) { + out := new(google_api5.MetricDescriptor) + err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*google_api5.MetricDescriptor, error) { + out := new(google_api5.MetricDescriptor) + err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) { + out := new(ListTimeSeriesResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for MetricService service + +type MetricServiceServer interface { + // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. + ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. This method does not require a Stackdriver account. + GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*google_api4.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. This method does not require a Stackdriver account. + ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. This method does not require a Stackdriver account. + GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*google_api5.MetricDescriptor, error) + // Creates a new metric descriptor. + // User-created metric descriptors define + // [custom metrics](/monitoring/custom-metrics). + CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*google_api5.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](/monitoring/custom-metrics) can be deleted. + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*google_protobuf4.Empty, error) + // Lists time series that match a filter. This method does not require a Stackdriver account. 
+ ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*google_protobuf4.Empty, error) +} + +func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { + s.RegisterService(&_MetricService_serviceDesc, srv) +} + +func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMonitoredResourceDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMonitoredResourceDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMetricDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, 
in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.MetricService", + HandlerType: (*MetricServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListMonitoredResourceDescriptors", + Handler: _MetricService_ListMonitoredResourceDescriptors_Handler, + }, + { + MethodName: "GetMonitoredResourceDescriptor", + Handler: _MetricService_GetMonitoredResourceDescriptor_Handler, + }, + { + MethodName: "ListMetricDescriptors", + Handler: _MetricService_ListMetricDescriptors_Handler, + }, + { + MethodName: "GetMetricDescriptor", + Handler: _MetricService_GetMetricDescriptor_Handler, + }, + { + MethodName: "CreateMetricDescriptor", + Handler: _MetricService_CreateMetricDescriptor_Handler, + }, + { + MethodName: "DeleteMetricDescriptor", + Handler: _MetricService_DeleteMetricDescriptor_Handler, + }, + { + MethodName: "ListTimeSeries", + Handler: _MetricService_ListTimeSeries_Handler, + }, + { + MethodName: "CreateTimeSeries", + Handler: _MetricService_CreateTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/metric_service.proto", +} + +func init() { proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 1001 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4f, 0x73, 0xdb, 0x44, + 0x14, 0x67, 0x93, 0x34, 0x71, 0x9e, 0xa7, 0x21, 0xdd, 0xb6, 0xae, 
0x51, 0x13, 0xc6, 0x15, 0x53, + 0xe2, 0xba, 0x45, 0xea, 0xd8, 0x1d, 0x0e, 0x49, 0x9b, 0x99, 0xfc, 0xa3, 0x74, 0x08, 0x4c, 0x46, + 0x2e, 0x3d, 0xf4, 0xe2, 0x51, 0x9c, 0x57, 0xcd, 0x82, 0xa5, 0x15, 0xab, 0xb5, 0x4b, 0xca, 0x84, + 0x03, 0xcc, 0xe4, 0xc6, 0x81, 0x01, 0x66, 0xe0, 0x2b, 0x70, 0x80, 0xe1, 0x3b, 0xf0, 0x0d, 0x38, + 0x73, 0xe3, 0x2b, 0x70, 0x67, 0xb4, 0x92, 0x63, 0x5b, 0x96, 0x64, 0x9b, 0x4b, 0x6f, 0x96, 0xf6, + 0xfd, 0xf9, 0xbd, 0xdf, 0x7b, 0xfb, 0x7e, 0x32, 0xdc, 0x71, 0x38, 0x77, 0x3a, 0x68, 0xba, 0xdc, + 0x63, 0x92, 0x0b, 0xe6, 0x39, 0x66, 0xaf, 0x61, 0xba, 0x28, 0x05, 0x6b, 0xb7, 0x02, 0x14, 0x3d, + 0xd6, 0x46, 0xc3, 0x17, 0x5c, 0x72, 0x7a, 0x2d, 0x32, 0x35, 0x06, 0xa6, 0x46, 0xaf, 0xa1, 0xad, + 0xc5, 0x01, 0x6c, 0x9f, 0x99, 0xb6, 0xe7, 0x71, 0x69, 0x4b, 0xc6, 0xbd, 0x20, 0xf2, 0xd1, 0x6e, + 0x0c, 0x9d, 0x46, 0x41, 0xe3, 0x83, 0x77, 0x86, 0x0f, 0xa2, 0x80, 0x78, 0xd2, 0x12, 0x18, 0xf0, + 0xae, 0xe8, 0x67, 0xd4, 0x6e, 0xa5, 0x82, 0x6b, 0x73, 0xd7, 0xe5, 0x5e, 0xae, 0xc9, 0x48, 0xaa, + 0x9b, 0xb1, 0x89, 0x7a, 0x3a, 0xee, 0xbe, 0x30, 0xd1, 0xf5, 0xe5, 0x69, 0x02, 0xa0, 0xf0, 0xdb, + 0x66, 0x20, 0x6d, 0xd9, 0x8d, 0x91, 0xeb, 0xdf, 0x13, 0xd8, 0x38, 0x64, 0x81, 0xfc, 0xb8, 0x0f, + 0xce, 0x8a, 0xb1, 0xed, 0x63, 0xd0, 0x16, 0xcc, 0x97, 0x5c, 0x04, 0x16, 0x7e, 0xd1, 0xc5, 0x40, + 0x52, 0x0a, 0x0b, 0x9e, 0xed, 0x62, 0xf9, 0x52, 0x85, 0x54, 0x97, 0x2d, 0xf5, 0x9b, 0x96, 0x60, + 0xf1, 0x05, 0xeb, 0x48, 0x14, 0xe5, 0x39, 0xf5, 0x36, 0x7e, 0xa2, 0x37, 0x61, 0xd9, 0xb7, 0x1d, + 0x6c, 0x05, 0xec, 0x15, 0x96, 0xe7, 0x2b, 0xa4, 0x7a, 0xc9, 0x2a, 0x84, 0x2f, 0x9a, 0xec, 0x15, + 0xd2, 0x75, 0x00, 0x75, 0x28, 0xf9, 0xe7, 0xe8, 0x95, 0x17, 0x94, 0xa3, 0x32, 0x7f, 0x1a, 0xbe, + 0xd0, 0x7f, 0x23, 0x50, 0x9d, 0x8c, 0x29, 0xf0, 0xb9, 0x17, 0x20, 0x7d, 0x0e, 0xd7, 0xfa, 0x74, + 0xb6, 0x4e, 0x06, 0xe7, 0x65, 0x52, 0x99, 0xaf, 0x16, 0xeb, 0x1b, 0x46, 0xdc, 0x4d, 0xdb, 0x67, + 0x46, 0x4e, 0x3c, 0xeb, 0xaa, 0x18, 0xcf, 0x41, 0xdf, 0x85, 0x37, 0x3d, 0xfc, 0x52, 0xb6, 0x86, + 0xc0, 
0x46, 0x55, 0x5e, 0x0e, 0x5f, 0x1f, 0x5d, 0x00, 0xde, 0x82, 0xdb, 0x8f, 0x31, 0x0f, 0x6e, + 0x92, 0xc1, 0xf9, 0x01, 0x83, 0xfa, 0x39, 0x81, 0x35, 0x55, 0xad, 0x6a, 0xe6, 0x6b, 0xa4, 0xfd, + 0x47, 0x02, 0xeb, 0x19, 0x40, 0x62, 0xae, 0x3f, 0x02, 0x1a, 0x5f, 0x99, 0x71, 0xa6, 0xd7, 0x46, + 0x98, 0x4e, 0x84, 0xb0, 0xae, 0xb8, 0xc9, 0xa0, 0x53, 0x93, 0x7b, 0x1f, 0xb4, 0x90, 0xdc, 0x64, + 0xc4, 0x1c, 0x46, 0xbf, 0x86, 0xf5, 0x3d, 0x81, 0xb6, 0xc4, 0x19, 0x9c, 0xe8, 0x13, 0xb8, 0x32, + 0x56, 0x9b, 0x02, 0x34, 0xa9, 0xb4, 0xd5, 0x64, 0x69, 0x7a, 0x03, 0xd6, 0xf7, 0xb1, 0x83, 0x33, + 0xe5, 0xd7, 0x7f, 0x9e, 0x87, 0xeb, 0x21, 0xfb, 0x4f, 0x99, 0x8b, 0x4d, 0x14, 0x0c, 0xc7, 0xfa, + 0x0f, 0x53, 0xf4, 0x7f, 0x1b, 0x0a, 0xcc, 0x93, 0x28, 0x7a, 0x76, 0x47, 0x35, 0xb8, 0x58, 0xd7, + 0x8d, 0xb4, 0x7d, 0x66, 0x84, 0x69, 0x9e, 0xc4, 0x96, 0xd6, 0x85, 0x0f, 0xdd, 0x83, 0xa2, 0xed, + 0x38, 0x02, 0x1d, 0xb5, 0xde, 0xd4, 0xc8, 0x15, 0xeb, 0xb7, 0xd2, 0x43, 0xec, 0x0c, 0x0c, 0xad, + 0x61, 0x2f, 0xfa, 0x16, 0x14, 0xb8, 0x38, 0x41, 0xd1, 0x3a, 0x3e, 0x2d, 0x2f, 0x2a, 0x78, 0x4b, + 0xea, 0x79, 0xf7, 0x94, 0x7e, 0x02, 0x0b, 0x3d, 0x86, 0x2f, 0xcb, 0x4b, 0x15, 0x52, 0x5d, 0xa9, + 0x6f, 0xa6, 0x07, 0x4e, 0xa5, 0xc1, 0x18, 0xbc, 0x79, 0xc6, 0xf0, 0xa5, 0xa5, 0xe2, 0x8c, 0xce, + 0x7b, 0x21, 0x77, 0xde, 0x97, 0x93, 0xf3, 0xbe, 0x01, 0x2b, 0xa3, 0x31, 0x69, 0x01, 0x16, 0x3e, + 0xf8, 0xf4, 0xf0, 0x70, 0xf5, 0x0d, 0x5a, 0x84, 0xa5, 0x0f, 0x0f, 0x76, 0xf6, 0x0f, 0xac, 0xe6, + 0x2a, 0xd1, 0xbf, 0x25, 0x50, 0x4a, 0x62, 0x8a, 0x6f, 0xc4, 0x0e, 0x14, 0x25, 0x73, 0x31, 0x94, + 0x10, 0x86, 0xfd, 0xab, 0x50, 0xc9, 0xa6, 0x3c, 0x76, 0x07, 0x79, 0xf1, 0x7b, 0xea, 0x7b, 0xe0, + 0xc3, 0x8d, 0x68, 0xaa, 0xb3, 0x27, 0x64, 0x78, 0x9e, 0x13, 0xc8, 0xe6, 0x66, 0x47, 0x16, 0x6e, + 0xa6, 0xeb, 0xc9, 0x94, 0x07, 0x42, 0x70, 0x31, 0x5e, 0x36, 0x99, 0xb9, 0xec, 0x1a, 0x2c, 0x46, + 0x42, 0x14, 0x5f, 0x32, 0xda, 0xf7, 0x16, 0x7e, 0xdb, 0x68, 0xaa, 0x13, 0x2b, 0xb6, 0xa8, 0xff, + 0x0b, 0x70, 0x39, 0xba, 0x4b, 0xcd, 0x48, 
0xaa, 0xe9, 0xdf, 0x04, 0x2a, 0x93, 0x24, 0x82, 0x3e, + 0xca, 0x1e, 0xaf, 0x29, 0xe4, 0x4e, 0xdb, 0xfe, 0xbf, 0xee, 0xd1, 0x6c, 0xe8, 0x9b, 0xdf, 0xfc, + 0xf5, 0xcf, 0x0f, 0x73, 0x0f, 0x68, 0x3d, 0x94, 0xea, 0xaf, 0xc2, 0xa6, 0x3c, 0xf2, 0x05, 0xff, + 0x0c, 0xdb, 0x32, 0x30, 0x6b, 0x67, 0x83, 0xcf, 0x81, 0x34, 0xe8, 0x7f, 0x12, 0x78, 0x3b, 0x5f, + 0x52, 0xe8, 0x56, 0x3a, 0xbc, 0xa9, 0x84, 0x48, 0x9b, 0x56, 0x17, 0xf5, 0x87, 0xaa, 0x88, 0xf7, + 0xe9, 0x83, 0xb4, 0x22, 0x72, 0x6b, 0x30, 0x6b, 0x67, 0xf4, 0x0f, 0x12, 0x2d, 0xb5, 0x31, 0x49, + 0xa1, 0xf5, 0x1c, 0x72, 0x33, 0x84, 0x50, 0x6b, 0xcc, 0xe4, 0x13, 0x77, 0xc1, 0x54, 0x05, 0xdc, + 0xa1, 0x1b, 0x19, 0x5d, 0x18, 0x43, 0xf6, 0x0b, 0x81, 0xab, 0x29, 0x82, 0x43, 0xef, 0x67, 0xf3, + 0x9d, 0xbe, 0xe6, 0xb5, 0x5c, 0xdd, 0xd0, 0xeb, 0x0a, 0xd8, 0x3d, 0x5a, 0x4b, 0x67, 0x36, 0x89, + 0xcb, 0xac, 0xd5, 0xce, 0xe8, 0xef, 0x04, 0x4a, 0xe9, 0xd2, 0x46, 0x33, 0xc8, 0xc9, 0x15, 0xc2, + 0x09, 0x08, 0x77, 0x15, 0xc2, 0x87, 0xfa, 0xb4, 0xd4, 0x6d, 0x8e, 0x2b, 0x68, 0xc8, 0x66, 0x29, + 0x5d, 0x0c, 0xb3, 0x10, 0xe7, 0x4a, 0xa7, 0x56, 0xea, 0x3b, 0xf5, 0x3f, 0x73, 0x8d, 0x83, 0xf0, + 0x33, 0xb7, 0xcf, 0x66, 0x6d, 0x16, 0x36, 0x7f, 0x22, 0xb0, 0x32, 0xba, 0xd7, 0xe9, 0xdd, 0x19, + 0x14, 0x49, 0xbb, 0x37, 0x9d, 0x71, 0x3c, 0x88, 0x55, 0x85, 0x50, 0xa7, 0x95, 0x74, 0x36, 0x87, + 0x56, 0xe3, 0x39, 0x81, 0xd5, 0xe4, 0xde, 0xa5, 0xef, 0xe5, 0xf5, 0x77, 0x1c, 0x5b, 0x16, 0x4f, + 0x77, 0x15, 0x8a, 0xdb, 0xfa, 0x44, 0x14, 0x9b, 0xa4, 0xb6, 0xfb, 0x1d, 0x81, 0x72, 0x9b, 0xbb, + 0xa9, 0x99, 0x77, 0xe9, 0xc8, 0x46, 0x3e, 0x0a, 0xd3, 0x1c, 0x91, 0xe7, 0xdb, 0xb1, 0xad, 0xc3, + 0x3b, 0xb6, 0xe7, 0x18, 0x5c, 0x38, 0xa6, 0x83, 0x9e, 0x02, 0x61, 0x46, 0x47, 0xb6, 0xcf, 0x82, + 0xd1, 0xff, 0x31, 0x5b, 0x83, 0xa7, 0x5f, 0xe7, 0xb4, 0xc7, 0x51, 0x80, 0xbd, 0x0e, 0xef, 0x9e, + 0xf4, 0x57, 0x53, 0x98, 0xf2, 0x59, 0xe3, 0x78, 0x51, 0xc5, 0x69, 0xfc, 0x17, 0x00, 0x00, 0xff, + 0xff, 0x3c, 0x75, 0xce, 0xa1, 0xce, 0x0d, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/dlp.pb.go b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/dlp.pb.go new file mode 100644 index 000000000..479913d22 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/dlp.pb.go @@ -0,0 +1,2238 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/privacy/dlp/v2beta1/dlp.proto + +/* +Package dlp is a generated protocol buffer package. + +It is generated from these files: + google/privacy/dlp/v2beta1/dlp.proto + google/privacy/dlp/v2beta1/storage.proto + +It has these top-level messages: + InspectConfig + OperationConfig + ContentItem + Table + InspectResult + Finding + Location + TableLocation + Range + ImageLocation + RedactContentRequest + Color + RedactContentResponse + InspectContentRequest + InspectContentResponse + CreateInspectOperationRequest + OutputStorageConfig + InfoTypeStatistics + InspectOperationMetadata + InspectOperationResult + ListInspectFindingsRequest + ListInspectFindingsResponse + InfoTypeDescription + ListInfoTypesRequest + ListInfoTypesResponse + CategoryDescription + ListRootCategoriesRequest + ListRootCategoriesResponse + Value + InfoType + FieldId + PartitionId + KindExpression + PropertyReference + Projection + DatastoreOptions + CloudStorageOptions + CloudStoragePath + BigQueryOptions + StorageConfig + CloudStorageKey + DatastoreKey + Key + RecordKey + BigQueryTable +*/ +package dlp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import _ "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" +import google_type "google.golang.org/genproto/googleapis/type/date" +import google_type1 "google.golang.org/genproto/googleapis/type/timeofday" + +import ( + context 
"golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Categorization of results based on how likely they are to represent a match, +// based on the number of elements they contain which imply a match. +type Likelihood int32 + +const ( + // Default value; information with all likelihoods is included. + Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0 + // Few matching elements. + Likelihood_VERY_UNLIKELY Likelihood = 1 + Likelihood_UNLIKELY Likelihood = 2 + // Some matching elements. + Likelihood_POSSIBLE Likelihood = 3 + Likelihood_LIKELY Likelihood = 4 + // Many matching elements. + Likelihood_VERY_LIKELY Likelihood = 5 +) + +var Likelihood_name = map[int32]string{ + 0: "LIKELIHOOD_UNSPECIFIED", + 1: "VERY_UNLIKELY", + 2: "UNLIKELY", + 3: "POSSIBLE", + 4: "LIKELY", + 5: "VERY_LIKELY", +} +var Likelihood_value = map[string]int32{ + "LIKELIHOOD_UNSPECIFIED": 0, + "VERY_UNLIKELY": 1, + "UNLIKELY": 2, + "POSSIBLE": 3, + "LIKELY": 4, + "VERY_LIKELY": 5, +} + +func (x Likelihood) String() string { + return proto.EnumName(Likelihood_name, int32(x)) +} +func (Likelihood) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Configuration description of the scanning process. +// When used with redactContent only info_types and min_likelihood are currently +// used. +type InspectConfig struct { + // Restricts what info_types to look for. The values must correspond to + // InfoType values returned by ListInfoTypes or found in documentation. 
+ // Empty info_types runs all enabled detectors. + InfoTypes []*InfoType `protobuf:"bytes,1,rep,name=info_types,json=infoTypes" json:"info_types,omitempty"` + // Only returns findings equal or above this threshold. + MinLikelihood Likelihood `protobuf:"varint,2,opt,name=min_likelihood,json=minLikelihood,enum=google.privacy.dlp.v2beta1.Likelihood" json:"min_likelihood,omitempty"` + // Limits the number of findings per content item or long running operation. + MaxFindings int32 `protobuf:"varint,3,opt,name=max_findings,json=maxFindings" json:"max_findings,omitempty"` + // When true, a contextual quote from the data that triggered a finding is + // included in the response; see Finding.quote. + IncludeQuote bool `protobuf:"varint,4,opt,name=include_quote,json=includeQuote" json:"include_quote,omitempty"` + // When true, excludes type information of the findings. + ExcludeTypes bool `protobuf:"varint,6,opt,name=exclude_types,json=excludeTypes" json:"exclude_types,omitempty"` + // Configuration of findings limit given for specified info types. 
+ InfoTypeLimits []*InspectConfig_InfoTypeLimit `protobuf:"bytes,7,rep,name=info_type_limits,json=infoTypeLimits" json:"info_type_limits,omitempty"` +} + +func (m *InspectConfig) Reset() { *m = InspectConfig{} } +func (m *InspectConfig) String() string { return proto.CompactTextString(m) } +func (*InspectConfig) ProtoMessage() {} +func (*InspectConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *InspectConfig) GetInfoTypes() []*InfoType { + if m != nil { + return m.InfoTypes + } + return nil +} + +func (m *InspectConfig) GetMinLikelihood() Likelihood { + if m != nil { + return m.MinLikelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +func (m *InspectConfig) GetMaxFindings() int32 { + if m != nil { + return m.MaxFindings + } + return 0 +} + +func (m *InspectConfig) GetIncludeQuote() bool { + if m != nil { + return m.IncludeQuote + } + return false +} + +func (m *InspectConfig) GetExcludeTypes() bool { + if m != nil { + return m.ExcludeTypes + } + return false +} + +func (m *InspectConfig) GetInfoTypeLimits() []*InspectConfig_InfoTypeLimit { + if m != nil { + return m.InfoTypeLimits + } + return nil +} + +// Max findings configuration per info type, per content item or long running +// operation. +type InspectConfig_InfoTypeLimit struct { + // Type of information the findings limit applies to. Only one limit per + // info_type should be provided. If InfoTypeLimit does not have an + // info_type, the DLP API applies the limit against all info_types that are + // found but not specified in another InfoTypeLimit. + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Max findings limit for the given infoType. 
+ MaxFindings int32 `protobuf:"varint,2,opt,name=max_findings,json=maxFindings" json:"max_findings,omitempty"` +} + +func (m *InspectConfig_InfoTypeLimit) Reset() { *m = InspectConfig_InfoTypeLimit{} } +func (m *InspectConfig_InfoTypeLimit) String() string { return proto.CompactTextString(m) } +func (*InspectConfig_InfoTypeLimit) ProtoMessage() {} +func (*InspectConfig_InfoTypeLimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +func (m *InspectConfig_InfoTypeLimit) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *InspectConfig_InfoTypeLimit) GetMaxFindings() int32 { + if m != nil { + return m.MaxFindings + } + return 0 +} + +// Additional configuration for inspect long running operations. +type OperationConfig struct { + // Max number of findings per file, Datastore entity, or database row. + MaxItemFindings int64 `protobuf:"varint,1,opt,name=max_item_findings,json=maxItemFindings" json:"max_item_findings,omitempty"` +} + +func (m *OperationConfig) Reset() { *m = OperationConfig{} } +func (m *OperationConfig) String() string { return proto.CompactTextString(m) } +func (*OperationConfig) ProtoMessage() {} +func (*OperationConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *OperationConfig) GetMaxItemFindings() int64 { + if m != nil { + return m.MaxItemFindings + } + return 0 +} + +// Container structure for the content to inspect. +type ContentItem struct { + // Type of the content, as defined in Content-Type HTTP header. + // Supported types are: all "text" types, octet streams, PNG images, + // JPEG images. + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // Data of the item either in the byte array or UTF-8 string form. 
+ // + // Types that are valid to be assigned to DataItem: + // *ContentItem_Data + // *ContentItem_Value + // *ContentItem_Table + DataItem isContentItem_DataItem `protobuf_oneof:"data_item"` +} + +func (m *ContentItem) Reset() { *m = ContentItem{} } +func (m *ContentItem) String() string { return proto.CompactTextString(m) } +func (*ContentItem) ProtoMessage() {} +func (*ContentItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isContentItem_DataItem interface { + isContentItem_DataItem() +} + +type ContentItem_Data struct { + Data []byte `protobuf:"bytes,2,opt,name=data,proto3,oneof"` +} +type ContentItem_Value struct { + Value string `protobuf:"bytes,3,opt,name=value,oneof"` +} +type ContentItem_Table struct { + Table *Table `protobuf:"bytes,4,opt,name=table,oneof"` +} + +func (*ContentItem_Data) isContentItem_DataItem() {} +func (*ContentItem_Value) isContentItem_DataItem() {} +func (*ContentItem_Table) isContentItem_DataItem() {} + +func (m *ContentItem) GetDataItem() isContentItem_DataItem { + if m != nil { + return m.DataItem + } + return nil +} + +func (m *ContentItem) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ContentItem) GetData() []byte { + if x, ok := m.GetDataItem().(*ContentItem_Data); ok { + return x.Data + } + return nil +} + +func (m *ContentItem) GetValue() string { + if x, ok := m.GetDataItem().(*ContentItem_Value); ok { + return x.Value + } + return "" +} + +func (m *ContentItem) GetTable() *Table { + if x, ok := m.GetDataItem().(*ContentItem_Table); ok { + return x.Table + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ContentItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ContentItem_OneofMarshaler, _ContentItem_OneofUnmarshaler, _ContentItem_OneofSizer, []interface{}{ + (*ContentItem_Data)(nil), + (*ContentItem_Value)(nil), + (*ContentItem_Table)(nil), + } +} + +func _ContentItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ContentItem) + // data_item + switch x := m.DataItem.(type) { + case *ContentItem_Data: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *ContentItem_Value: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Value) + case *ContentItem_Table: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Table); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ContentItem.DataItem has unexpected type %T", x) + } + return nil +} + +func _ContentItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ContentItem) + switch tag { + case 2: // data_item.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.DataItem = &ContentItem_Data{x} + return true, err + case 3: // data_item.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.DataItem = &ContentItem_Value{x} + return true, err + case 4: // data_item.table + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Table) + err := b.DecodeMessage(msg) + m.DataItem = &ContentItem_Table{msg} + return true, err + default: + return false, nil + } +} + +func _ContentItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ContentItem) + // data_item + switch x := m.DataItem.(type) { + case *ContentItem_Data: + n += proto.SizeVarint(2<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *ContentItem_Value: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Value))) + n += len(x.Value) + case *ContentItem_Table: + s := proto.Size(x.Table) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Structured content to inspect. Up to 50,000 `Value`s per request allowed. +type Table struct { + Headers []*FieldId `protobuf:"bytes,1,rep,name=headers" json:"headers,omitempty"` + Rows []*Table_Row `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` +} + +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} +func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Table) GetHeaders() []*FieldId { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Table) GetRows() []*Table_Row { + if m != nil { + return m.Rows + } + return nil +} + +type Table_Row struct { + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *Table_Row) Reset() { *m = Table_Row{} } +func (m *Table_Row) String() string { return proto.CompactTextString(m) } +func (*Table_Row) ProtoMessage() {} +func (*Table_Row) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } + +func (m *Table_Row) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// All the findings for a single scanned item. +type InspectResult struct { + // List of findings for an item. + Findings []*Finding `protobuf:"bytes,1,rep,name=findings" json:"findings,omitempty"` + // If true, then this item might have more findings than were returned, + // and the findings returned are an arbitrary subset of all findings. 
+ // The findings list might be truncated because the input items were too + // large, or because the server reached the maximum amount of resources + // allowed for a single API call. For best results, divide the input into + // smaller batches. + FindingsTruncated bool `protobuf:"varint,2,opt,name=findings_truncated,json=findingsTruncated" json:"findings_truncated,omitempty"` +} + +func (m *InspectResult) Reset() { *m = InspectResult{} } +func (m *InspectResult) String() string { return proto.CompactTextString(m) } +func (*InspectResult) ProtoMessage() {} +func (*InspectResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *InspectResult) GetFindings() []*Finding { + if m != nil { + return m.Findings + } + return nil +} + +func (m *InspectResult) GetFindingsTruncated() bool { + if m != nil { + return m.FindingsTruncated + } + return false +} + +// Container structure describing a single finding within a string or image. +type Finding struct { + // The specific string that may be potentially sensitive info. + Quote string `protobuf:"bytes,1,opt,name=quote" json:"quote,omitempty"` + // The specific type of info the string might be. + InfoType *InfoType `protobuf:"bytes,2,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Estimate of how likely it is that the info_type is correct. + Likelihood Likelihood `protobuf:"varint,3,opt,name=likelihood,enum=google.privacy.dlp.v2beta1.Likelihood" json:"likelihood,omitempty"` + // Location of the info found. + Location *Location `protobuf:"bytes,4,opt,name=location" json:"location,omitempty"` + // Timestamp when finding was detected. 
+ CreateTime *google_protobuf3.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime" json:"create_time,omitempty"` +} + +func (m *Finding) Reset() { *m = Finding{} } +func (m *Finding) String() string { return proto.CompactTextString(m) } +func (*Finding) ProtoMessage() {} +func (*Finding) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Finding) GetQuote() string { + if m != nil { + return m.Quote + } + return "" +} + +func (m *Finding) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *Finding) GetLikelihood() Likelihood { + if m != nil { + return m.Likelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +func (m *Finding) GetLocation() *Location { + if m != nil { + return m.Location + } + return nil +} + +func (m *Finding) GetCreateTime() *google_protobuf3.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +// Specifies the location of a finding within its source item. +type Location struct { + // Zero-based byte offsets within a content item. + ByteRange *Range `protobuf:"bytes,1,opt,name=byte_range,json=byteRange" json:"byte_range,omitempty"` + // Character offsets within a content item, included when content type + // is a text. Default charset assumed to be UTF-8. + CodepointRange *Range `protobuf:"bytes,2,opt,name=codepoint_range,json=codepointRange" json:"codepoint_range,omitempty"` + // Location within an image's pixels. + ImageBoxes []*ImageLocation `protobuf:"bytes,3,rep,name=image_boxes,json=imageBoxes" json:"image_boxes,omitempty"` + // Key of the finding. + RecordKey *RecordKey `protobuf:"bytes,4,opt,name=record_key,json=recordKey" json:"record_key,omitempty"` + // Field id of the field containing the finding. + FieldId *FieldId `protobuf:"bytes,5,opt,name=field_id,json=fieldId" json:"field_id,omitempty"` + // Location within a `ContentItem.Table`. 
+ TableLocation *TableLocation `protobuf:"bytes,6,opt,name=table_location,json=tableLocation" json:"table_location,omitempty"` +} + +func (m *Location) Reset() { *m = Location{} } +func (m *Location) String() string { return proto.CompactTextString(m) } +func (*Location) ProtoMessage() {} +func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Location) GetByteRange() *Range { + if m != nil { + return m.ByteRange + } + return nil +} + +func (m *Location) GetCodepointRange() *Range { + if m != nil { + return m.CodepointRange + } + return nil +} + +func (m *Location) GetImageBoxes() []*ImageLocation { + if m != nil { + return m.ImageBoxes + } + return nil +} + +func (m *Location) GetRecordKey() *RecordKey { + if m != nil { + return m.RecordKey + } + return nil +} + +func (m *Location) GetFieldId() *FieldId { + if m != nil { + return m.FieldId + } + return nil +} + +func (m *Location) GetTableLocation() *TableLocation { + if m != nil { + return m.TableLocation + } + return nil +} + +// Location of a finding within a `ContentItem.Table`. +type TableLocation struct { + // The zero-based index of the row where the finding is located. + RowIndex int64 `protobuf:"varint,1,opt,name=row_index,json=rowIndex" json:"row_index,omitempty"` +} + +func (m *TableLocation) Reset() { *m = TableLocation{} } +func (m *TableLocation) String() string { return proto.CompactTextString(m) } +func (*TableLocation) ProtoMessage() {} +func (*TableLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *TableLocation) GetRowIndex() int64 { + if m != nil { + return m.RowIndex + } + return 0 +} + +// Generic half-open interval [start, end) +type Range struct { + // Index of the first character of the range (inclusive). + Start int64 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + // Index of the last character of the range (exclusive). 
+ End int64 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` +} + +func (m *Range) Reset() { *m = Range{} } +func (m *Range) String() string { return proto.CompactTextString(m) } +func (*Range) ProtoMessage() {} +func (*Range) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Range) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Range) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +// Bounding box encompassing detected text within an image. +type ImageLocation struct { + // Top coordinate of the bounding box. (0,0) is upper left. + Top int32 `protobuf:"varint,1,opt,name=top" json:"top,omitempty"` + // Left coordinate of the bounding box. (0,0) is upper left. + Left int32 `protobuf:"varint,2,opt,name=left" json:"left,omitempty"` + // Width of the bounding box in pixels. + Width int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"` + // Height of the bounding box in pixels. + Height int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"` +} + +func (m *ImageLocation) Reset() { *m = ImageLocation{} } +func (m *ImageLocation) String() string { return proto.CompactTextString(m) } +func (*ImageLocation) ProtoMessage() {} +func (*ImageLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ImageLocation) GetTop() int32 { + if m != nil { + return m.Top + } + return 0 +} + +func (m *ImageLocation) GetLeft() int32 { + if m != nil { + return m.Left + } + return 0 +} + +func (m *ImageLocation) GetWidth() int32 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *ImageLocation) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +// Request to search for potentially sensitive info in a list of items +// and replace it with a default or provided content. +type RedactContentRequest struct { + // Configuration for the inspector. 
+ InspectConfig *InspectConfig `protobuf:"bytes,1,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` + // The list of items to inspect. Up to 100 are allowed per request. + Items []*ContentItem `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` + // The strings to replace findings text findings with. Must specify at least + // one of these or one ImageRedactionConfig if redacting images. + ReplaceConfigs []*RedactContentRequest_ReplaceConfig `protobuf:"bytes,3,rep,name=replace_configs,json=replaceConfigs" json:"replace_configs,omitempty"` + // The configuration for specifying what content to redact from images. + ImageRedactionConfigs []*RedactContentRequest_ImageRedactionConfig `protobuf:"bytes,4,rep,name=image_redaction_configs,json=imageRedactionConfigs" json:"image_redaction_configs,omitempty"` +} + +func (m *RedactContentRequest) Reset() { *m = RedactContentRequest{} } +func (m *RedactContentRequest) String() string { return proto.CompactTextString(m) } +func (*RedactContentRequest) ProtoMessage() {} +func (*RedactContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *RedactContentRequest) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +func (m *RedactContentRequest) GetItems() []*ContentItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *RedactContentRequest) GetReplaceConfigs() []*RedactContentRequest_ReplaceConfig { + if m != nil { + return m.ReplaceConfigs + } + return nil +} + +func (m *RedactContentRequest) GetImageRedactionConfigs() []*RedactContentRequest_ImageRedactionConfig { + if m != nil { + return m.ImageRedactionConfigs + } + return nil +} + +type RedactContentRequest_ReplaceConfig struct { + // Type of information to replace. Only one ReplaceConfig per info_type + // should be provided. 
If ReplaceConfig does not have an info_type, the DLP + // API matches it against all info_types that are found but not specified in + // another ReplaceConfig. + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Content replacing sensitive information of given type. Max 256 chars. + ReplaceWith string `protobuf:"bytes,2,opt,name=replace_with,json=replaceWith" json:"replace_with,omitempty"` +} + +func (m *RedactContentRequest_ReplaceConfig) Reset() { *m = RedactContentRequest_ReplaceConfig{} } +func (m *RedactContentRequest_ReplaceConfig) String() string { return proto.CompactTextString(m) } +func (*RedactContentRequest_ReplaceConfig) ProtoMessage() {} +func (*RedactContentRequest_ReplaceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{10, 0} +} + +func (m *RedactContentRequest_ReplaceConfig) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *RedactContentRequest_ReplaceConfig) GetReplaceWith() string { + if m != nil { + return m.ReplaceWith + } + return "" +} + +// Configuration for determing how redaction of images should occur. +type RedactContentRequest_ImageRedactionConfig struct { + // Type of information to redact from images. + // + // Types that are valid to be assigned to Target: + // *RedactContentRequest_ImageRedactionConfig_InfoType + // *RedactContentRequest_ImageRedactionConfig_RedactAllText + Target isRedactContentRequest_ImageRedactionConfig_Target `protobuf_oneof:"target"` + // The color to use when redacting content from an image. If not specified, + // the default is black. 
+ RedactionColor *Color `protobuf:"bytes,3,opt,name=redaction_color,json=redactionColor" json:"redaction_color,omitempty"` +} + +func (m *RedactContentRequest_ImageRedactionConfig) Reset() { + *m = RedactContentRequest_ImageRedactionConfig{} +} +func (m *RedactContentRequest_ImageRedactionConfig) String() string { return proto.CompactTextString(m) } +func (*RedactContentRequest_ImageRedactionConfig) ProtoMessage() {} +func (*RedactContentRequest_ImageRedactionConfig) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{10, 1} +} + +type isRedactContentRequest_ImageRedactionConfig_Target interface { + isRedactContentRequest_ImageRedactionConfig_Target() +} + +type RedactContentRequest_ImageRedactionConfig_InfoType struct { + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType,oneof"` +} +type RedactContentRequest_ImageRedactionConfig_RedactAllText struct { + RedactAllText bool `protobuf:"varint,2,opt,name=redact_all_text,json=redactAllText,oneof"` +} + +func (*RedactContentRequest_ImageRedactionConfig_InfoType) isRedactContentRequest_ImageRedactionConfig_Target() { +} +func (*RedactContentRequest_ImageRedactionConfig_RedactAllText) isRedactContentRequest_ImageRedactionConfig_Target() { +} + +func (m *RedactContentRequest_ImageRedactionConfig) GetTarget() isRedactContentRequest_ImageRedactionConfig_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *RedactContentRequest_ImageRedactionConfig) GetInfoType() *InfoType { + if x, ok := m.GetTarget().(*RedactContentRequest_ImageRedactionConfig_InfoType); ok { + return x.InfoType + } + return nil +} + +func (m *RedactContentRequest_ImageRedactionConfig) GetRedactAllText() bool { + if x, ok := m.GetTarget().(*RedactContentRequest_ImageRedactionConfig_RedactAllText); ok { + return x.RedactAllText + } + return false +} + +func (m *RedactContentRequest_ImageRedactionConfig) GetRedactionColor() *Color { + if m != nil { + return m.RedactionColor + } + return nil +} + +// 
XXX_OneofFuncs is for the internal use of the proto package. +func (*RedactContentRequest_ImageRedactionConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RedactContentRequest_ImageRedactionConfig_OneofMarshaler, _RedactContentRequest_ImageRedactionConfig_OneofUnmarshaler, _RedactContentRequest_ImageRedactionConfig_OneofSizer, []interface{}{ + (*RedactContentRequest_ImageRedactionConfig_InfoType)(nil), + (*RedactContentRequest_ImageRedactionConfig_RedactAllText)(nil), + } +} + +func _RedactContentRequest_ImageRedactionConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RedactContentRequest_ImageRedactionConfig) + // target + switch x := m.Target.(type) { + case *RedactContentRequest_ImageRedactionConfig_InfoType: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InfoType); err != nil { + return err + } + case *RedactContentRequest_ImageRedactionConfig_RedactAllText: + t := uint64(0) + if x.RedactAllText { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("RedactContentRequest_ImageRedactionConfig.Target has unexpected type %T", x) + } + return nil +} + +func _RedactContentRequest_ImageRedactionConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RedactContentRequest_ImageRedactionConfig) + switch tag { + case 1: // target.info_type + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InfoType) + err := b.DecodeMessage(msg) + m.Target = &RedactContentRequest_ImageRedactionConfig_InfoType{msg} + return true, err + case 2: // target.redact_all_text + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Target = 
&RedactContentRequest_ImageRedactionConfig_RedactAllText{x != 0} + return true, err + default: + return false, nil + } +} + +func _RedactContentRequest_ImageRedactionConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RedactContentRequest_ImageRedactionConfig) + // target + switch x := m.Target.(type) { + case *RedactContentRequest_ImageRedactionConfig_InfoType: + s := proto.Size(x.InfoType) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RedactContentRequest_ImageRedactionConfig_RedactAllText: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents a color in the RGB color space. +type Color struct { + // The amount of red in the color as a value in the interval [0, 1]. + Red float32 `protobuf:"fixed32,1,opt,name=red" json:"red,omitempty"` + // The amount of green in the color as a value in the interval [0, 1]. + Green float32 `protobuf:"fixed32,2,opt,name=green" json:"green,omitempty"` + // The amount of blue in the color as a value in the interval [0, 1]. + Blue float32 `protobuf:"fixed32,3,opt,name=blue" json:"blue,omitempty"` +} + +func (m *Color) Reset() { *m = Color{} } +func (m *Color) String() string { return proto.CompactTextString(m) } +func (*Color) ProtoMessage() {} +func (*Color) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Color) GetRed() float32 { + if m != nil { + return m.Red + } + return 0 +} + +func (m *Color) GetGreen() float32 { + if m != nil { + return m.Green + } + return 0 +} + +func (m *Color) GetBlue() float32 { + if m != nil { + return m.Blue + } + return 0 +} + +// Results of redacting a list of items. +type RedactContentResponse struct { + // The redacted content. 
+ Items []*ContentItem `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"` +} + +func (m *RedactContentResponse) Reset() { *m = RedactContentResponse{} } +func (m *RedactContentResponse) String() string { return proto.CompactTextString(m) } +func (*RedactContentResponse) ProtoMessage() {} +func (*RedactContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *RedactContentResponse) GetItems() []*ContentItem { + if m != nil { + return m.Items + } + return nil +} + +// Request to search for potentially sensitive info in a list of items. +type InspectContentRequest struct { + // Configuration for the inspector. + InspectConfig *InspectConfig `protobuf:"bytes,1,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` + // The list of items to inspect. Items in a single request are + // considered "related" unless inspect_config.independent_inputs is true. + // Up to 100 are allowed per request. + Items []*ContentItem `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` +} + +func (m *InspectContentRequest) Reset() { *m = InspectContentRequest{} } +func (m *InspectContentRequest) String() string { return proto.CompactTextString(m) } +func (*InspectContentRequest) ProtoMessage() {} +func (*InspectContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *InspectContentRequest) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +func (m *InspectContentRequest) GetItems() []*ContentItem { + if m != nil { + return m.Items + } + return nil +} + +// Results of inspecting a list of items. +type InspectContentResponse struct { + // Each content_item from the request has a result in this list, in the + // same order as the request. 
+ Results []*InspectResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *InspectContentResponse) Reset() { *m = InspectContentResponse{} } +func (m *InspectContentResponse) String() string { return proto.CompactTextString(m) } +func (*InspectContentResponse) ProtoMessage() {} +func (*InspectContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *InspectContentResponse) GetResults() []*InspectResult { + if m != nil { + return m.Results + } + return nil +} + +// Request for scheduling a scan of a data subset from a Google Platform data +// repository. +type CreateInspectOperationRequest struct { + // Configuration for the inspector. + InspectConfig *InspectConfig `protobuf:"bytes,1,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` + // Specification of the data set to process. + StorageConfig *StorageConfig `protobuf:"bytes,2,opt,name=storage_config,json=storageConfig" json:"storage_config,omitempty"` + // Optional location to store findings. The bucket must already exist and + // the Google APIs service account for DLP must have write permission to + // write to the given bucket. + //

+	//
+	// Results are split over multiple csv files with each file name matching
+	// the pattern "[operation_id]_[count].csv", for example
+	// `3094877188788974909_1.csv`. The `operation_id` matches the
+	// identifier for the Operation, and the `count` is a counter used for
+	// tracking the number of files written.
+	//
+	// The CSV file(s) contain the following columns regardless of storage
+	// type scanned:
+	// - id
+	// - info_type
+	// - likelihood
+	// - byte size of finding
+	// - quote
+	// - timestamp
+	//
+	// For Cloud Storage the next columns are:
+	// - file_path
+	// - start_offset
+	//
+	// For Cloud Datastore the next columns are:
+	// - project_id
+	// - namespace_id
+	// - path
+	// - column_name
+	// - offset
+	//
+	// For BigQuery the next columns are:
+	// - row_number
+	// - project_id
+	// - dataset_id
  • table_id + OutputConfig *OutputStorageConfig `protobuf:"bytes,3,opt,name=output_config,json=outputConfig" json:"output_config,omitempty"` + // Additional configuration settings for long running operations. + OperationConfig *OperationConfig `protobuf:"bytes,5,opt,name=operation_config,json=operationConfig" json:"operation_config,omitempty"` +} + +func (m *CreateInspectOperationRequest) Reset() { *m = CreateInspectOperationRequest{} } +func (m *CreateInspectOperationRequest) String() string { return proto.CompactTextString(m) } +func (*CreateInspectOperationRequest) ProtoMessage() {} +func (*CreateInspectOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *CreateInspectOperationRequest) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +func (m *CreateInspectOperationRequest) GetStorageConfig() *StorageConfig { + if m != nil { + return m.StorageConfig + } + return nil +} + +func (m *CreateInspectOperationRequest) GetOutputConfig() *OutputStorageConfig { + if m != nil { + return m.OutputConfig + } + return nil +} + +func (m *CreateInspectOperationRequest) GetOperationConfig() *OperationConfig { + if m != nil { + return m.OperationConfig + } + return nil +} + +// Cloud repository for storing output. 
+type OutputStorageConfig struct {
+	// Exactly one of the wrapper types below may be stored in Type.
+	// Types that are valid to be assigned to Type:
+	//	*OutputStorageConfig_Table
+	//	*OutputStorageConfig_StoragePath
+	Type isOutputStorageConfig_Type `protobuf_oneof:"type"`
+}
+
+func (m *OutputStorageConfig) Reset()                    { *m = OutputStorageConfig{} }
+func (m *OutputStorageConfig) String() string            { return proto.CompactTextString(m) }
+func (*OutputStorageConfig) ProtoMessage()               {}
+func (*OutputStorageConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+// isOutputStorageConfig_Type is the marker interface implemented by the
+// oneof wrapper types that are assignable to the Type field.
+type isOutputStorageConfig_Type interface {
+	isOutputStorageConfig_Type()
+}
+
+// OutputStorageConfig_Table wraps a BigQuery table destination (wire field 1).
+type OutputStorageConfig_Table struct {
+	Table *BigQueryTable `protobuf:"bytes,1,opt,name=table,oneof"`
+}
+
+// OutputStorageConfig_StoragePath wraps a Cloud Storage path destination
+// (wire field 2).
+type OutputStorageConfig_StoragePath struct {
+	StoragePath *CloudStoragePath `protobuf:"bytes,2,opt,name=storage_path,json=storagePath,oneof"`
+}
+
+func (*OutputStorageConfig_Table) isOutputStorageConfig_Type()       {}
+func (*OutputStorageConfig_StoragePath) isOutputStorageConfig_Type() {}
+
+// GetType returns whichever oneof wrapper is currently stored, or nil when
+// the receiver is nil or no variant has been set.
+func (m *OutputStorageConfig) GetType() isOutputStorageConfig_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+// GetTable returns the table destination, or nil when the storage_path
+// variant (or nothing) is set.
+func (m *OutputStorageConfig) GetTable() *BigQueryTable {
+	if x, ok := m.GetType().(*OutputStorageConfig_Table); ok {
+		return x.Table
+	}
+	return nil
+}
+
+// GetStoragePath returns the Cloud Storage path destination, or nil when
+// the table variant (or nothing) is set.
+func (m *OutputStorageConfig) GetStoragePath() *CloudStoragePath {
+	if x, ok := m.GetType().(*OutputStorageConfig_StoragePath); ok {
+		return x.StoragePath
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*OutputStorageConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _OutputStorageConfig_OneofMarshaler, _OutputStorageConfig_OneofUnmarshaler, _OutputStorageConfig_OneofSizer, []interface{}{ + (*OutputStorageConfig_Table)(nil), + (*OutputStorageConfig_StoragePath)(nil), + } +} + +func _OutputStorageConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*OutputStorageConfig) + // type + switch x := m.Type.(type) { + case *OutputStorageConfig_Table: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Table); err != nil { + return err + } + case *OutputStorageConfig_StoragePath: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StoragePath); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("OutputStorageConfig.Type has unexpected type %T", x) + } + return nil +} + +func _OutputStorageConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*OutputStorageConfig) + switch tag { + case 1: // type.table + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BigQueryTable) + err := b.DecodeMessage(msg) + m.Type = &OutputStorageConfig_Table{msg} + return true, err + case 2: // type.storage_path + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CloudStoragePath) + err := b.DecodeMessage(msg) + m.Type = &OutputStorageConfig_StoragePath{msg} + return true, err + default: + return false, nil + } +} + +func _OutputStorageConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*OutputStorageConfig) + // type + switch x := m.Type.(type) { + case *OutputStorageConfig_Table: + s := proto.Size(x.Table) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*OutputStorageConfig_StoragePath: + s := proto.Size(x.StoragePath) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Statistics regarding a specific InfoType. +type InfoTypeStatistics struct { + // The type of finding this stat is for. + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Number of findings for this info type. + Count int64 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` +} + +func (m *InfoTypeStatistics) Reset() { *m = InfoTypeStatistics{} } +func (m *InfoTypeStatistics) String() string { return proto.CompactTextString(m) } +func (*InfoTypeStatistics) ProtoMessage() {} +func (*InfoTypeStatistics) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *InfoTypeStatistics) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *InfoTypeStatistics) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +// Metadata returned within GetOperation for an inspect request. +type InspectOperationMetadata struct { + // Total size in bytes that were processed. + ProcessedBytes int64 `protobuf:"varint,1,opt,name=processed_bytes,json=processedBytes" json:"processed_bytes,omitempty"` + // Estimate of the number of bytes to process. + TotalEstimatedBytes int64 `protobuf:"varint,4,opt,name=total_estimated_bytes,json=totalEstimatedBytes" json:"total_estimated_bytes,omitempty"` + InfoTypeStats []*InfoTypeStatistics `protobuf:"bytes,2,rep,name=info_type_stats,json=infoTypeStats" json:"info_type_stats,omitempty"` + // The time which this request was started. + CreateTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The inspect config used to create the Operation. 
+ RequestInspectConfig *InspectConfig `protobuf:"bytes,5,opt,name=request_inspect_config,json=requestInspectConfig" json:"request_inspect_config,omitempty"` + // The storage config used to create the Operation. + RequestStorageConfig *StorageConfig `protobuf:"bytes,6,opt,name=request_storage_config,json=requestStorageConfig" json:"request_storage_config,omitempty"` + // Optional location to store findings. + RequestOutputConfig *OutputStorageConfig `protobuf:"bytes,7,opt,name=request_output_config,json=requestOutputConfig" json:"request_output_config,omitempty"` +} + +func (m *InspectOperationMetadata) Reset() { *m = InspectOperationMetadata{} } +func (m *InspectOperationMetadata) String() string { return proto.CompactTextString(m) } +func (*InspectOperationMetadata) ProtoMessage() {} +func (*InspectOperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *InspectOperationMetadata) GetProcessedBytes() int64 { + if m != nil { + return m.ProcessedBytes + } + return 0 +} + +func (m *InspectOperationMetadata) GetTotalEstimatedBytes() int64 { + if m != nil { + return m.TotalEstimatedBytes + } + return 0 +} + +func (m *InspectOperationMetadata) GetInfoTypeStats() []*InfoTypeStatistics { + if m != nil { + return m.InfoTypeStats + } + return nil +} + +func (m *InspectOperationMetadata) GetCreateTime() *google_protobuf3.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *InspectOperationMetadata) GetRequestInspectConfig() *InspectConfig { + if m != nil { + return m.RequestInspectConfig + } + return nil +} + +func (m *InspectOperationMetadata) GetRequestStorageConfig() *StorageConfig { + if m != nil { + return m.RequestStorageConfig + } + return nil +} + +func (m *InspectOperationMetadata) GetRequestOutputConfig() *OutputStorageConfig { + if m != nil { + return m.RequestOutputConfig + } + return nil +} + +// The operational data. 
+type InspectOperationResult struct { + // The server-assigned name, which is only unique within the same service that + // originally returns it. If you use the default HTTP mapping, the + // `name` should have the format of `inspect/results/{id}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *InspectOperationResult) Reset() { *m = InspectOperationResult{} } +func (m *InspectOperationResult) String() string { return proto.CompactTextString(m) } +func (*InspectOperationResult) ProtoMessage() {} +func (*InspectOperationResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *InspectOperationResult) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for the list of results in a given inspect operation. +type ListInspectFindingsRequest struct { + // Identifier of the results set returned as metadata of + // the longrunning operation created by a call to CreateInspectOperation. + // Should be in the format of `inspect/results/{id}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Maximum number of results to return. + // If 0, the implementation selects a reasonable value. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last `ListInspectFindingsResponse`; indicates + // that this is a continuation of a prior `ListInspectFindings` call, and that + // the system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Restricts findings to items that match. Supports info_type and likelihood. + //

+	// Examples:
+	//
+	// - info_type=EMAIL_ADDRESS
+	// - info_type=PHONE_NUMBER,EMAIL_ADDRESS
+	// - likelihood=VERY_LIKELY
+	// - likelihood=VERY_LIKELY,LIKELY
  • info_type=EMAIL_ADDRESS,likelihood=VERY_LIKELY,LIKELY + Filter string `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListInspectFindingsRequest) Reset() { *m = ListInspectFindingsRequest{} } +func (m *ListInspectFindingsRequest) String() string { return proto.CompactTextString(m) } +func (*ListInspectFindingsRequest) ProtoMessage() {} +func (*ListInspectFindingsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *ListInspectFindingsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListInspectFindingsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListInspectFindingsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListInspectFindingsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// Response to the ListInspectFindings request. +type ListInspectFindingsResponse struct { + // The results. + Result *InspectResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` + // If not empty, indicates that there may be more results that match the + // request; this value should be passed in a new `ListInspectFindingsRequest`. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListInspectFindingsResponse) Reset() { *m = ListInspectFindingsResponse{} } +func (m *ListInspectFindingsResponse) String() string { return proto.CompactTextString(m) } +func (*ListInspectFindingsResponse) ProtoMessage() {} +func (*ListInspectFindingsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *ListInspectFindingsResponse) GetResult() *InspectResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *ListInspectFindingsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Info type description. +type InfoTypeDescription struct { + // Internal name of the info type. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Human readable form of the info type name. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // List of categories this info type belongs to. + Categories []*CategoryDescription `protobuf:"bytes,3,rep,name=categories" json:"categories,omitempty"` +} + +func (m *InfoTypeDescription) Reset() { *m = InfoTypeDescription{} } +func (m *InfoTypeDescription) String() string { return proto.CompactTextString(m) } +func (*InfoTypeDescription) ProtoMessage() {} +func (*InfoTypeDescription) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *InfoTypeDescription) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InfoTypeDescription) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *InfoTypeDescription) GetCategories() []*CategoryDescription { + if m != nil { + return m.Categories + } + return nil +} + +// Request for the list of info types belonging to a given category, +// or all supported info types if no category is specified. 
+type ListInfoTypesRequest struct { + // Category name as returned by ListRootCategories. + Category string `protobuf:"bytes,1,opt,name=category" json:"category,omitempty"` + // Optional BCP-47 language code for localized info type friendly + // names. If omitted, or if localized strings are not available, + // en-US strings will be returned. + LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` +} + +func (m *ListInfoTypesRequest) Reset() { *m = ListInfoTypesRequest{} } +func (m *ListInfoTypesRequest) String() string { return proto.CompactTextString(m) } +func (*ListInfoTypesRequest) ProtoMessage() {} +func (*ListInfoTypesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *ListInfoTypesRequest) GetCategory() string { + if m != nil { + return m.Category + } + return "" +} + +func (m *ListInfoTypesRequest) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +// Response to the ListInfoTypes request. +type ListInfoTypesResponse struct { + // Set of sensitive info types belonging to a category. + InfoTypes []*InfoTypeDescription `protobuf:"bytes,1,rep,name=info_types,json=infoTypes" json:"info_types,omitempty"` +} + +func (m *ListInfoTypesResponse) Reset() { *m = ListInfoTypesResponse{} } +func (m *ListInfoTypesResponse) String() string { return proto.CompactTextString(m) } +func (*ListInfoTypesResponse) ProtoMessage() {} +func (*ListInfoTypesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *ListInfoTypesResponse) GetInfoTypes() []*InfoTypeDescription { + if m != nil { + return m.InfoTypes + } + return nil +} + +// Info Type Category description. +type CategoryDescription struct { + // Internal name of the category. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Human readable form of the category name. 
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` +} + +func (m *CategoryDescription) Reset() { *m = CategoryDescription{} } +func (m *CategoryDescription) String() string { return proto.CompactTextString(m) } +func (*CategoryDescription) ProtoMessage() {} +func (*CategoryDescription) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *CategoryDescription) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CategoryDescription) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// Request for root categories of Info Types supported by the API. +// Example values might include "FINANCE", "HEALTH", "FAST", "DEFAULT". +type ListRootCategoriesRequest struct { + // Optional language code for localized friendly category names. + // If omitted or if localized strings are not available, + // en-US strings will be returned. + LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` +} + +func (m *ListRootCategoriesRequest) Reset() { *m = ListRootCategoriesRequest{} } +func (m *ListRootCategoriesRequest) String() string { return proto.CompactTextString(m) } +func (*ListRootCategoriesRequest) ProtoMessage() {} +func (*ListRootCategoriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *ListRootCategoriesRequest) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +// Response for ListRootCategories request. +type ListRootCategoriesResponse struct { + // List of all into type categories supported by the API. 
+ Categories []*CategoryDescription `protobuf:"bytes,1,rep,name=categories" json:"categories,omitempty"` +} + +func (m *ListRootCategoriesResponse) Reset() { *m = ListRootCategoriesResponse{} } +func (m *ListRootCategoriesResponse) String() string { return proto.CompactTextString(m) } +func (*ListRootCategoriesResponse) ProtoMessage() {} +func (*ListRootCategoriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *ListRootCategoriesResponse) GetCategories() []*CategoryDescription { + if m != nil { + return m.Categories + } + return nil +} + +// Set of primitive values supported by the system. +type Value struct { + // Types that are valid to be assigned to Type: + // *Value_IntegerValue + // *Value_FloatValue + // *Value_StringValue + // *Value_BooleanValue + // *Value_TimestampValue + // *Value_TimeValue + // *Value_DateValue + Type isValue_Type `protobuf_oneof:"type"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +type isValue_Type interface { + isValue_Type() +} + +type Value_IntegerValue struct { + IntegerValue int64 `protobuf:"varint,1,opt,name=integer_value,json=integerValue,oneof"` +} +type Value_FloatValue struct { + FloatValue float64 `protobuf:"fixed64,2,opt,name=float_value,json=floatValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BooleanValue struct { + BooleanValue bool `protobuf:"varint,4,opt,name=boolean_value,json=booleanValue,oneof"` +} +type Value_TimestampValue struct { + TimestampValue *google_protobuf3.Timestamp `protobuf:"bytes,5,opt,name=timestamp_value,json=timestampValue,oneof"` +} +type Value_TimeValue struct { + TimeValue *google_type1.TimeOfDay `protobuf:"bytes,6,opt,name=time_value,json=timeValue,oneof"` +} +type 
Value_DateValue struct { + DateValue *google_type.Date `protobuf:"bytes,7,opt,name=date_value,json=dateValue,oneof"` +} + +func (*Value_IntegerValue) isValue_Type() {} +func (*Value_FloatValue) isValue_Type() {} +func (*Value_StringValue) isValue_Type() {} +func (*Value_BooleanValue) isValue_Type() {} +func (*Value_TimestampValue) isValue_Type() {} +func (*Value_TimeValue) isValue_Type() {} +func (*Value_DateValue) isValue_Type() {} + +func (m *Value) GetType() isValue_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *Value) GetIntegerValue() int64 { + if x, ok := m.GetType().(*Value_IntegerValue); ok { + return x.IntegerValue + } + return 0 +} + +func (m *Value) GetFloatValue() float64 { + if x, ok := m.GetType().(*Value_FloatValue); ok { + return x.FloatValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetType().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBooleanValue() bool { + if x, ok := m.GetType().(*Value_BooleanValue); ok { + return x.BooleanValue + } + return false +} + +func (m *Value) GetTimestampValue() *google_protobuf3.Timestamp { + if x, ok := m.GetType().(*Value_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +func (m *Value) GetTimeValue() *google_type1.TimeOfDay { + if x, ok := m.GetType().(*Value_TimeValue); ok { + return x.TimeValue + } + return nil +} + +func (m *Value) GetDateValue() *google_type.Date { + if x, ok := m.GetType().(*Value_DateValue); ok { + return x.DateValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_IntegerValue)(nil), + (*Value_FloatValue)(nil), + (*Value_StringValue)(nil), + (*Value_BooleanValue)(nil), + (*Value_TimestampValue)(nil), + (*Value_TimeValue)(nil), + (*Value_DateValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // type + switch x := m.Type.(type) { + case *Value_IntegerValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntegerValue)) + case *Value_FloatValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.FloatValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BooleanValue: + t := uint64(0) + if x.BooleanValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_TimestampValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimestampValue); err != nil { + return err + } + case *Value_TimeValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimeValue); err != nil { + return err + } + case *Value_DateValue: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DateValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Type has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // type.integer_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Type = &Value_IntegerValue{int64(x)} + return true, err + case 2: // 
type.float_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Type = &Value_FloatValue{math.Float64frombits(x)} + return true, err + case 3: // type.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Type = &Value_StringValue{x} + return true, err + case 4: // type.boolean_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Type = &Value_BooleanValue{x != 0} + return true, err + case 5: // type.timestamp_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Timestamp) + err := b.DecodeMessage(msg) + m.Type = &Value_TimestampValue{msg} + return true, err + case 6: // type.time_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_type1.TimeOfDay) + err := b.DecodeMessage(msg) + m.Type = &Value_TimeValue{msg} + return true, err + case 7: // type.date_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_type.Date) + err := b.DecodeMessage(msg) + m.Type = &Value_DateValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // type + switch x := m.Type.(type) { + case *Value_IntegerValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.IntegerValue)) + case *Value_FloatValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BooleanValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_TimestampValue: + s := proto.Size(x.TimestampValue) + n += proto.SizeVarint(5<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_TimeValue: + s := proto.Size(x.TimeValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_DateValue: + s := proto.Size(x.DateValue) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*InspectConfig)(nil), "google.privacy.dlp.v2beta1.InspectConfig") + proto.RegisterType((*InspectConfig_InfoTypeLimit)(nil), "google.privacy.dlp.v2beta1.InspectConfig.InfoTypeLimit") + proto.RegisterType((*OperationConfig)(nil), "google.privacy.dlp.v2beta1.OperationConfig") + proto.RegisterType((*ContentItem)(nil), "google.privacy.dlp.v2beta1.ContentItem") + proto.RegisterType((*Table)(nil), "google.privacy.dlp.v2beta1.Table") + proto.RegisterType((*Table_Row)(nil), "google.privacy.dlp.v2beta1.Table.Row") + proto.RegisterType((*InspectResult)(nil), "google.privacy.dlp.v2beta1.InspectResult") + proto.RegisterType((*Finding)(nil), "google.privacy.dlp.v2beta1.Finding") + proto.RegisterType((*Location)(nil), "google.privacy.dlp.v2beta1.Location") + proto.RegisterType((*TableLocation)(nil), "google.privacy.dlp.v2beta1.TableLocation") + proto.RegisterType((*Range)(nil), "google.privacy.dlp.v2beta1.Range") + proto.RegisterType((*ImageLocation)(nil), "google.privacy.dlp.v2beta1.ImageLocation") + proto.RegisterType((*RedactContentRequest)(nil), "google.privacy.dlp.v2beta1.RedactContentRequest") + proto.RegisterType((*RedactContentRequest_ReplaceConfig)(nil), "google.privacy.dlp.v2beta1.RedactContentRequest.ReplaceConfig") + proto.RegisterType((*RedactContentRequest_ImageRedactionConfig)(nil), "google.privacy.dlp.v2beta1.RedactContentRequest.ImageRedactionConfig") + proto.RegisterType((*Color)(nil), "google.privacy.dlp.v2beta1.Color") + proto.RegisterType((*RedactContentResponse)(nil), 
"google.privacy.dlp.v2beta1.RedactContentResponse") + proto.RegisterType((*InspectContentRequest)(nil), "google.privacy.dlp.v2beta1.InspectContentRequest") + proto.RegisterType((*InspectContentResponse)(nil), "google.privacy.dlp.v2beta1.InspectContentResponse") + proto.RegisterType((*CreateInspectOperationRequest)(nil), "google.privacy.dlp.v2beta1.CreateInspectOperationRequest") + proto.RegisterType((*OutputStorageConfig)(nil), "google.privacy.dlp.v2beta1.OutputStorageConfig") + proto.RegisterType((*InfoTypeStatistics)(nil), "google.privacy.dlp.v2beta1.InfoTypeStatistics") + proto.RegisterType((*InspectOperationMetadata)(nil), "google.privacy.dlp.v2beta1.InspectOperationMetadata") + proto.RegisterType((*InspectOperationResult)(nil), "google.privacy.dlp.v2beta1.InspectOperationResult") + proto.RegisterType((*ListInspectFindingsRequest)(nil), "google.privacy.dlp.v2beta1.ListInspectFindingsRequest") + proto.RegisterType((*ListInspectFindingsResponse)(nil), "google.privacy.dlp.v2beta1.ListInspectFindingsResponse") + proto.RegisterType((*InfoTypeDescription)(nil), "google.privacy.dlp.v2beta1.InfoTypeDescription") + proto.RegisterType((*ListInfoTypesRequest)(nil), "google.privacy.dlp.v2beta1.ListInfoTypesRequest") + proto.RegisterType((*ListInfoTypesResponse)(nil), "google.privacy.dlp.v2beta1.ListInfoTypesResponse") + proto.RegisterType((*CategoryDescription)(nil), "google.privacy.dlp.v2beta1.CategoryDescription") + proto.RegisterType((*ListRootCategoriesRequest)(nil), "google.privacy.dlp.v2beta1.ListRootCategoriesRequest") + proto.RegisterType((*ListRootCategoriesResponse)(nil), "google.privacy.dlp.v2beta1.ListRootCategoriesResponse") + proto.RegisterType((*Value)(nil), "google.privacy.dlp.v2beta1.Value") + proto.RegisterEnum("google.privacy.dlp.v2beta1.Likelihood", Likelihood_name, Likelihood_value) +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DlpService service + +type DlpServiceClient interface { + // Finds potentially sensitive info in a list of strings. + // This method has limits on input size, processing time, and output size. + InspectContent(ctx context.Context, in *InspectContentRequest, opts ...grpc.CallOption) (*InspectContentResponse, error) + // Redacts potentially sensitive info from a list of strings. + // This method has limits on input size, processing time, and output size. + RedactContent(ctx context.Context, in *RedactContentRequest, opts ...grpc.CallOption) (*RedactContentResponse, error) + // Schedules a job scanning content in a Google Cloud Platform data + // repository. + CreateInspectOperation(ctx context.Context, in *CreateInspectOperationRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Returns list of results for given inspect operation result set id. + ListInspectFindings(ctx context.Context, in *ListInspectFindingsRequest, opts ...grpc.CallOption) (*ListInspectFindingsResponse, error) + // Returns sensitive information types for given category. + ListInfoTypes(ctx context.Context, in *ListInfoTypesRequest, opts ...grpc.CallOption) (*ListInfoTypesResponse, error) + // Returns the list of root categories of sensitive information. 
+ ListRootCategories(ctx context.Context, in *ListRootCategoriesRequest, opts ...grpc.CallOption) (*ListRootCategoriesResponse, error) +} + +type dlpServiceClient struct { + cc *grpc.ClientConn +} + +func NewDlpServiceClient(cc *grpc.ClientConn) DlpServiceClient { + return &dlpServiceClient{cc} +} + +func (c *dlpServiceClient) InspectContent(ctx context.Context, in *InspectContentRequest, opts ...grpc.CallOption) (*InspectContentResponse, error) { + out := new(InspectContentResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta1.DlpService/InspectContent", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) RedactContent(ctx context.Context, in *RedactContentRequest, opts ...grpc.CallOption) (*RedactContentResponse, error) { + out := new(RedactContentResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta1.DlpService/RedactContent", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) CreateInspectOperation(ctx context.Context, in *CreateInspectOperationRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta1.DlpService/CreateInspectOperation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ListInspectFindings(ctx context.Context, in *ListInspectFindingsRequest, opts ...grpc.CallOption) (*ListInspectFindingsResponse, error) { + out := new(ListInspectFindingsResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta1.DlpService/ListInspectFindings", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ListInfoTypes(ctx context.Context, in *ListInfoTypesRequest, opts ...grpc.CallOption) (*ListInfoTypesResponse, error) { + out := new(ListInfoTypesResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta1.DlpService/ListInfoTypes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ListRootCategories(ctx context.Context, in *ListRootCategoriesRequest, opts ...grpc.CallOption) (*ListRootCategoriesResponse, error) { + out := new(ListRootCategoriesResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2beta1.DlpService/ListRootCategories", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DlpService service + +type DlpServiceServer interface { + // Finds potentially sensitive info in a list of strings. + // This method has limits on input size, processing time, and output size. + InspectContent(context.Context, *InspectContentRequest) (*InspectContentResponse, error) + // Redacts potentially sensitive info from a list of strings. + // This method has limits on input size, processing time, and output size. + RedactContent(context.Context, *RedactContentRequest) (*RedactContentResponse, error) + // Schedules a job scanning content in a Google Cloud Platform data + // repository. + CreateInspectOperation(context.Context, *CreateInspectOperationRequest) (*google_longrunning.Operation, error) + // Returns list of results for given inspect operation result set id. + ListInspectFindings(context.Context, *ListInspectFindingsRequest) (*ListInspectFindingsResponse, error) + // Returns sensitive information types for given category. + ListInfoTypes(context.Context, *ListInfoTypesRequest) (*ListInfoTypesResponse, error) + // Returns the list of root categories of sensitive information. 
+ ListRootCategories(context.Context, *ListRootCategoriesRequest) (*ListRootCategoriesResponse, error) +} + +func RegisterDlpServiceServer(s *grpc.Server, srv DlpServiceServer) { + s.RegisterService(&_DlpService_serviceDesc, srv) +} + +func _DlpService_InspectContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InspectContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).InspectContent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta1.DlpService/InspectContent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).InspectContent(ctx, req.(*InspectContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_RedactContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RedactContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).RedactContent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta1.DlpService/RedactContent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).RedactContent(ctx, req.(*RedactContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_CreateInspectOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateInspectOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).CreateInspectOperation(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta1.DlpService/CreateInspectOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).CreateInspectOperation(ctx, req.(*CreateInspectOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListInspectFindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInspectFindingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListInspectFindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta1.DlpService/ListInspectFindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListInspectFindings(ctx, req.(*ListInspectFindingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListInfoTypes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInfoTypesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListInfoTypes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta1.DlpService/ListInfoTypes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListInfoTypes(ctx, req.(*ListInfoTypesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListRootCategories_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRootCategoriesRequest) + if err := dec(in); err != nil { 
+ return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListRootCategories(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2beta1.DlpService/ListRootCategories", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListRootCategories(ctx, req.(*ListRootCategoriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DlpService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.privacy.dlp.v2beta1.DlpService", + HandlerType: (*DlpServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "InspectContent", + Handler: _DlpService_InspectContent_Handler, + }, + { + MethodName: "RedactContent", + Handler: _DlpService_RedactContent_Handler, + }, + { + MethodName: "CreateInspectOperation", + Handler: _DlpService_CreateInspectOperation_Handler, + }, + { + MethodName: "ListInspectFindings", + Handler: _DlpService_ListInspectFindings_Handler, + }, + { + MethodName: "ListInfoTypes", + Handler: _DlpService_ListInfoTypes_Handler, + }, + { + MethodName: "ListRootCategories", + Handler: _DlpService_ListRootCategories_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/privacy/dlp/v2beta1/dlp.proto", +} + +func init() { proto.RegisterFile("google/privacy/dlp/v2beta1/dlp.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2313 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcf, 0x6f, 0x1b, 0xc7, + 0xf5, 0xd7, 0x52, 0xa2, 0x44, 0x3e, 0x8a, 0x92, 0x3c, 0xfa, 0x61, 0x85, 0x8e, 0xbf, 0x91, 0x57, + 0x89, 0x23, 0xeb, 0xeb, 0x92, 0x31, 0x8b, 0xda, 0x70, 0x02, 0xa5, 0xb6, 0x28, 0xb9, 0x52, 0xa3, + 0x58, 0xf2, 0x48, 0x51, 0x90, 0x02, 0xc5, 0x62, 0xc5, 0x1d, 0x51, 0x03, 0x2f, 0x77, 0x36, 0xbb, + 0x43, 0x4b, 0x4c, 0x60, 0x14, 0x28, 0x8a, 0xa2, 0xf7, 0x1e, 0x5a, 0x14, 0xed, 0xad, 0x87, 0xb6, + 0xe8, 0x2d, 0xb7, 0xa2, 0xb7, 0xfe, 0x09, 
0x3d, 0x15, 0xbd, 0xe6, 0xd4, 0x5b, 0x2f, 0x3d, 0x17, + 0xf3, 0x6b, 0xb9, 0x94, 0xe8, 0x95, 0xe8, 0xa6, 0x40, 0x6f, 0x33, 0x6f, 0xde, 0xe7, 0xcd, 0x9b, + 0x37, 0xef, 0xd7, 0xec, 0xc2, 0xdb, 0x2d, 0xc6, 0x5a, 0x3e, 0xa9, 0x85, 0x11, 0x7d, 0xe1, 0x36, + 0xbb, 0x35, 0xcf, 0x0f, 0x6b, 0x2f, 0xea, 0x47, 0x84, 0xbb, 0xf7, 0xc4, 0xb8, 0x1a, 0x46, 0x8c, + 0x33, 0x54, 0x51, 0x5c, 0x55, 0xcd, 0x55, 0x15, 0x2b, 0x9a, 0xab, 0xf2, 0xa6, 0x96, 0xe0, 0x86, + 0xb4, 0xe6, 0x06, 0x01, 0xe3, 0x2e, 0xa7, 0x2c, 0x88, 0x15, 0xb2, 0xb2, 0xac, 0x57, 0x7d, 0x16, + 0xb4, 0xa2, 0x4e, 0x10, 0xd0, 0xa0, 0x55, 0x63, 0x21, 0x89, 0xfa, 0x98, 0x56, 0x32, 0x94, 0x88, + 0x39, 0x8b, 0xdc, 0x16, 0xd1, 0x9c, 0x37, 0x12, 0x4e, 0xc6, 0xd9, 0x51, 0xe7, 0xb8, 0x46, 0xda, + 0x21, 0xef, 0xea, 0xc5, 0xb7, 0xce, 0x2f, 0x72, 0xda, 0x26, 0x31, 0x77, 0xdb, 0xfa, 0x18, 0x95, + 0x05, 0xcd, 0xc0, 0xbb, 0x21, 0xa9, 0x79, 0x2e, 0x3f, 0x2f, 0x55, 0xd2, 0x05, 0x88, 0x1d, 0x7b, + 0xae, 0x96, 0x6a, 0xff, 0x63, 0x14, 0xca, 0xdb, 0x41, 0x1c, 0x92, 0x26, 0x6f, 0xb0, 0xe0, 0x98, + 0xb6, 0x50, 0x03, 0x80, 0x06, 0xc7, 0xcc, 0x11, 0xec, 0xf1, 0xa2, 0xb5, 0x34, 0xba, 0x52, 0xaa, + 0xbf, 0x5d, 0x7d, 0xb5, 0x89, 0xaa, 0xdb, 0xc1, 0x31, 0x3b, 0xe8, 0x86, 0x04, 0x17, 0xa9, 0x1e, + 0xc5, 0xe8, 0x63, 0x98, 0x6a, 0xd3, 0xc0, 0xf1, 0xe9, 0x73, 0xe2, 0xd3, 0x13, 0xc6, 0xbc, 0xc5, + 0xdc, 0x92, 0xb5, 0x32, 0x55, 0xbf, 0x9d, 0x25, 0x68, 0x27, 0xe1, 0xc6, 0xe5, 0x36, 0x0d, 0x7a, + 0x53, 0x74, 0x0b, 0x26, 0xdb, 0xee, 0x99, 0x73, 0x4c, 0x03, 0x8f, 0x06, 0xad, 0x78, 0x71, 0x74, + 0xc9, 0x5a, 0xc9, 0xe3, 0x52, 0xdb, 0x3d, 0x7b, 0xa2, 0x49, 0x68, 0x19, 0xca, 0x34, 0x68, 0xfa, + 0x1d, 0x8f, 0x38, 0x9f, 0x77, 0x18, 0x27, 0x8b, 0x63, 0x4b, 0xd6, 0x4a, 0x01, 0x4f, 0x6a, 0xe2, + 0x33, 0x41, 0x13, 0x4c, 0xe4, 0x4c, 0x31, 0xa9, 0xe3, 0x8d, 0x2b, 0x26, 0x4d, 0x54, 0xba, 0xbb, + 0x30, 0x93, 0x18, 0xc0, 0xf1, 0x69, 0x9b, 0xf2, 0x78, 0x71, 0x42, 0x9a, 0xe1, 0x41, 0xb6, 0x19, + 0x52, 0x56, 0x4c, 0x8c, 0xb2, 0x23, 0xf0, 0x78, 0x8a, 0xa6, 0xa7, 0x71, 0xa5, 
0x23, 0x8c, 0x9e, + 0xa2, 0xa0, 0xc7, 0x50, 0x4c, 0xf6, 0x5c, 0xb4, 0x96, 0xac, 0x2b, 0xdb, 0xbc, 0x60, 0x24, 0x5f, + 0xb0, 0x51, 0xee, 0x82, 0x8d, 0xec, 0x35, 0x98, 0xde, 0x35, 0xde, 0xa9, 0x6f, 0x7b, 0x15, 0xae, + 0x09, 0x14, 0xe5, 0xa4, 0xdd, 0x83, 0x0a, 0x05, 0x46, 0xf1, 0x74, 0xdb, 0x3d, 0xdb, 0xe6, 0xa4, + 0x9d, 0xc0, 0x7f, 0x61, 0x41, 0xa9, 0xc1, 0x02, 0x4e, 0x02, 0x2e, 0xe8, 0x08, 0xc1, 0x58, 0xa2, + 0x6f, 0x11, 0xcb, 0x31, 0x9a, 0x83, 0x31, 0xcf, 0xe5, 0xae, 0xdc, 0x7d, 0x72, 0x6b, 0x04, 0xcb, + 0x19, 0x5a, 0x80, 0xfc, 0x0b, 0xd7, 0xef, 0x10, 0x79, 0x71, 0xc5, 0xad, 0x11, 0xac, 0xa6, 0xe8, + 0x21, 0xe4, 0xb9, 0x7b, 0xe4, 0xab, 0xcb, 0x2a, 0xd5, 0x6f, 0x65, 0x1d, 0xf9, 0x40, 0x30, 0x0a, + 0xa8, 0x44, 0xac, 0x97, 0xa0, 0x28, 0x44, 0x4b, 0xcd, 0xed, 0xbf, 0x58, 0x90, 0x97, 0xeb, 0x68, + 0x0d, 0x26, 0x4e, 0x88, 0xeb, 0x91, 0xc8, 0xb8, 0xee, 0x72, 0x96, 0xcc, 0x27, 0x94, 0xf8, 0xde, + 0xb6, 0x87, 0x0d, 0x06, 0x3d, 0x84, 0xb1, 0x88, 0x9d, 0x0a, 0xe3, 0x09, 0xec, 0x3b, 0x97, 0xea, + 0x53, 0xc5, 0xec, 0x14, 0x4b, 0x48, 0xe5, 0x11, 0x8c, 0x62, 0x76, 0x8a, 0x1e, 0xc2, 0xb8, 0x3c, + 0x9b, 0xd9, 0x3f, 0xf3, 0x4c, 0x87, 0x82, 0x13, 0x6b, 0x80, 0xfd, 0xa3, 0x24, 0x14, 0x31, 0x89, + 0x3b, 0x3e, 0x47, 0xdf, 0x85, 0x42, 0xea, 0x4e, 0xae, 0x70, 0x1a, 0xc9, 0x8b, 0x13, 0x10, 0xfa, + 0x16, 0x20, 0x33, 0x76, 0x78, 0xd4, 0x09, 0x9a, 0x2e, 0x27, 0x2a, 0x14, 0x0b, 0xf8, 0x9a, 0x59, + 0x39, 0x30, 0x0b, 0xf6, 0xef, 0x73, 0x30, 0xa1, 0x85, 0xa0, 0x39, 0xc8, 0xab, 0x38, 0x52, 0xb7, + 0xab, 0x26, 0xfd, 0x7e, 0x9a, 0x7b, 0x2d, 0x3f, 0x7d, 0x02, 0x90, 0x4a, 0x0b, 0xa3, 0x43, 0xa5, + 0x85, 0x14, 0x12, 0x3d, 0x82, 0x82, 0xcf, 0x9a, 0xd2, 0x97, 0xb5, 0xfb, 0x64, 0x6a, 0xb2, 0xa3, + 0x79, 0x71, 0x82, 0x42, 0x1f, 0x40, 0xa9, 0x19, 0x11, 0x97, 0x13, 0x47, 0x64, 0x45, 0x99, 0x0b, + 0x4a, 0xf5, 0x4a, 0x4f, 0x88, 0xca, 0xb3, 0xd5, 0x03, 0x93, 0x67, 0x31, 0x28, 0x76, 0x41, 0xb0, + 0xff, 0x3c, 0x0a, 0x05, 0x23, 0x13, 0x3d, 0x02, 0x38, 0xea, 0x72, 0xe2, 0x44, 0x6e, 0xd0, 0x32, + 0xf1, 0x9b, 0x79, 
0xf1, 0x58, 0x30, 0xe2, 0xa2, 0x00, 0xc9, 0x21, 0xfa, 0x3e, 0x4c, 0x37, 0x99, + 0x47, 0x42, 0x46, 0x03, 0xae, 0xc5, 0xe4, 0xae, 0x2a, 0x66, 0x2a, 0x41, 0x1a, 0x59, 0x25, 0xda, + 0x76, 0x5b, 0xc4, 0x39, 0x62, 0x67, 0x44, 0x24, 0x4b, 0xe1, 0x39, 0x77, 0x32, 0xaf, 0x49, 0xb0, + 0x27, 0x16, 0x02, 0x89, 0x5e, 0x17, 0x60, 0xb4, 0x01, 0x10, 0x91, 0x26, 0x8b, 0x3c, 0xe7, 0x39, + 0xe9, 0x6a, 0x3b, 0x67, 0x86, 0x05, 0x96, 0xdc, 0x1f, 0x91, 0x2e, 0x2e, 0x46, 0x66, 0x88, 0x3e, + 0x14, 0x8e, 0x4c, 0x7c, 0xcf, 0xa1, 0xde, 0x62, 0x5e, 0xca, 0xb8, 0x5a, 0x58, 0x1e, 0xab, 0x01, + 0xda, 0x83, 0x29, 0x19, 0xf5, 0x4e, 0x72, 0xe3, 0xea, 0xb2, 0xee, 0x5c, 0x1a, 0xa0, 0xc9, 0xa1, + 0xca, 0x3c, 0x3d, 0xb5, 0xef, 0x42, 0xb9, 0x6f, 0x1d, 0xdd, 0x80, 0x62, 0xc4, 0x4e, 0x1d, 0x1a, + 0x78, 0xe4, 0x4c, 0x27, 0xc0, 0x42, 0xc4, 0x4e, 0xb7, 0xc5, 0xdc, 0xae, 0x41, 0x5e, 0x99, 0x76, + 0x0e, 0xf2, 0x31, 0x77, 0x23, 0xae, 0x39, 0xd4, 0x04, 0xcd, 0xc0, 0x28, 0x09, 0x54, 0x5c, 0x8d, + 0x62, 0x31, 0xb4, 0x9b, 0x50, 0xee, 0xb3, 0xa9, 0x60, 0xe1, 0x2c, 0x94, 0xb0, 0x3c, 0x16, 0x43, + 0x91, 0x3d, 0x7d, 0x72, 0xcc, 0x75, 0x9e, 0x96, 0x63, 0x21, 0xfe, 0x94, 0x7a, 0xfc, 0x44, 0x17, + 0x38, 0x35, 0x41, 0x0b, 0x30, 0x7e, 0x42, 0x68, 0xeb, 0x84, 0x4b, 0xfb, 0xe7, 0xb1, 0x9e, 0xd9, + 0x5f, 0xe7, 0x61, 0x0e, 0x13, 0xcf, 0x95, 0x45, 0x47, 0x64, 0x65, 0x4c, 0x3e, 0xef, 0x90, 0x98, + 0x0b, 0x73, 0x51, 0x95, 0x48, 0x9c, 0xa6, 0x4c, 0xf3, 0xda, 0x25, 0xef, 0x5c, 0xb9, 0x7e, 0xe1, + 0x32, 0xed, 0x6b, 0x0a, 0xd6, 0x20, 0x2f, 0x12, 0xad, 0x49, 0x8c, 0xef, 0x66, 0x09, 0x4a, 0x95, + 0x08, 0xac, 0x50, 0xa8, 0x05, 0xd3, 0x11, 0x09, 0x7d, 0xb7, 0x49, 0xb4, 0x42, 0xc6, 0x2b, 0x3f, + 0xcc, 0x76, 0xa5, 0x8b, 0x67, 0xab, 0x62, 0x25, 0x47, 0xab, 0x39, 0x15, 0xa5, 0xa7, 0x31, 0x7a, + 0x09, 0xd7, 0x95, 0xeb, 0x47, 0x12, 0x4b, 0x59, 0x90, 0x6c, 0x38, 0x26, 0x37, 0xdc, 0x1c, 0x7a, + 0x43, 0x79, 0x8f, 0xd8, 0x88, 0xd3, 0xfb, 0xce, 0xd3, 0x01, 0x54, 0x59, 0xd7, 0xfb, 0xf4, 0xfb, + 0x86, 0xea, 0xba, 0xb1, 0xdd, 0x29, 0xe5, 0x27, 0xd2, 
0x5f, 0x8a, 0xb8, 0xa4, 0x69, 0x9f, 0x52, + 0x7e, 0x52, 0xf9, 0xbb, 0x05, 0x73, 0x83, 0xd4, 0x44, 0x8d, 0xd7, 0xdc, 0x7e, 0x6b, 0x24, 0xa5, + 0xc0, 0x8a, 0xb8, 0x3c, 0x21, 0xd7, 0x71, 0x7d, 0xdf, 0xe1, 0xe4, 0x4c, 0xf9, 0x6c, 0x61, 0x6b, + 0x04, 0x97, 0xd5, 0xc2, 0x63, 0xdf, 0x3f, 0x20, 0x67, 0x5c, 0x24, 0xb1, 0xb4, 0xdd, 0x7d, 0x16, + 0x49, 0x47, 0xbe, 0x24, 0x89, 0x35, 0x04, 0xa3, 0xb8, 0xc9, 0x44, 0x77, 0x9f, 0x45, 0xeb, 0x05, + 0x18, 0xe7, 0x6e, 0xd4, 0x22, 0xdc, 0x6e, 0x40, 0x5e, 0x92, 0x44, 0x0c, 0x45, 0xc4, 0x93, 0xe7, + 0xc8, 0x61, 0x31, 0x14, 0xf1, 0xd2, 0x8a, 0x08, 0x09, 0xa4, 0x42, 0x39, 0xac, 0x26, 0x22, 0xb2, + 0x8e, 0x4c, 0xb3, 0x91, 0xc3, 0x72, 0x6c, 0x1f, 0xc2, 0xfc, 0xb9, 0xdb, 0x8d, 0x43, 0x16, 0xc4, + 0xa4, 0xe7, 0xd9, 0xd6, 0xeb, 0x78, 0xb6, 0xfd, 0x3b, 0x0b, 0xe6, 0x7b, 0x91, 0xf3, 0xbf, 0x1c, + 0x84, 0xf6, 0x0f, 0x61, 0xe1, 0xbc, 0xa6, 0xda, 0x06, 0x0d, 0x98, 0x88, 0x64, 0xc7, 0x61, 0xac, + 0x70, 0x15, 0x1d, 0x55, 0x8f, 0x82, 0x0d, 0xd2, 0xfe, 0x67, 0x0e, 0x6e, 0x36, 0x64, 0x7d, 0xd4, + 0x0c, 0x49, 0xab, 0xf9, 0xdf, 0xb3, 0xc8, 0x1e, 0x4c, 0xe9, 0x17, 0x94, 0x91, 0x98, 0xbb, 0x5c, + 0xe2, 0xbe, 0x42, 0x18, 0x89, 0x71, 0x7a, 0x8a, 0x0e, 0xa0, 0xcc, 0x3a, 0x3c, 0xec, 0x24, 0x2a, + 0x2a, 0x07, 0xae, 0x65, 0x09, 0xdc, 0x95, 0x80, 0x7e, 0xb1, 0x93, 0x4a, 0x8a, 0x96, 0x7a, 0x08, + 0x33, 0xc9, 0xb3, 0xd0, 0x08, 0x56, 0x75, 0xf0, 0xff, 0x33, 0x05, 0xf7, 0x37, 0xeb, 0x78, 0x9a, + 0xf5, 0x13, 0xec, 0xaf, 0x2c, 0x98, 0x1d, 0xb0, 0x3b, 0x7a, 0x6c, 0xfa, 0xea, 0x2b, 0x18, 0x78, + 0x9d, 0xb6, 0x9e, 0x75, 0x48, 0xd4, 0xed, 0xef, 0xaf, 0xd1, 0x33, 0x98, 0x34, 0xa6, 0x0d, 0x5d, + 0x9d, 0x76, 0x4a, 0xf5, 0xbb, 0x99, 0x3e, 0xe7, 0xb3, 0x8e, 0xa7, 0x15, 0xd9, 0x73, 0xf9, 0xc9, + 0xd6, 0x08, 0x2e, 0xc5, 0xbd, 0xe9, 0xfa, 0xb8, 0x7a, 0x2f, 0xd8, 0x6d, 0x40, 0x26, 0xd1, 0xec, + 0x8b, 0xf7, 0x74, 0xcc, 0x69, 0x33, 0xfe, 0x26, 0x52, 0xe5, 0x1c, 0xe4, 0x9b, 0xac, 0x13, 0x70, + 0x5d, 0x89, 0xd5, 0xc4, 0xfe, 0x6a, 0x0c, 0x16, 0xcf, 0xbb, 0xe4, 0xc7, 0x84, 0xbb, 0xf2, 
0x65, + 0xf2, 0x2e, 0x4c, 0x87, 0x11, 0x6b, 0x92, 0x38, 0x26, 0x9e, 0x23, 0xda, 0x31, 0xf3, 0xfa, 0x99, + 0x4a, 0xc8, 0xeb, 0x82, 0x8a, 0xea, 0x30, 0xcf, 0x19, 0x77, 0x7d, 0x87, 0xc4, 0x9c, 0xb6, 0x45, + 0xbb, 0xac, 0xd9, 0xc7, 0x24, 0xfb, 0xac, 0x5c, 0xdc, 0x34, 0x6b, 0x0a, 0x73, 0x08, 0xd3, 0xbd, + 0x97, 0x64, 0xcc, 0x5d, 0x6e, 0x42, 0xb7, 0x7a, 0x95, 0x83, 0xf5, 0x6c, 0x23, 0xdc, 0xbe, 0x47, + 0x8b, 0xcf, 0x37, 0xae, 0xa3, 0xc3, 0x34, 0xae, 0xc8, 0x81, 0x85, 0x48, 0x05, 0xa4, 0x73, 0x2e, + 0x1a, 0xf3, 0xc3, 0x46, 0xe3, 0x9c, 0x16, 0xd4, 0xff, 0x01, 0x21, 0xb5, 0xc1, 0xb9, 0xe0, 0x1c, + 0x1f, 0x36, 0x38, 0xcd, 0x06, 0xfd, 0xde, 0xdd, 0x84, 0x79, 0xb3, 0x41, 0x7f, 0xac, 0x4e, 0xbc, + 0x5e, 0xac, 0xce, 0x6a, 0x69, 0xbb, 0xa9, 0x90, 0xb5, 0xef, 0x26, 0xd9, 0x32, 0x95, 0xc7, 0xe4, + 0xab, 0x0c, 0xc1, 0x58, 0xe0, 0xb6, 0x93, 0x67, 0xaf, 0x18, 0xdb, 0x3f, 0xb1, 0xa0, 0xb2, 0x43, + 0x13, 0x4b, 0x98, 0x27, 0xb3, 0xc9, 0x7c, 0x03, 0x20, 0xa2, 0xe1, 0x0c, 0x85, 0x6d, 0x62, 0xfa, + 0x05, 0xd1, 0x4d, 0x60, 0x41, 0x10, 0xf6, 0xe9, 0x17, 0x04, 0xdd, 0x04, 0x90, 0x8b, 0x9c, 0x3d, + 0x27, 0x81, 0x7a, 0x35, 0x63, 0xc9, 0x7e, 0x20, 0x08, 0xa2, 0x23, 0x3c, 0xa6, 0x3e, 0x27, 0x91, + 0xf4, 0xbe, 0x22, 0xd6, 0x33, 0xfb, 0x67, 0x16, 0xdc, 0x18, 0xa8, 0x86, 0x4e, 0xf4, 0x8f, 0x61, + 0x5c, 0xa5, 0xeb, 0x21, 0x32, 0xaf, 0xce, 0xf3, 0x1a, 0x88, 0x6e, 0xc3, 0x74, 0x40, 0xce, 0xb8, + 0x93, 0x52, 0x4f, 0x75, 0x24, 0x65, 0x41, 0xde, 0x33, 0x2a, 0xda, 0xbf, 0xb1, 0x60, 0xd6, 0x78, + 0xf2, 0x06, 0x89, 0x9b, 0x11, 0x0d, 0x65, 0x23, 0x3c, 0xc8, 0x14, 0xb7, 0x60, 0xd2, 0xa3, 0x71, + 0xe8, 0xbb, 0x5d, 0x47, 0xae, 0xe9, 0x16, 0x47, 0xd3, 0x9e, 0x0a, 0x96, 0x5d, 0x00, 0xf1, 0x46, + 0x6d, 0xb1, 0x88, 0x26, 0x4f, 0x9a, 0xcc, 0x8b, 0x6e, 0x28, 0xee, 0x6e, 0x6a, 0x6f, 0x9c, 0x12, + 0x61, 0x7f, 0x0a, 0x73, 0xca, 0x52, 0xfa, 0x93, 0x95, 0xb9, 0xaa, 0x0a, 0x14, 0x34, 0x57, 0x57, + 0xeb, 0x98, 0xcc, 0xd1, 0x32, 0x94, 0x7d, 0x37, 0x68, 0x75, 0x94, 0x4b, 0x7b, 0x46, 0xd1, 0x49, + 0x43, 0x6c, 0x30, 0x8f, 0xd8, 
0x2d, 0x98, 0x3f, 0x27, 0x58, 0x1b, 0xff, 0xe9, 0x80, 0x0f, 0x6b, + 0xb5, 0xab, 0x24, 0x82, 0xf4, 0x11, 0x7a, 0xdf, 0xd8, 0xec, 0x1d, 0x98, 0x1d, 0x70, 0xc8, 0xd7, + 0x34, 0xb0, 0xfd, 0x08, 0xde, 0x10, 0x6a, 0x63, 0xc6, 0x78, 0x23, 0xb1, 0x92, 0x31, 0xca, 0x85, + 0x83, 0x5b, 0x03, 0x0e, 0xde, 0x56, 0x21, 0x70, 0x5e, 0x82, 0x3e, 0x7d, 0xff, 0x05, 0x5a, 0xff, + 0xf9, 0x05, 0xfe, 0x2d, 0x07, 0x79, 0xf9, 0xfd, 0x04, 0xbd, 0x03, 0x65, 0x1a, 0x70, 0xd2, 0x22, + 0x91, 0xa3, 0xbe, 0x32, 0xc9, 0x0c, 0xbe, 0x35, 0x82, 0x27, 0x35, 0x59, 0xb1, 0xdd, 0x82, 0xd2, + 0xb1, 0xcf, 0x5c, 0xae, 0x99, 0x84, 0x0d, 0xac, 0xad, 0x11, 0x0c, 0x92, 0xa8, 0x58, 0x96, 0x45, + 0xd1, 0x8b, 0x68, 0xd0, 0x72, 0xfa, 0x3f, 0x57, 0x95, 0x14, 0x35, 0xd9, 0xee, 0x88, 0x31, 0x9f, + 0xb8, 0x81, 0xe6, 0x1a, 0xd3, 0xdd, 0xf0, 0xa4, 0x26, 0x2b, 0xb6, 0x4d, 0x98, 0x4e, 0xbe, 0xd0, + 0x6a, 0xc6, 0xfc, 0x65, 0x89, 0x7a, 0x6b, 0x04, 0x4f, 0x25, 0x20, 0x25, 0xe6, 0x01, 0x80, 0xa0, + 0x68, 0x09, 0x2a, 0x83, 0x2e, 0x18, 0x09, 0xc2, 0x95, 0x24, 0x7a, 0xf7, 0x78, 0xc3, 0xed, 0x6e, + 0x8d, 0xe0, 0xa2, 0xe0, 0x55, 0xc0, 0x3a, 0x80, 0x27, 0x4a, 0x84, 0x02, 0xaa, 0xd4, 0x78, 0xad, + 0x0f, 0xb8, 0xe1, 0x72, 0x51, 0xf0, 0x8b, 0x82, 0x4d, 0x62, 0x4c, 0x85, 0x5e, 0xe5, 0x00, 0xa9, + 0xaf, 0xaf, 0x15, 0x58, 0xd8, 0xd9, 0xfe, 0x68, 0x73, 0x67, 0x7b, 0x6b, 0x77, 0x77, 0xc3, 0xf9, + 0xe4, 0xe9, 0xfe, 0xde, 0x66, 0x63, 0xfb, 0xc9, 0xf6, 0xe6, 0xc6, 0xcc, 0x08, 0xba, 0x06, 0xe5, + 0xc3, 0x4d, 0xfc, 0x99, 0xf3, 0xc9, 0x53, 0xc9, 0xf2, 0xd9, 0x8c, 0x85, 0x26, 0xa1, 0x90, 0xcc, + 0x72, 0x62, 0xb6, 0xb7, 0xbb, 0xbf, 0xbf, 0xbd, 0xbe, 0xb3, 0x39, 0x33, 0x8a, 0x00, 0xc6, 0xf5, + 0xca, 0x18, 0x9a, 0x86, 0x92, 0x84, 0x6a, 0x42, 0xbe, 0xfe, 0xaf, 0x09, 0x80, 0x0d, 0x3f, 0xdc, + 0x27, 0xd1, 0x0b, 0xda, 0x24, 0xe8, 0xd7, 0x16, 0x4c, 0xf5, 0x37, 0xac, 0xe8, 0xde, 0xd5, 0x6a, + 0x53, 0xaa, 0x0d, 0xaf, 0xd4, 0x87, 0x81, 0x28, 0x5f, 0xb5, 0x97, 0x7f, 0xfc, 0xd7, 0xaf, 0x7f, + 0x9e, 0xbb, 0x69, 0x2f, 0x26, 0xdf, 0xe9, 0x9b, 0x8a, 0xe3, 0x7d, 
0x5d, 0x31, 0xdf, 0xb7, 0x56, + 0xd1, 0x2f, 0x2d, 0xf1, 0xd8, 0x4b, 0x3d, 0x29, 0xd0, 0x7b, 0xc3, 0xbe, 0x2d, 0x2b, 0xf7, 0x86, + 0x40, 0x68, 0xdd, 0x6c, 0xa9, 0xdb, 0x9b, 0xf6, 0xf5, 0x0b, 0xba, 0xa9, 0x07, 0x94, 0x50, 0xed, + 0x57, 0x16, 0x2c, 0x0c, 0x6e, 0xc5, 0xd1, 0xc3, 0xcc, 0x90, 0xcb, 0x6a, 0xdf, 0x2b, 0x37, 0x0d, + 0x34, 0xf5, 0xb7, 0xa3, 0xd7, 0xa2, 0xda, 0xb7, 0xa5, 0x62, 0x4b, 0xf6, 0x8d, 0x44, 0x31, 0x6d, + 0xac, 0xd4, 0x1f, 0x11, 0xa1, 0xdc, 0x9f, 0x2c, 0x98, 0x1d, 0x50, 0xa3, 0xd0, 0xfd, 0xec, 0x6f, + 0x80, 0xaf, 0xaa, 0xad, 0x95, 0x07, 0x43, 0xe3, 0xb4, 0x25, 0xeb, 0x52, 0xe1, 0xbb, 0x68, 0x35, + 0x51, 0xf8, 0x4b, 0x91, 0x1c, 0xd7, 0x8c, 0xda, 0xfa, 0x61, 0x53, 0x5b, 0x7d, 0x59, 0x4b, 0x3e, + 0xa8, 0xfe, 0xd1, 0x82, 0x72, 0x5f, 0x76, 0xcf, 0xbe, 0xf4, 0x41, 0x15, 0x26, 0xfb, 0xd2, 0x07, + 0x96, 0x0e, 0xfb, 0xbe, 0x54, 0xf5, 0x3d, 0x54, 0x4d, 0x54, 0x8d, 0xfa, 0xb2, 0x6c, 0xed, 0x4b, + 0x53, 0xa3, 0xd6, 0x56, 0x5f, 0xd6, 0x7a, 0xbf, 0x61, 0x7e, 0x6b, 0x01, 0xba, 0x98, 0x93, 0xd1, + 0x77, 0x2e, 0xd3, 0x60, 0x60, 0x15, 0xa8, 0xdc, 0x1f, 0x16, 0xa6, 0xb5, 0x7f, 0x4b, 0x6a, 0xff, + 0x06, 0xba, 0xfe, 0x0a, 0xed, 0xd7, 0x7f, 0x6a, 0xc1, 0xff, 0x35, 0x59, 0x3b, 0x43, 0xfc, 0x7a, + 0x61, 0xc3, 0x0f, 0xf7, 0x44, 0xbe, 0xdc, 0xb3, 0x7e, 0xb0, 0xa6, 0xf9, 0x5a, 0x4c, 0xd4, 0x9f, + 0x2a, 0x8b, 0x5a, 0xb5, 0x16, 0x09, 0x64, 0x36, 0xad, 0xa9, 0x25, 0x37, 0xa4, 0xf1, 0xa0, 0xff, + 0x6d, 0x1f, 0x78, 0x7e, 0xf8, 0x87, 0xdc, 0xe2, 0xf7, 0x14, 0x5e, 0xbe, 0x57, 0xaa, 0x1b, 0x7e, + 0x58, 0x3d, 0xac, 0xaf, 0x8b, 0xe5, 0xa3, 0x71, 0x29, 0xe4, 0xdb, 0xff, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x01, 0xfb, 0x51, 0xc8, 0x38, 0x1c, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/storage.pb.go b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/storage.pb.go new file mode 100644 index 000000000..043ba03f8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/storage.pb.go @@ -0,0 +1,874 @@ +// 
Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/privacy/dlp/v2beta1/storage.proto + +package dlp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Type of information detected by the API. +type InfoType struct { + // Name of the information type. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *InfoType) Reset() { *m = InfoType{} } +func (m *InfoType) String() string { return proto.CompactTextString(m) } +func (*InfoType) ProtoMessage() {} +func (*InfoType) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *InfoType) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// General identifier of a data field in a storage service. +type FieldId struct { + // Name describing the field. + ColumnName string `protobuf:"bytes,1,opt,name=column_name,json=columnName" json:"column_name,omitempty"` +} + +func (m *FieldId) Reset() { *m = FieldId{} } +func (m *FieldId) String() string { return proto.CompactTextString(m) } +func (*FieldId) ProtoMessage() {} +func (*FieldId) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *FieldId) GetColumnName() string { + if m != nil { + return m.ColumnName + } + return "" +} + +// Datastore partition ID. +// A partition ID identifies a grouping of entities. The grouping is always +// by project and namespace, however the namespace ID may be empty. +// +// A partition ID contains several dimensions: +// project ID and namespace ID. +type PartitionId struct { + // The ID of the project to which the entities belong. 
+ ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // If not empty, the ID of the namespace to which the entities belong. + NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId" json:"namespace_id,omitempty"` +} + +func (m *PartitionId) Reset() { *m = PartitionId{} } +func (m *PartitionId) String() string { return proto.CompactTextString(m) } +func (*PartitionId) ProtoMessage() {} +func (*PartitionId) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *PartitionId) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *PartitionId) GetNamespaceId() string { + if m != nil { + return m.NamespaceId + } + return "" +} + +// A representation of a Datastore kind. +type KindExpression struct { + // The name of the kind. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *KindExpression) Reset() { *m = KindExpression{} } +func (m *KindExpression) String() string { return proto.CompactTextString(m) } +func (*KindExpression) ProtoMessage() {} +func (*KindExpression) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *KindExpression) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A reference to a property relative to the Datastore kind expressions. +type PropertyReference struct { + // The name of the property. + // If name includes "."s, it may be interpreted as a property name path. 
+ Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *PropertyReference) Reset() { *m = PropertyReference{} } +func (m *PropertyReference) String() string { return proto.CompactTextString(m) } +func (*PropertyReference) ProtoMessage() {} +func (*PropertyReference) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *PropertyReference) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A representation of a Datastore property in a projection. +type Projection struct { + // The property to project. + Property *PropertyReference `protobuf:"bytes,1,opt,name=property" json:"property,omitempty"` +} + +func (m *Projection) Reset() { *m = Projection{} } +func (m *Projection) String() string { return proto.CompactTextString(m) } +func (*Projection) ProtoMessage() {} +func (*Projection) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *Projection) GetProperty() *PropertyReference { + if m != nil { + return m.Property + } + return nil +} + +// Options defining a data set within Google Cloud Datastore. +type DatastoreOptions struct { + // A partition ID identifies a grouping of entities. The grouping is always + // by project and namespace, however the namespace ID may be empty. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId" json:"partition_id,omitempty"` + // The kind to process. + Kind *KindExpression `protobuf:"bytes,2,opt,name=kind" json:"kind,omitempty"` + // Properties to scan. If none are specified, all properties will be scanned + // by default. 
+ Projection []*Projection `protobuf:"bytes,3,rep,name=projection" json:"projection,omitempty"` +} + +func (m *DatastoreOptions) Reset() { *m = DatastoreOptions{} } +func (m *DatastoreOptions) String() string { return proto.CompactTextString(m) } +func (*DatastoreOptions) ProtoMessage() {} +func (*DatastoreOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *DatastoreOptions) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *DatastoreOptions) GetKind() *KindExpression { + if m != nil { + return m.Kind + } + return nil +} + +func (m *DatastoreOptions) GetProjection() []*Projection { + if m != nil { + return m.Projection + } + return nil +} + +// Options defining a file or a set of files (path ending with *) within +// a Google Cloud Storage bucket. +type CloudStorageOptions struct { + FileSet *CloudStorageOptions_FileSet `protobuf:"bytes,1,opt,name=file_set,json=fileSet" json:"file_set,omitempty"` +} + +func (m *CloudStorageOptions) Reset() { *m = CloudStorageOptions{} } +func (m *CloudStorageOptions) String() string { return proto.CompactTextString(m) } +func (*CloudStorageOptions) ProtoMessage() {} +func (*CloudStorageOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *CloudStorageOptions) GetFileSet() *CloudStorageOptions_FileSet { + if m != nil { + return m.FileSet + } + return nil +} + +// Set of files to scan. +type CloudStorageOptions_FileSet struct { + // The url, in the format `gs:///`. Trailing wildcard in the + // path is allowed. 
+ Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` +} + +func (m *CloudStorageOptions_FileSet) Reset() { *m = CloudStorageOptions_FileSet{} } +func (m *CloudStorageOptions_FileSet) String() string { return proto.CompactTextString(m) } +func (*CloudStorageOptions_FileSet) ProtoMessage() {} +func (*CloudStorageOptions_FileSet) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7, 0} } + +func (m *CloudStorageOptions_FileSet) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +// A location in Cloud Storage. +type CloudStoragePath struct { + // The url, in the format of `gs://bucket/`. + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` +} + +func (m *CloudStoragePath) Reset() { *m = CloudStoragePath{} } +func (m *CloudStoragePath) String() string { return proto.CompactTextString(m) } +func (*CloudStoragePath) ProtoMessage() {} +func (*CloudStoragePath) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *CloudStoragePath) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +// Options defining BigQuery table and row identifiers. +type BigQueryOptions struct { + // Complete BigQuery table reference. + TableReference *BigQueryTable `protobuf:"bytes,1,opt,name=table_reference,json=tableReference" json:"table_reference,omitempty"` + // References to fields uniquely identifying rows within the table. + // Nested fields in the format, like `person.birthdate.year`, are allowed. 
+ IdentifyingFields []*FieldId `protobuf:"bytes,2,rep,name=identifying_fields,json=identifyingFields" json:"identifying_fields,omitempty"` +} + +func (m *BigQueryOptions) Reset() { *m = BigQueryOptions{} } +func (m *BigQueryOptions) String() string { return proto.CompactTextString(m) } +func (*BigQueryOptions) ProtoMessage() {} +func (*BigQueryOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *BigQueryOptions) GetTableReference() *BigQueryTable { + if m != nil { + return m.TableReference + } + return nil +} + +func (m *BigQueryOptions) GetIdentifyingFields() []*FieldId { + if m != nil { + return m.IdentifyingFields + } + return nil +} + +// Shared message indicating Cloud storage type. +type StorageConfig struct { + // Types that are valid to be assigned to Type: + // *StorageConfig_DatastoreOptions + // *StorageConfig_CloudStorageOptions + // *StorageConfig_BigQueryOptions + Type isStorageConfig_Type `protobuf_oneof:"type"` +} + +func (m *StorageConfig) Reset() { *m = StorageConfig{} } +func (m *StorageConfig) String() string { return proto.CompactTextString(m) } +func (*StorageConfig) ProtoMessage() {} +func (*StorageConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +type isStorageConfig_Type interface { + isStorageConfig_Type() +} + +type StorageConfig_DatastoreOptions struct { + DatastoreOptions *DatastoreOptions `protobuf:"bytes,2,opt,name=datastore_options,json=datastoreOptions,oneof"` +} +type StorageConfig_CloudStorageOptions struct { + CloudStorageOptions *CloudStorageOptions `protobuf:"bytes,3,opt,name=cloud_storage_options,json=cloudStorageOptions,oneof"` +} +type StorageConfig_BigQueryOptions struct { + BigQueryOptions *BigQueryOptions `protobuf:"bytes,4,opt,name=big_query_options,json=bigQueryOptions,oneof"` +} + +func (*StorageConfig_DatastoreOptions) isStorageConfig_Type() {} +func (*StorageConfig_CloudStorageOptions) isStorageConfig_Type() {} +func (*StorageConfig_BigQueryOptions) 
isStorageConfig_Type() {} + +func (m *StorageConfig) GetType() isStorageConfig_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *StorageConfig) GetDatastoreOptions() *DatastoreOptions { + if x, ok := m.GetType().(*StorageConfig_DatastoreOptions); ok { + return x.DatastoreOptions + } + return nil +} + +func (m *StorageConfig) GetCloudStorageOptions() *CloudStorageOptions { + if x, ok := m.GetType().(*StorageConfig_CloudStorageOptions); ok { + return x.CloudStorageOptions + } + return nil +} + +func (m *StorageConfig) GetBigQueryOptions() *BigQueryOptions { + if x, ok := m.GetType().(*StorageConfig_BigQueryOptions); ok { + return x.BigQueryOptions + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*StorageConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StorageConfig_OneofMarshaler, _StorageConfig_OneofUnmarshaler, _StorageConfig_OneofSizer, []interface{}{ + (*StorageConfig_DatastoreOptions)(nil), + (*StorageConfig_CloudStorageOptions)(nil), + (*StorageConfig_BigQueryOptions)(nil), + } +} + +func _StorageConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StorageConfig) + // type + switch x := m.Type.(type) { + case *StorageConfig_DatastoreOptions: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatastoreOptions); err != nil { + return err + } + case *StorageConfig_CloudStorageOptions: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CloudStorageOptions); err != nil { + return err + } + case *StorageConfig_BigQueryOptions: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BigQueryOptions); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("StorageConfig.Type has unexpected type %T", x) + } + return nil +} + +func 
_StorageConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StorageConfig) + switch tag { + case 2: // type.datastore_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatastoreOptions) + err := b.DecodeMessage(msg) + m.Type = &StorageConfig_DatastoreOptions{msg} + return true, err + case 3: // type.cloud_storage_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CloudStorageOptions) + err := b.DecodeMessage(msg) + m.Type = &StorageConfig_CloudStorageOptions{msg} + return true, err + case 4: // type.big_query_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BigQueryOptions) + err := b.DecodeMessage(msg) + m.Type = &StorageConfig_BigQueryOptions{msg} + return true, err + default: + return false, nil + } +} + +func _StorageConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StorageConfig) + // type + switch x := m.Type.(type) { + case *StorageConfig_DatastoreOptions: + s := proto.Size(x.DatastoreOptions) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StorageConfig_CloudStorageOptions: + s := proto.Size(x.CloudStorageOptions) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StorageConfig_BigQueryOptions: + s := proto.Size(x.BigQueryOptions) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Record key for a finding in a Cloud Storage file. +type CloudStorageKey struct { + // Path to the file. + FilePath string `protobuf:"bytes,1,opt,name=file_path,json=filePath" json:"file_path,omitempty"` + // Byte offset of the referenced data in the file. 
+ StartOffset int64 `protobuf:"varint,2,opt,name=start_offset,json=startOffset" json:"start_offset,omitempty"` +} + +func (m *CloudStorageKey) Reset() { *m = CloudStorageKey{} } +func (m *CloudStorageKey) String() string { return proto.CompactTextString(m) } +func (*CloudStorageKey) ProtoMessage() {} +func (*CloudStorageKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *CloudStorageKey) GetFilePath() string { + if m != nil { + return m.FilePath + } + return "" +} + +func (m *CloudStorageKey) GetStartOffset() int64 { + if m != nil { + return m.StartOffset + } + return 0 +} + +// Record key for a finding in Cloud Datastore. +type DatastoreKey struct { + // Datastore entity key. + EntityKey *Key `protobuf:"bytes,1,opt,name=entity_key,json=entityKey" json:"entity_key,omitempty"` +} + +func (m *DatastoreKey) Reset() { *m = DatastoreKey{} } +func (m *DatastoreKey) String() string { return proto.CompactTextString(m) } +func (*DatastoreKey) ProtoMessage() {} +func (*DatastoreKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *DatastoreKey) GetEntityKey() *Key { + if m != nil { + return m.EntityKey + } + return nil +} + +// A unique identifier for a Datastore entity. +// If a key's partition ID or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +type Key struct { + // Entities are partitioned into subsets, currently identified by a project + // ID and namespace ID. + // Queries are scoped to a single partition. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId" json:"partition_id,omitempty"` + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. 
The first + // element identifies a _root entity_, the second element identifies + // a _child_ of the root entity, the third element identifies a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's _ancestors_. + // + // A path can never be empty, and a path can have at most 100 elements. + Path []*Key_PathElement `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *Key) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *Key) GetPath() []*Key_PathElement { + if m != nil { + return m.Path + } + return nil +} + +// A (kind, ID/name) pair used to construct a key path. +// +// If either name or ID is set, the element is complete. +// If neither is set, the element is incomplete. +type Key_PathElement struct { + // The kind of the entity. + // A kind matching regex `__.*__` is reserved/read-only. + // A kind must not contain more than 1500 bytes when UTF-8 encoded. + // Cannot be `""`. + Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` + // The type of ID. 
+ // + // Types that are valid to be assigned to IdType: + // *Key_PathElement_Id + // *Key_PathElement_Name + IdType isKey_PathElement_IdType `protobuf_oneof:"id_type"` +} + +func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +func (*Key_PathElement) ProtoMessage() {} +func (*Key_PathElement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13, 0} } + +type isKey_PathElement_IdType interface { + isKey_PathElement_IdType() +} + +type Key_PathElement_Id struct { + Id int64 `protobuf:"varint,2,opt,name=id,oneof"` +} +type Key_PathElement_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,oneof"` +} + +func (*Key_PathElement_Id) isKey_PathElement_IdType() {} +func (*Key_PathElement_Name) isKey_PathElement_IdType() {} + +func (m *Key_PathElement) GetIdType() isKey_PathElement_IdType { + if m != nil { + return m.IdType + } + return nil +} + +func (m *Key_PathElement) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *Key_PathElement) GetId() int64 { + if x, ok := m.GetIdType().(*Key_PathElement_Id); ok { + return x.Id + } + return 0 +} + +func (m *Key_PathElement) GetName() string { + if x, ok := m.GetIdType().(*Key_PathElement_Name); ok { + return x.Name + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Key_PathElement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Key_PathElement_OneofMarshaler, _Key_PathElement_OneofUnmarshaler, _Key_PathElement_OneofSizer, []interface{}{ + (*Key_PathElement_Id)(nil), + (*Key_PathElement_Name)(nil), + } +} + +func _Key_PathElement_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case nil: + default: + return fmt.Errorf("Key_PathElement.IdType has unexpected type %T", x) + } + return nil +} + +func _Key_PathElement_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Key_PathElement) + switch tag { + case 2: // id_type.id + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.IdType = &Key_PathElement_Id{int64(x)} + return true, err + case 3: // id_type.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdType = &Key_PathElement_Name{x} + return true, err + default: + return false, nil + } +} + +func _Key_PathElement_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message for a unique key 
indicating a record that contains a finding. +type RecordKey struct { + // Types that are valid to be assigned to Type: + // *RecordKey_CloudStorageKey + // *RecordKey_DatastoreKey + Type isRecordKey_Type `protobuf_oneof:"type"` +} + +func (m *RecordKey) Reset() { *m = RecordKey{} } +func (m *RecordKey) String() string { return proto.CompactTextString(m) } +func (*RecordKey) ProtoMessage() {} +func (*RecordKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +type isRecordKey_Type interface { + isRecordKey_Type() +} + +type RecordKey_CloudStorageKey struct { + CloudStorageKey *CloudStorageKey `protobuf:"bytes,1,opt,name=cloud_storage_key,json=cloudStorageKey,oneof"` +} +type RecordKey_DatastoreKey struct { + DatastoreKey *DatastoreKey `protobuf:"bytes,2,opt,name=datastore_key,json=datastoreKey,oneof"` +} + +func (*RecordKey_CloudStorageKey) isRecordKey_Type() {} +func (*RecordKey_DatastoreKey) isRecordKey_Type() {} + +func (m *RecordKey) GetType() isRecordKey_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *RecordKey) GetCloudStorageKey() *CloudStorageKey { + if x, ok := m.GetType().(*RecordKey_CloudStorageKey); ok { + return x.CloudStorageKey + } + return nil +} + +func (m *RecordKey) GetDatastoreKey() *DatastoreKey { + if x, ok := m.GetType().(*RecordKey_DatastoreKey); ok { + return x.DatastoreKey + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RecordKey) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RecordKey_OneofMarshaler, _RecordKey_OneofUnmarshaler, _RecordKey_OneofSizer, []interface{}{ + (*RecordKey_CloudStorageKey)(nil), + (*RecordKey_DatastoreKey)(nil), + } +} + +func _RecordKey_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RecordKey) + // type + switch x := m.Type.(type) { + case *RecordKey_CloudStorageKey: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CloudStorageKey); err != nil { + return err + } + case *RecordKey_DatastoreKey: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatastoreKey); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RecordKey.Type has unexpected type %T", x) + } + return nil +} + +func _RecordKey_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RecordKey) + switch tag { + case 1: // type.cloud_storage_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CloudStorageKey) + err := b.DecodeMessage(msg) + m.Type = &RecordKey_CloudStorageKey{msg} + return true, err + case 2: // type.datastore_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatastoreKey) + err := b.DecodeMessage(msg) + m.Type = &RecordKey_DatastoreKey{msg} + return true, err + default: + return false, nil + } +} + +func _RecordKey_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RecordKey) + // type + switch x := m.Type.(type) { + case *RecordKey_CloudStorageKey: + s := proto.Size(x.CloudStorageKey) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RecordKey_DatastoreKey: + s := proto.Size(x.DatastoreKey) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message defining the location of a BigQuery table. A table is uniquely +// identified by its project_id, dataset_id, and table_name. Within a query +// a table is often referenced with a string in the format of: +// `:.` or +// `..`. +type BigQueryTable struct { + // The Google Cloud Platform project ID of the project containing the table. + // If omitted, project ID is inferred from the API call. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Dataset ID of the table. + DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // Name of the table. + TableId string `protobuf:"bytes,3,opt,name=table_id,json=tableId" json:"table_id,omitempty"` +} + +func (m *BigQueryTable) Reset() { *m = BigQueryTable{} } +func (m *BigQueryTable) String() string { return proto.CompactTextString(m) } +func (*BigQueryTable) ProtoMessage() {} +func (*BigQueryTable) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } + +func (m *BigQueryTable) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *BigQueryTable) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *BigQueryTable) GetTableId() string { + if m != nil { + return m.TableId + } + return "" +} + +func init() { + proto.RegisterType((*InfoType)(nil), "google.privacy.dlp.v2beta1.InfoType") + proto.RegisterType((*FieldId)(nil), "google.privacy.dlp.v2beta1.FieldId") + proto.RegisterType((*PartitionId)(nil), "google.privacy.dlp.v2beta1.PartitionId") + proto.RegisterType((*KindExpression)(nil), "google.privacy.dlp.v2beta1.KindExpression") + proto.RegisterType((*PropertyReference)(nil), "google.privacy.dlp.v2beta1.PropertyReference") + proto.RegisterType((*Projection)(nil), 
"google.privacy.dlp.v2beta1.Projection") + proto.RegisterType((*DatastoreOptions)(nil), "google.privacy.dlp.v2beta1.DatastoreOptions") + proto.RegisterType((*CloudStorageOptions)(nil), "google.privacy.dlp.v2beta1.CloudStorageOptions") + proto.RegisterType((*CloudStorageOptions_FileSet)(nil), "google.privacy.dlp.v2beta1.CloudStorageOptions.FileSet") + proto.RegisterType((*CloudStoragePath)(nil), "google.privacy.dlp.v2beta1.CloudStoragePath") + proto.RegisterType((*BigQueryOptions)(nil), "google.privacy.dlp.v2beta1.BigQueryOptions") + proto.RegisterType((*StorageConfig)(nil), "google.privacy.dlp.v2beta1.StorageConfig") + proto.RegisterType((*CloudStorageKey)(nil), "google.privacy.dlp.v2beta1.CloudStorageKey") + proto.RegisterType((*DatastoreKey)(nil), "google.privacy.dlp.v2beta1.DatastoreKey") + proto.RegisterType((*Key)(nil), "google.privacy.dlp.v2beta1.Key") + proto.RegisterType((*Key_PathElement)(nil), "google.privacy.dlp.v2beta1.Key.PathElement") + proto.RegisterType((*RecordKey)(nil), "google.privacy.dlp.v2beta1.RecordKey") + proto.RegisterType((*BigQueryTable)(nil), "google.privacy.dlp.v2beta1.BigQueryTable") +} + +func init() { proto.RegisterFile("google/privacy/dlp/v2beta1/storage.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 909 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xef, 0x6e, 0x1b, 0x45, + 0x10, 0xcf, 0xd9, 0x56, 0x13, 0x8f, 0x9d, 0xc6, 0xbe, 0x82, 0x64, 0x5c, 0xda, 0xb4, 0x07, 0x6a, + 0x43, 0x03, 0x77, 0xc2, 0x7c, 0xe0, 0x03, 0xa2, 0x48, 0x6e, 0x1a, 0x62, 0x22, 0x35, 0xe9, 0x36, + 0x02, 0x15, 0x3e, 0x9c, 0xd6, 0xb7, 0x7b, 0x97, 0xa5, 0xe7, 0xdb, 0xe5, 0x6e, 0x5d, 0x71, 0x2f, + 0x80, 0x78, 0x16, 0x1e, 0x00, 0x89, 0x4f, 0xbc, 0x0d, 0xe2, 0x31, 0xd0, 0xfe, 0xf1, 0xf9, 0x62, + 0x8c, 0x1b, 0xa4, 0x7e, 0x5b, 0xcf, 0xcd, 0xfc, 0x66, 0xe7, 0x37, 0xbf, 0x99, 0x35, 0x1c, 0x24, + 0x9c, 0x27, 0x29, 0x0d, 0x44, 0xce, 0x5e, 0xe3, 0xa8, 0x0c, 0x48, 0x2a, 0x82, 0xd7, 0xa3, 0x29, 
+ 0x95, 0xf8, 0xd3, 0xa0, 0x90, 0x3c, 0xc7, 0x09, 0xf5, 0x45, 0xce, 0x25, 0x77, 0x87, 0xc6, 0xd3, + 0xb7, 0x9e, 0x3e, 0x49, 0x85, 0x6f, 0x3d, 0x87, 0xef, 0x5b, 0x14, 0x2c, 0x58, 0x80, 0xb3, 0x8c, + 0x4b, 0x2c, 0x19, 0xcf, 0x0a, 0x13, 0x39, 0xdc, 0xaf, 0x72, 0x70, 0xc9, 0xa7, 0xf3, 0x38, 0x90, + 0x6c, 0x46, 0x0b, 0x89, 0x67, 0xc2, 0x38, 0x78, 0x77, 0x61, 0x67, 0x92, 0xc5, 0xfc, 0xa2, 0x14, + 0xd4, 0x75, 0xa1, 0x95, 0xe1, 0x19, 0x1d, 0x38, 0xf7, 0x9c, 0x83, 0x36, 0xd2, 0x67, 0xef, 0x11, + 0x6c, 0x1f, 0x33, 0x9a, 0x92, 0x09, 0x71, 0xf7, 0xa1, 0x13, 0xf1, 0x74, 0x3e, 0xcb, 0xc2, 0x9a, + 0x17, 0x18, 0xd3, 0x33, 0xe5, 0x7b, 0x06, 0x9d, 0x73, 0x9c, 0x4b, 0xa6, 0x2e, 0x30, 0x21, 0xee, + 0x1d, 0x00, 0x91, 0xf3, 0x1f, 0x69, 0x24, 0x43, 0x46, 0x06, 0x0d, 0xed, 0xde, 0xb6, 0x96, 0x09, + 0x71, 0xef, 0x43, 0x57, 0xe1, 0x14, 0x02, 0x47, 0x54, 0x39, 0xb4, 0xb4, 0x43, 0xa7, 0xb2, 0x4d, + 0x88, 0xf7, 0x21, 0xdc, 0x3c, 0x65, 0x19, 0x79, 0xfa, 0xb3, 0xc8, 0x69, 0x51, 0x30, 0x9e, 0xad, + 0xbd, 0xe2, 0x43, 0xe8, 0x9f, 0xe7, 0x5c, 0xd0, 0x5c, 0x96, 0x88, 0xc6, 0x34, 0xa7, 0x59, 0xb4, + 0xac, 0xa5, 0x51, 0x73, 0xfc, 0x0e, 0xe0, 0xdc, 0xa4, 0x57, 0x50, 0x13, 0xd8, 0x11, 0x36, 0x4c, + 0xc3, 0x75, 0x46, 0x9f, 0xf8, 0xff, 0xcd, 0xb3, 0xff, 0xaf, 0x14, 0xa8, 0x0a, 0xf7, 0xfe, 0x72, + 0xa0, 0x77, 0x84, 0x25, 0x56, 0x5d, 0xa3, 0x67, 0x42, 0x37, 0xc0, 0xfd, 0x06, 0xba, 0x62, 0xc1, + 0x86, 0xaa, 0xcf, 0xe4, 0x78, 0xb8, 0x31, 0xc7, 0x92, 0x3d, 0xd4, 0x11, 0x35, 0x2a, 0x1f, 0x43, + 0xeb, 0x15, 0xcb, 0x0c, 0x89, 0x9d, 0xd1, 0xa3, 0x4d, 0x18, 0x57, 0x09, 0x43, 0x3a, 0xce, 0x3d, + 0xae, 0x5a, 0xc1, 0x78, 0x36, 0x68, 0xde, 0x6b, 0x1e, 0x74, 0x46, 0x0f, 0xde, 0x50, 0xad, 0xf5, + 0x46, 0xb5, 0x48, 0xef, 0x17, 0x07, 0x6e, 0x3d, 0x49, 0xf9, 0x9c, 0xbc, 0x30, 0xfa, 0x5c, 0xd4, + 0x8a, 0x60, 0x27, 0x66, 0x29, 0x0d, 0x0b, 0x2a, 0x6d, 0x9d, 0x9f, 0x6f, 0x42, 0x5f, 0x03, 0xe1, + 0x1f, 0xb3, 0x94, 0xbe, 0xa0, 0x12, 0x6d, 0xc7, 0xe6, 0x30, 0xbc, 0xad, 0x94, 0xa7, 0x8f, 0x6e, + 0x0f, 0x9a, 0xf3, 0x3c, 0xb5, 0x4d, 
0x57, 0x47, 0xef, 0x01, 0xf4, 0xea, 0x20, 0xe7, 0x58, 0x5e, + 0xaa, 0x96, 0x0b, 0x2c, 0x2f, 0x17, 0xda, 0x50, 0x67, 0xef, 0x0f, 0x07, 0xf6, 0xc6, 0x2c, 0x79, + 0x3e, 0xa7, 0x79, 0xb9, 0xbc, 0xec, 0x9e, 0xc4, 0xd3, 0x94, 0x86, 0xf9, 0xa2, 0x95, 0xf6, 0xce, + 0x1f, 0x6d, 0xba, 0xf3, 0x02, 0xe5, 0x42, 0x85, 0xa2, 0x9b, 0x1a, 0x61, 0x29, 0x37, 0x04, 0x2e, + 0x23, 0x34, 0x93, 0x2c, 0x2e, 0x59, 0x96, 0x84, 0xb1, 0x1a, 0x99, 0x62, 0xd0, 0xd0, 0x44, 0x7f, + 0xb0, 0x09, 0xd6, 0x0e, 0x17, 0xea, 0xd7, 0xc2, 0xb5, 0xad, 0xf0, 0x7e, 0x6f, 0xc0, 0xae, 0xad, + 0xef, 0x09, 0xcf, 0x62, 0x96, 0xb8, 0x3f, 0x40, 0x9f, 0x2c, 0x64, 0x16, 0x72, 0x53, 0x8e, 0xd5, + 0xc4, 0xc7, 0x9b, 0x92, 0xac, 0x6a, 0xf3, 0x64, 0x0b, 0xf5, 0xc8, 0xaa, 0x5e, 0x29, 0xbc, 0x1b, + 0x29, 0x4a, 0x43, 0xbb, 0x7b, 0xaa, 0x04, 0x4d, 0x9d, 0x20, 0xf8, 0x9f, 0x0d, 0x3d, 0xd9, 0x42, + 0xb7, 0xa2, 0x35, 0x52, 0x79, 0x09, 0xfd, 0x29, 0x4b, 0xc2, 0x9f, 0x14, 0x97, 0x55, 0x8a, 0x96, + 0x4e, 0x71, 0x78, 0x1d, 0xfe, 0x97, 0xf0, 0x7b, 0xd3, 0xab, 0xa6, 0xf1, 0x0d, 0x68, 0xc9, 0x52, + 0x50, 0xef, 0x39, 0xec, 0xd5, 0x2f, 0x74, 0x4a, 0x4b, 0xf7, 0x36, 0xb4, 0xb5, 0x40, 0x6b, 0x02, + 0xd1, 0x8a, 0xd5, 0xc2, 0xb9, 0x0f, 0xdd, 0x42, 0xe2, 0x5c, 0x86, 0x3c, 0x8e, 0x95, 0x82, 0x15, + 0xa3, 0x4d, 0xd4, 0xd1, 0xb6, 0x33, 0x6d, 0xf2, 0x9e, 0x41, 0xb7, 0x22, 0x51, 0xe1, 0x3d, 0x06, + 0x50, 0xed, 0x92, 0x65, 0xf8, 0x8a, 0x2e, 0xd6, 0xc7, 0xfe, 0xc6, 0xb1, 0xa4, 0x25, 0x6a, 0x9b, + 0x90, 0x53, 0x5a, 0x7a, 0x7f, 0x3b, 0xd0, 0x54, 0x38, 0x6f, 0x73, 0x49, 0x7c, 0x65, 0xf5, 0x6f, + 0x54, 0x77, 0xf8, 0x86, 0xdb, 0xf8, 0xaa, 0xf4, 0xa7, 0x29, 0x9d, 0xd1, 0x4c, 0x9a, 0x61, 0x19, + 0x5e, 0xa8, 0xfd, 0x5d, 0x19, 0xd5, 0x3c, 0xe9, 0xa5, 0x63, 0xe7, 0x49, 0x2f, 0x92, 0x1e, 0x34, + 0xec, 0x2e, 0x6f, 0x9e, 0x6c, 0xa1, 0x06, 0x23, 0xee, 0x3b, 0x76, 0xd1, 0x2a, 0x95, 0xb4, 0x4f, + 0xb6, 0xcc, 0xaa, 0x1d, 0xb7, 0x61, 0x9b, 0x91, 0x50, 0x77, 0xe3, 0x4f, 0x07, 0xda, 0x88, 0x46, + 0x3c, 0x27, 0xaa, 0xe0, 0x97, 0xd0, 0xbf, 0xaa, 0xb2, 0x25, 0x7f, 0x87, 
0xd7, 0x55, 0xd8, 0x29, + 0x2d, 0x55, 0xfb, 0xa3, 0x95, 0x1e, 0x9f, 0xc1, 0xee, 0x72, 0x3a, 0x14, 0xac, 0x99, 0x8c, 0x83, + 0x6b, 0x4d, 0x86, 0xc1, 0xec, 0x92, 0xda, 0xef, 0x4a, 0x4f, 0x97, 0xb0, 0x7b, 0x65, 0xfa, 0x57, + 0x5e, 0x36, 0x67, 0xf5, 0x65, 0xbb, 0x03, 0xa0, 0x71, 0x68, 0xfd, 0xe1, 0xb3, 0x96, 0x09, 0x71, + 0xdf, 0x83, 0x1d, 0xb3, 0x7f, 0x18, 0x31, 0xac, 0xa1, 0x6d, 0xfd, 0x7b, 0x42, 0xc6, 0xbf, 0x3a, + 0x70, 0x37, 0xe2, 0xb3, 0x0d, 0x37, 0x1e, 0xc3, 0x51, 0x2a, 0x16, 0x5b, 0xcf, 0xf9, 0xfe, 0x4b, + 0xeb, 0x99, 0xf0, 0x14, 0x67, 0x89, 0xcf, 0xf3, 0x24, 0x48, 0x68, 0xa6, 0x9f, 0xf6, 0xc0, 0x7c, + 0xc2, 0x82, 0x15, 0xeb, 0xfe, 0x62, 0x7c, 0x41, 0x52, 0xf1, 0x5b, 0x63, 0xf0, 0xb5, 0x89, 0xd7, + 0xf4, 0xfa, 0x47, 0xa9, 0xf0, 0xbf, 0x1d, 0x8d, 0xd5, 0xe7, 0xe9, 0x0d, 0x0d, 0xf2, 0xd9, 0x3f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x8c, 0xe7, 0xe9, 0x4f, 0xa6, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go new file mode 100644 index 000000000..ab4768dfe --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go @@ -0,0 +1,2523 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/pubsub/v1/pubsub.proto + +/* +Package pubsub is a generated protocol buffer package. 
+ +It is generated from these files: + google/pubsub/v1/pubsub.proto + +It has these top-level messages: + Topic + PubsubMessage + GetTopicRequest + UpdateTopicRequest + PublishRequest + PublishResponse + ListTopicsRequest + ListTopicsResponse + ListTopicSubscriptionsRequest + ListTopicSubscriptionsResponse + DeleteTopicRequest + Subscription + PushConfig + ReceivedMessage + GetSubscriptionRequest + UpdateSubscriptionRequest + ListSubscriptionsRequest + ListSubscriptionsResponse + DeleteSubscriptionRequest + ModifyPushConfigRequest + PullRequest + PullResponse + ModifyAckDeadlineRequest + AcknowledgeRequest + StreamingPullRequest + StreamingPullResponse + CreateSnapshotRequest + UpdateSnapshotRequest + Snapshot + ListSnapshotsRequest + ListSnapshotsResponse + DeleteSnapshotRequest + SeekRequest + SeekResponse +*/ +package pubsub + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A topic resource. +type Topic struct { + // The name of the topic. It must have the format + // `"projects/{project}/topics/{topic}"`. 
`{topic}` must start with a letter, + // and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), + // underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent + // signs (`%`). It must be between 3 and 255 characters in length, and it + // must not start with `"goog"`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // User labels. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Topic) Reset() { *m = Topic{} } +func (m *Topic) String() string { return proto.CompactTextString(m) } +func (*Topic) ProtoMessage() {} +func (*Topic) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Topic) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Topic) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// A message data and its attributes. The message payload must not be empty; +// it must contain either a non-empty data field, or at least one attribute. +type PubsubMessage struct { + // The message payload. + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // Optional attributes for this message. + Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // ID of this message, assigned by the server when the message is published. + // Guaranteed to be unique within the topic. This value may be read by a + // subscriber that receives a `PubsubMessage` via a `Pull` call or a push + // delivery. It must not be populated by the publisher in a `Publish` call. 
+ MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId" json:"message_id,omitempty"` + // The time at which the message was published, populated by the server when + // it receives the `Publish` call. It must not be populated by the + // publisher in a `Publish` call. + PublishTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=publish_time,json=publishTime" json:"publish_time,omitempty"` +} + +func (m *PubsubMessage) Reset() { *m = PubsubMessage{} } +func (m *PubsubMessage) String() string { return proto.CompactTextString(m) } +func (*PubsubMessage) ProtoMessage() {} +func (*PubsubMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *PubsubMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *PubsubMessage) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *PubsubMessage) GetMessageId() string { + if m != nil { + return m.MessageId + } + return "" +} + +func (m *PubsubMessage) GetPublishTime() *google_protobuf4.Timestamp { + if m != nil { + return m.PublishTime + } + return nil +} + +// Request for the GetTopic method. +type GetTopicRequest struct { + // The name of the topic to get. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` +} + +func (m *GetTopicRequest) Reset() { *m = GetTopicRequest{} } +func (m *GetTopicRequest) String() string { return proto.CompactTextString(m) } +func (*GetTopicRequest) ProtoMessage() {} +func (*GetTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GetTopicRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +// Request for the UpdateTopic method. +type UpdateTopicRequest struct { + // The topic to update. 
+ Topic *Topic `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + // Indicates which fields in the provided topic to update. + // Must be specified and non-empty. + UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateTopicRequest) Reset() { *m = UpdateTopicRequest{} } +func (m *UpdateTopicRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateTopicRequest) ProtoMessage() {} +func (*UpdateTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *UpdateTopicRequest) GetTopic() *Topic { + if m != nil { + return m.Topic + } + return nil +} + +func (m *UpdateTopicRequest) GetUpdateMask() *google_protobuf3.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request for the Publish method. +type PublishRequest struct { + // The messages in the request will be published on this topic. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + // The messages to publish. + Messages []*PubsubMessage `protobuf:"bytes,2,rep,name=messages" json:"messages,omitempty"` +} + +func (m *PublishRequest) Reset() { *m = PublishRequest{} } +func (m *PublishRequest) String() string { return proto.CompactTextString(m) } +func (*PublishRequest) ProtoMessage() {} +func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *PublishRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *PublishRequest) GetMessages() []*PubsubMessage { + if m != nil { + return m.Messages + } + return nil +} + +// Response for the `Publish` method. +type PublishResponse struct { + // The server-assigned ID of each published message, in the same order as + // the messages in the request. IDs are guaranteed to be unique within + // the topic. 
+ MessageIds []string `protobuf:"bytes,1,rep,name=message_ids,json=messageIds" json:"message_ids,omitempty"` +} + +func (m *PublishResponse) Reset() { *m = PublishResponse{} } +func (m *PublishResponse) String() string { return proto.CompactTextString(m) } +func (*PublishResponse) ProtoMessage() {} +func (*PublishResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *PublishResponse) GetMessageIds() []string { + if m != nil { + return m.MessageIds + } + return nil +} + +// Request for the `ListTopics` method. +type ListTopicsRequest struct { + // The name of the cloud project that topics belong to. + // Format is `projects/{project}`. + Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` + // Maximum number of topics to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last `ListTopicsResponse`; indicates that this is + // a continuation of a prior `ListTopics` call, and that the system should + // return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListTopicsRequest) Reset() { *m = ListTopicsRequest{} } +func (m *ListTopicsRequest) String() string { return proto.CompactTextString(m) } +func (*ListTopicsRequest) ProtoMessage() {} +func (*ListTopicsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ListTopicsRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *ListTopicsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTopicsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the `ListTopics` method. +type ListTopicsResponse struct { + // The resulting topics. 
+ Topics []*Topic `protobuf:"bytes,1,rep,name=topics" json:"topics,omitempty"` + // If not empty, indicates that there may be more topics that match the + // request; this value should be passed in a new `ListTopicsRequest`. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListTopicsResponse) Reset() { *m = ListTopicsResponse{} } +func (m *ListTopicsResponse) String() string { return proto.CompactTextString(m) } +func (*ListTopicsResponse) ProtoMessage() {} +func (*ListTopicsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListTopicsResponse) GetTopics() []*Topic { + if m != nil { + return m.Topics + } + return nil +} + +func (m *ListTopicsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the `ListTopicSubscriptions` method. +type ListTopicSubscriptionsRequest struct { + // The name of the topic that subscriptions are attached to. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + // Maximum number of subscription names to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last `ListTopicSubscriptionsResponse`; indicates + // that this is a continuation of a prior `ListTopicSubscriptions` call, and + // that the system should return the next page of data. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListTopicSubscriptionsRequest) Reset() { *m = ListTopicSubscriptionsRequest{} } +func (m *ListTopicSubscriptionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListTopicSubscriptionsRequest) ProtoMessage() {} +func (*ListTopicSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ListTopicSubscriptionsRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *ListTopicSubscriptionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTopicSubscriptionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the `ListTopicSubscriptions` method. +type ListTopicSubscriptionsResponse struct { + // The names of the subscriptions that match the request. + Subscriptions []string `protobuf:"bytes,1,rep,name=subscriptions" json:"subscriptions,omitempty"` + // If not empty, indicates that there may be more subscriptions that match + // the request; this value should be passed in a new + // `ListTopicSubscriptionsRequest` to get more subscriptions. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListTopicSubscriptionsResponse) Reset() { *m = ListTopicSubscriptionsResponse{} } +func (m *ListTopicSubscriptionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListTopicSubscriptionsResponse) ProtoMessage() {} +func (*ListTopicSubscriptionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ListTopicSubscriptionsResponse) GetSubscriptions() []string { + if m != nil { + return m.Subscriptions + } + return nil +} + +func (m *ListTopicSubscriptionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the `DeleteTopic` method. +type DeleteTopicRequest struct { + // Name of the topic to delete. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` +} + +func (m *DeleteTopicRequest) Reset() { *m = DeleteTopicRequest{} } +func (m *DeleteTopicRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTopicRequest) ProtoMessage() {} +func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *DeleteTopicRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +// A subscription resource. +type Subscription struct { + // The name of the subscription. It must have the format + // `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must + // start with a letter, and contain only letters (`[A-Za-z]`), numbers + // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), + // plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters + // in length, and it must not start with `"goog"`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of the topic from which this subscription is receiving messages. + // Format is `projects/{project}/topics/{topic}`. + // The value of this field will be `_deleted-topic_` if the topic has been + // deleted. + Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"` + // If push delivery is used with this subscription, this field is + // used to configure it. An empty `pushConfig` signifies that the subscriber + // will pull and ack messages using API methods. + PushConfig *PushConfig `protobuf:"bytes,4,opt,name=push_config,json=pushConfig" json:"push_config,omitempty"` + // This value is the maximum time after a subscriber receives a message + // before the subscriber should acknowledge the message. After message + // delivery but before the ack deadline expires and before the message is + // acknowledged, it is an outstanding message and will not be delivered + // again during that time (on a best-effort basis). + // + // For pull subscriptions, this value is used as the initial value for the ack + // deadline. To override this value for a given message, call + // `ModifyAckDeadline` with the corresponding `ack_id` if using + // pull. + // The minimum custom deadline you can specify is 10 seconds. + // The maximum custom deadline you can specify is 600 seconds (10 minutes). + // If this parameter is 0, a default value of 10 seconds is used. + // + // For push delivery, this value is also used to set the request timeout for + // the call to the push endpoint. + // + // If the subscriber never acknowledges the message, the Pub/Sub + // system will eventually redeliver the message. + AckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds" json:"ack_deadline_seconds,omitempty"` + // Indicates whether to retain acknowledged messages. 
If true, then + // messages are not expunged from the subscription's backlog, even if they are + // acknowledged, until they fall out of the `message_retention_duration` + // window. + RetainAckedMessages bool `protobuf:"varint,7,opt,name=retain_acked_messages,json=retainAckedMessages" json:"retain_acked_messages,omitempty"` + // How long to retain unacknowledged messages in the subscription's backlog, + // from the moment a message is published. + // If `retain_acked_messages` is true, then this also configures the retention + // of acknowledged messages, and thus configures how far back in time a `Seek` + // can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 + // minutes. + MessageRetentionDuration *google_protobuf1.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration" json:"message_retention_duration,omitempty"` + // User labels. + Labels map[string]string `protobuf:"bytes,9,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Subscription) Reset() { *m = Subscription{} } +func (m *Subscription) String() string { return proto.CompactTextString(m) } +func (*Subscription) ProtoMessage() {} +func (*Subscription) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Subscription) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Subscription) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *Subscription) GetPushConfig() *PushConfig { + if m != nil { + return m.PushConfig + } + return nil +} + +func (m *Subscription) GetAckDeadlineSeconds() int32 { + if m != nil { + return m.AckDeadlineSeconds + } + return 0 +} + +func (m *Subscription) GetRetainAckedMessages() bool { + if m != nil { + return m.RetainAckedMessages + } + return false +} + +func (m *Subscription) GetMessageRetentionDuration() *google_protobuf1.Duration { + if m != nil { 
+ return m.MessageRetentionDuration + } + return nil +} + +func (m *Subscription) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// Configuration for a push delivery endpoint. +type PushConfig struct { + // A URL locating the endpoint to which messages should be pushed. + // For example, a Webhook endpoint might use "https://example.com/push". + PushEndpoint string `protobuf:"bytes,1,opt,name=push_endpoint,json=pushEndpoint" json:"push_endpoint,omitempty"` + // Endpoint configuration attributes. + // + // Every endpoint has a set of API supported attributes that can be used to + // control different aspects of the message delivery. + // + // The currently supported attribute is `x-goog-version`, which you can + // use to change the format of the pushed message. This attribute + // indicates the version of the data expected by the endpoint. This + // controls the shape of the pushed message (i.e., its fields and metadata). + // The endpoint version is based on the version of the Pub/Sub API. + // + // If not present during the `CreateSubscription` call, it will default to + // the version of the API used to make such call. If not present during a + // `ModifyPushConfig` call, its value will not be changed. `GetSubscription` + // calls will always return a valid version, even if the subscription was + // created without this attribute. + // + // The possible values for this attribute are: + // + // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. + // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. 
+ Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *PushConfig) Reset() { *m = PushConfig{} } +func (m *PushConfig) String() string { return proto.CompactTextString(m) } +func (*PushConfig) ProtoMessage() {} +func (*PushConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *PushConfig) GetPushEndpoint() string { + if m != nil { + return m.PushEndpoint + } + return "" +} + +func (m *PushConfig) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// A message and its corresponding acknowledgment ID. +type ReceivedMessage struct { + // This ID can be used to acknowledge the received message. + AckId string `protobuf:"bytes,1,opt,name=ack_id,json=ackId" json:"ack_id,omitempty"` + // The message. + Message *PubsubMessage `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *ReceivedMessage) Reset() { *m = ReceivedMessage{} } +func (m *ReceivedMessage) String() string { return proto.CompactTextString(m) } +func (*ReceivedMessage) ProtoMessage() {} +func (*ReceivedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *ReceivedMessage) GetAckId() string { + if m != nil { + return m.AckId + } + return "" +} + +func (m *ReceivedMessage) GetMessage() *PubsubMessage { + if m != nil { + return m.Message + } + return nil +} + +// Request for the GetSubscription method. +type GetSubscriptionRequest struct { + // The name of the subscription to get. + // Format is `projects/{project}/subscriptions/{sub}`. 
+ Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` +} + +func (m *GetSubscriptionRequest) Reset() { *m = GetSubscriptionRequest{} } +func (m *GetSubscriptionRequest) String() string { return proto.CompactTextString(m) } +func (*GetSubscriptionRequest) ProtoMessage() {} +func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *GetSubscriptionRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +// Request for the UpdateSubscription method. +type UpdateSubscriptionRequest struct { + // The updated subscription object. + Subscription *Subscription `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // Indicates which fields in the provided subscription to update. + // Must be specified and non-empty. + UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateSubscriptionRequest) Reset() { *m = UpdateSubscriptionRequest{} } +func (m *UpdateSubscriptionRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateSubscriptionRequest) ProtoMessage() {} +func (*UpdateSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *UpdateSubscriptionRequest) GetSubscription() *Subscription { + if m != nil { + return m.Subscription + } + return nil +} + +func (m *UpdateSubscriptionRequest) GetUpdateMask() *google_protobuf3.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request for the `ListSubscriptions` method. +type ListSubscriptionsRequest struct { + // The name of the cloud project that subscriptions belong to. + // Format is `projects/{project}`. + Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` + // Maximum number of subscriptions to return. 
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last `ListSubscriptionsResponse`; indicates that + // this is a continuation of a prior `ListSubscriptions` call, and that the + // system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListSubscriptionsRequest) Reset() { *m = ListSubscriptionsRequest{} } +func (m *ListSubscriptionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSubscriptionsRequest) ProtoMessage() {} +func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *ListSubscriptionsRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *ListSubscriptionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListSubscriptionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the `ListSubscriptions` method. +type ListSubscriptionsResponse struct { + // The subscriptions that match the request. + Subscriptions []*Subscription `protobuf:"bytes,1,rep,name=subscriptions" json:"subscriptions,omitempty"` + // If not empty, indicates that there may be more subscriptions that match + // the request; this value should be passed in a new + // `ListSubscriptionsRequest` to get more subscriptions. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListSubscriptionsResponse) Reset() { *m = ListSubscriptionsResponse{} } +func (m *ListSubscriptionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSubscriptionsResponse) ProtoMessage() {} +func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ListSubscriptionsResponse) GetSubscriptions() []*Subscription { + if m != nil { + return m.Subscriptions + } + return nil +} + +func (m *ListSubscriptionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the DeleteSubscription method. +type DeleteSubscriptionRequest struct { + // The subscription to delete. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` +} + +func (m *DeleteSubscriptionRequest) Reset() { *m = DeleteSubscriptionRequest{} } +func (m *DeleteSubscriptionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSubscriptionRequest) ProtoMessage() {} +func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *DeleteSubscriptionRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +// Request for the ModifyPushConfig method. +type ModifyPushConfigRequest struct { + // The name of the subscription. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // The push configuration for future deliveries. 
+ // + // An empty `pushConfig` indicates that the Pub/Sub system should + // stop pushing messages from the given subscription and allow + // messages to be pulled and acknowledged - effectively pausing + // the subscription if `Pull` is not called. + PushConfig *PushConfig `protobuf:"bytes,2,opt,name=push_config,json=pushConfig" json:"push_config,omitempty"` +} + +func (m *ModifyPushConfigRequest) Reset() { *m = ModifyPushConfigRequest{} } +func (m *ModifyPushConfigRequest) String() string { return proto.CompactTextString(m) } +func (*ModifyPushConfigRequest) ProtoMessage() {} +func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *ModifyPushConfigRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *ModifyPushConfigRequest) GetPushConfig() *PushConfig { + if m != nil { + return m.PushConfig + } + return nil +} + +// Request for the `Pull` method. +type PullRequest struct { + // The subscription from which messages should be pulled. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // If this field set to true, the system will respond immediately even if + // it there are no messages available to return in the `Pull` response. + // Otherwise, the system may wait (for a bounded amount of time) until at + // least one message is available, rather than returning no messages. The + // client may cancel the request if it does not wish to wait any longer for + // the response. + ReturnImmediately bool `protobuf:"varint,2,opt,name=return_immediately,json=returnImmediately" json:"return_immediately,omitempty"` + // The maximum number of messages returned for this request. The Pub/Sub + // system may return fewer than the number specified. 
+ MaxMessages int32 `protobuf:"varint,3,opt,name=max_messages,json=maxMessages" json:"max_messages,omitempty"` +} + +func (m *PullRequest) Reset() { *m = PullRequest{} } +func (m *PullRequest) String() string { return proto.CompactTextString(m) } +func (*PullRequest) ProtoMessage() {} +func (*PullRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *PullRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *PullRequest) GetReturnImmediately() bool { + if m != nil { + return m.ReturnImmediately + } + return false +} + +func (m *PullRequest) GetMaxMessages() int32 { + if m != nil { + return m.MaxMessages + } + return 0 +} + +// Response for the `Pull` method. +type PullResponse struct { + // Received Pub/Sub messages. The Pub/Sub system will return zero messages if + // there are no more available in the backlog. The Pub/Sub system may return + // fewer than the `maxMessages` requested even if there are more messages + // available in the backlog. + ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` +} + +func (m *PullResponse) Reset() { *m = PullResponse{} } +func (m *PullResponse) String() string { return proto.CompactTextString(m) } +func (*PullResponse) ProtoMessage() {} +func (*PullResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *PullResponse) GetReceivedMessages() []*ReceivedMessage { + if m != nil { + return m.ReceivedMessages + } + return nil +} + +// Request for the ModifyAckDeadline method. +type ModifyAckDeadlineRequest struct { + // The name of the subscription. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // List of acknowledgment IDs. 
+ AckIds []string `protobuf:"bytes,4,rep,name=ack_ids,json=ackIds" json:"ack_ids,omitempty"` + // The new ack deadline with respect to the time this request was sent to + // the Pub/Sub system. For example, if the value is 10, the new + // ack deadline will expire 10 seconds after the `ModifyAckDeadline` call + // was made. Specifying zero may immediately make the message available for + // another pull request. + // The minimum deadline you can specify is 0 seconds. + // The maximum deadline you can specify is 600 seconds (10 minutes). + AckDeadlineSeconds int32 `protobuf:"varint,3,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds" json:"ack_deadline_seconds,omitempty"` +} + +func (m *ModifyAckDeadlineRequest) Reset() { *m = ModifyAckDeadlineRequest{} } +func (m *ModifyAckDeadlineRequest) String() string { return proto.CompactTextString(m) } +func (*ModifyAckDeadlineRequest) ProtoMessage() {} +func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *ModifyAckDeadlineRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *ModifyAckDeadlineRequest) GetAckIds() []string { + if m != nil { + return m.AckIds + } + return nil +} + +func (m *ModifyAckDeadlineRequest) GetAckDeadlineSeconds() int32 { + if m != nil { + return m.AckDeadlineSeconds + } + return 0 +} + +// Request for the Acknowledge method. +type AcknowledgeRequest struct { + // The subscription whose message is being acknowledged. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // The acknowledgment ID for the messages being acknowledged that was returned + // by the Pub/Sub system in the `Pull` response. Must not be empty. 
+ AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds" json:"ack_ids,omitempty"` +} + +func (m *AcknowledgeRequest) Reset() { *m = AcknowledgeRequest{} } +func (m *AcknowledgeRequest) String() string { return proto.CompactTextString(m) } +func (*AcknowledgeRequest) ProtoMessage() {} +func (*AcknowledgeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *AcknowledgeRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *AcknowledgeRequest) GetAckIds() []string { + if m != nil { + return m.AckIds + } + return nil +} + +// Request for the `StreamingPull` streaming RPC method. This request is used to +// establish the initial stream as well as to stream acknowledgements and ack +// deadline modifications from the client to the server. +type StreamingPullRequest struct { + // The subscription for which to initialize the new stream. This must be + // provided in the first request on the stream, and must not be set in + // subsequent requests from client to server. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // List of acknowledgement IDs for acknowledging previously received messages + // (received on this stream or a different stream). If an ack ID has expired, + // the corresponding message may be redelivered later. Acknowledging a message + // more than once will not result in an error. If the acknowledgement ID is + // malformed, the stream will be aborted with status `INVALID_ARGUMENT`. + AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds" json:"ack_ids,omitempty"` + // The list of new ack deadlines for the IDs listed in + // `modify_deadline_ack_ids`. The size of this list must be the same as the + // size of `modify_deadline_ack_ids`. If it differs the stream will be aborted + // with `INVALID_ARGUMENT`. 
Each element in this list is applied to the + // element in the same position in `modify_deadline_ack_ids`. The new ack + // deadline is with respect to the time this request was sent to the Pub/Sub + // system. Must be >= 0. For example, if the value is 10, the new ack deadline + // will expire 10 seconds after this request is received. If the value is 0, + // the message is immediately made available for another streaming or + // non-streaming pull request. If the value is < 0 (an error), the stream will + // be aborted with status `INVALID_ARGUMENT`. + ModifyDeadlineSeconds []int32 `protobuf:"varint,3,rep,packed,name=modify_deadline_seconds,json=modifyDeadlineSeconds" json:"modify_deadline_seconds,omitempty"` + // List of acknowledgement IDs whose deadline will be modified based on the + // corresponding element in `modify_deadline_seconds`. This field can be used + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. + ModifyDeadlineAckIds []string `protobuf:"bytes,4,rep,name=modify_deadline_ack_ids,json=modifyDeadlineAckIds" json:"modify_deadline_ack_ids,omitempty"` + // The ack deadline to use for the stream. This must be provided in the + // first request on the stream, but it can also be updated on subsequent + // requests from client to server. The minimum deadline you can specify is 10 + // seconds. The maximum deadline you can specify is 600 seconds (10 minutes). 
+ StreamAckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=stream_ack_deadline_seconds,json=streamAckDeadlineSeconds" json:"stream_ack_deadline_seconds,omitempty"` +} + +func (m *StreamingPullRequest) Reset() { *m = StreamingPullRequest{} } +func (m *StreamingPullRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingPullRequest) ProtoMessage() {} +func (*StreamingPullRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *StreamingPullRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *StreamingPullRequest) GetAckIds() []string { + if m != nil { + return m.AckIds + } + return nil +} + +func (m *StreamingPullRequest) GetModifyDeadlineSeconds() []int32 { + if m != nil { + return m.ModifyDeadlineSeconds + } + return nil +} + +func (m *StreamingPullRequest) GetModifyDeadlineAckIds() []string { + if m != nil { + return m.ModifyDeadlineAckIds + } + return nil +} + +func (m *StreamingPullRequest) GetStreamAckDeadlineSeconds() int32 { + if m != nil { + return m.StreamAckDeadlineSeconds + } + return 0 +} + +// Response for the `StreamingPull` method. This response is used to stream +// messages from the server to the client. +type StreamingPullResponse struct { + // Received Pub/Sub messages. This will not be empty. + ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` +} + +func (m *StreamingPullResponse) Reset() { *m = StreamingPullResponse{} } +func (m *StreamingPullResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingPullResponse) ProtoMessage() {} +func (*StreamingPullResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *StreamingPullResponse) GetReceivedMessages() []*ReceivedMessage { + if m != nil { + return m.ReceivedMessages + } + return nil +} + +// Request for the `CreateSnapshot` method. 
+type CreateSnapshotRequest struct { + // Optional user-provided name for this snapshot. + // If the name is not provided in the request, the server will assign a random + // name for this snapshot on the same project as the subscription. + // Note that for REST API requests, you must specify a name. + // Format is `projects/{project}/snapshots/{snap}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The subscription whose backlog the snapshot retains. + // Specifically, the created snapshot is guaranteed to retain: + // (a) The existing backlog on the subscription. More precisely, this is + // defined as the messages in the subscription's backlog that are + // unacknowledged upon the successful completion of the + // `CreateSnapshot` request; as well as: + // (b) Any messages published to the subscription's topic following the + // successful completion of the CreateSnapshot request. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,2,opt,name=subscription" json:"subscription,omitempty"` +} + +func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } +func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotRequest) ProtoMessage() {} +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *CreateSnapshotRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSnapshotRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +// Request for the UpdateSnapshot method. +type UpdateSnapshotRequest struct { + // The updated snpashot object. + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` + // Indicates which fields in the provided snapshot to update. + // Must be specified and non-empty. 
+ UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateSnapshotRequest) Reset() { *m = UpdateSnapshotRequest{} } +func (m *UpdateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateSnapshotRequest) ProtoMessage() {} +func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *UpdateSnapshotRequest) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *UpdateSnapshotRequest) GetUpdateMask() *google_protobuf3.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// A snapshot resource. +type Snapshot struct { + // The name of the snapshot. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of the topic from which this snapshot is retaining messages. + Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"` + // The snapshot is guaranteed to exist up until this time. + // A newly-created snapshot expires no later than 7 days from the time of its + // creation. Its exact lifetime is determined at creation by the existing + // backlog in the source subscription. Specifically, the lifetime of the + // snapshot is `7 days - (age of oldest unacked message in the subscription)`. + // For example, consider a subscription whose oldest unacked message is 3 days + // old. If a snapshot is created from this subscription, the snapshot -- which + // will always capture this 3-day-old backlog as long as the snapshot + // exists -- will expire in 4 days. + ExpireTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime" json:"expire_time,omitempty"` + // User labels. 
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *Snapshot) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Snapshot) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *Snapshot) GetExpireTime() *google_protobuf4.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +func (m *Snapshot) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// Request for the `ListSnapshots` method. +type ListSnapshotsRequest struct { + // The name of the cloud project that snapshots belong to. + // Format is `projects/{project}`. + Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` + // Maximum number of snapshots to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last `ListSnapshotsResponse`; indicates that this + // is a continuation of a prior `ListSnapshots` call, and that the system + // should return the next page of data. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } +func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsRequest) ProtoMessage() {} +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *ListSnapshotsRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *ListSnapshotsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListSnapshotsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the `ListSnapshots` method. +type ListSnapshotsResponse struct { + // The resulting snapshots. + Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots" json:"snapshots,omitempty"` + // If not empty, indicates that there may be more snapshot that match the + // request; this value should be passed in a new `ListSnapshotsRequest`. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } +func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse) ProtoMessage() {} +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +func (m *ListSnapshotsResponse) GetSnapshots() []*Snapshot { + if m != nil { + return m.Snapshots + } + return nil +} + +func (m *ListSnapshotsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the `DeleteSnapshot` method. +type DeleteSnapshotRequest struct { + // The name of the snapshot to delete. + // Format is `projects/{project}/snapshots/{snap}`. 
+ Snapshot string `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` +} + +func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } +func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotRequest) ProtoMessage() {} +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *DeleteSnapshotRequest) GetSnapshot() string { + if m != nil { + return m.Snapshot + } + return "" +} + +// Request for the `Seek` method. +type SeekRequest struct { + // The subscription to affect. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // Types that are valid to be assigned to Target: + // *SeekRequest_Time + // *SeekRequest_Snapshot + Target isSeekRequest_Target `protobuf_oneof:"target"` +} + +func (m *SeekRequest) Reset() { *m = SeekRequest{} } +func (m *SeekRequest) String() string { return proto.CompactTextString(m) } +func (*SeekRequest) ProtoMessage() {} +func (*SeekRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +type isSeekRequest_Target interface { + isSeekRequest_Target() +} + +type SeekRequest_Time struct { + Time *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=time,oneof"` +} +type SeekRequest_Snapshot struct { + Snapshot string `protobuf:"bytes,3,opt,name=snapshot,oneof"` +} + +func (*SeekRequest_Time) isSeekRequest_Target() {} +func (*SeekRequest_Snapshot) isSeekRequest_Target() {} + +func (m *SeekRequest) GetTarget() isSeekRequest_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *SeekRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *SeekRequest) GetTime() *google_protobuf4.Timestamp { + if x, ok := m.GetTarget().(*SeekRequest_Time); ok { + return x.Time + } + return nil +} + +func (m *SeekRequest) GetSnapshot() string { + if x, ok := m.GetTarget().(*SeekRequest_Snapshot); ok 
{ + return x.Snapshot + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SeekRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SeekRequest_OneofMarshaler, _SeekRequest_OneofUnmarshaler, _SeekRequest_OneofSizer, []interface{}{ + (*SeekRequest_Time)(nil), + (*SeekRequest_Snapshot)(nil), + } +} + +func _SeekRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SeekRequest) + // target + switch x := m.Target.(type) { + case *SeekRequest_Time: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Time); err != nil { + return err + } + case *SeekRequest_Snapshot: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Snapshot) + case nil: + default: + return fmt.Errorf("SeekRequest.Target has unexpected type %T", x) + } + return nil +} + +func _SeekRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SeekRequest) + switch tag { + case 2: // target.time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf4.Timestamp) + err := b.DecodeMessage(msg) + m.Target = &SeekRequest_Time{msg} + return true, err + case 3: // target.snapshot + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Target = &SeekRequest_Snapshot{x} + return true, err + default: + return false, nil + } +} + +func _SeekRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SeekRequest) + // target + switch x := m.Target.(type) { + case *SeekRequest_Time: + s := proto.Size(x.Time) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SeekRequest_Snapshot: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.Snapshot))) + n += len(x.Snapshot) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SeekResponse struct { +} + +func (m *SeekResponse) Reset() { *m = SeekResponse{} } +func (m *SeekResponse) String() string { return proto.CompactTextString(m) } +func (*SeekResponse) ProtoMessage() {} +func (*SeekResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func init() { + proto.RegisterType((*Topic)(nil), "google.pubsub.v1.Topic") + proto.RegisterType((*PubsubMessage)(nil), "google.pubsub.v1.PubsubMessage") + proto.RegisterType((*GetTopicRequest)(nil), "google.pubsub.v1.GetTopicRequest") + proto.RegisterType((*UpdateTopicRequest)(nil), "google.pubsub.v1.UpdateTopicRequest") + proto.RegisterType((*PublishRequest)(nil), "google.pubsub.v1.PublishRequest") + proto.RegisterType((*PublishResponse)(nil), "google.pubsub.v1.PublishResponse") + proto.RegisterType((*ListTopicsRequest)(nil), "google.pubsub.v1.ListTopicsRequest") + proto.RegisterType((*ListTopicsResponse)(nil), "google.pubsub.v1.ListTopicsResponse") + proto.RegisterType((*ListTopicSubscriptionsRequest)(nil), "google.pubsub.v1.ListTopicSubscriptionsRequest") + proto.RegisterType((*ListTopicSubscriptionsResponse)(nil), "google.pubsub.v1.ListTopicSubscriptionsResponse") + proto.RegisterType((*DeleteTopicRequest)(nil), "google.pubsub.v1.DeleteTopicRequest") + proto.RegisterType((*Subscription)(nil), "google.pubsub.v1.Subscription") + proto.RegisterType((*PushConfig)(nil), "google.pubsub.v1.PushConfig") + proto.RegisterType((*ReceivedMessage)(nil), "google.pubsub.v1.ReceivedMessage") + proto.RegisterType((*GetSubscriptionRequest)(nil), "google.pubsub.v1.GetSubscriptionRequest") + proto.RegisterType((*UpdateSubscriptionRequest)(nil), "google.pubsub.v1.UpdateSubscriptionRequest") + proto.RegisterType((*ListSubscriptionsRequest)(nil), "google.pubsub.v1.ListSubscriptionsRequest") + 
proto.RegisterType((*ListSubscriptionsResponse)(nil), "google.pubsub.v1.ListSubscriptionsResponse") + proto.RegisterType((*DeleteSubscriptionRequest)(nil), "google.pubsub.v1.DeleteSubscriptionRequest") + proto.RegisterType((*ModifyPushConfigRequest)(nil), "google.pubsub.v1.ModifyPushConfigRequest") + proto.RegisterType((*PullRequest)(nil), "google.pubsub.v1.PullRequest") + proto.RegisterType((*PullResponse)(nil), "google.pubsub.v1.PullResponse") + proto.RegisterType((*ModifyAckDeadlineRequest)(nil), "google.pubsub.v1.ModifyAckDeadlineRequest") + proto.RegisterType((*AcknowledgeRequest)(nil), "google.pubsub.v1.AcknowledgeRequest") + proto.RegisterType((*StreamingPullRequest)(nil), "google.pubsub.v1.StreamingPullRequest") + proto.RegisterType((*StreamingPullResponse)(nil), "google.pubsub.v1.StreamingPullResponse") + proto.RegisterType((*CreateSnapshotRequest)(nil), "google.pubsub.v1.CreateSnapshotRequest") + proto.RegisterType((*UpdateSnapshotRequest)(nil), "google.pubsub.v1.UpdateSnapshotRequest") + proto.RegisterType((*Snapshot)(nil), "google.pubsub.v1.Snapshot") + proto.RegisterType((*ListSnapshotsRequest)(nil), "google.pubsub.v1.ListSnapshotsRequest") + proto.RegisterType((*ListSnapshotsResponse)(nil), "google.pubsub.v1.ListSnapshotsResponse") + proto.RegisterType((*DeleteSnapshotRequest)(nil), "google.pubsub.v1.DeleteSnapshotRequest") + proto.RegisterType((*SeekRequest)(nil), "google.pubsub.v1.SeekRequest") + proto.RegisterType((*SeekResponse)(nil), "google.pubsub.v1.SeekResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Subscriber service + +type SubscriberClient interface { + // Creates a subscription to a given topic. 
+ // If the subscription already exists, returns `ALREADY_EXISTS`. + // If the corresponding topic doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic, conforming + // to the + // [resource name format](https://cloud.google.com/pubsub/docs/overview#names). + // The generated name is populated in the returned Subscription object. + // Note that for REST API requests, you must specify a name in the request. + CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) + // Gets the configuration details of a subscription. + GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) + // Updates an existing subscription. Note that certain properties of a + // subscription, such as its topic, are not modifiable. + // NOTE: The style guide requires body: "subscription" instead of body: "*". + // Keeping the latter for internal consistency in V1, however it should be + // corrected in V2. See + // https://cloud.google.com/apis/design/standard_methods#update for details. + UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) + // Lists matching subscriptions. + ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) + // Deletes an existing subscription. All messages retained in the subscription + // are immediately dropped. Calls to `Pull` after deletion will return + // `NOT_FOUND`. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription or its topic unless the same topic is specified. 
+ DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Modifies the ack deadline for a specific message. This method is useful + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. Note that this does not modify the + // subscription-level `ackDeadlineSeconds` used for subsequent messages. + ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Acknowledges the messages associated with the `ack_ids` in the + // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Pulls messages from the server. Returns an empty list if there are no + // messages available in the backlog. The server may return `UNAVAILABLE` if + // there are too many concurrent pull requests pending for the given + // subscription. + Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) + // (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will + // respond with UNIMPLEMENTED errors unless you have been invited to test + // this feature. Contact cloud-pubsub@google.com with any questions. + // + // Establishes a stream with the server, which sends messages down to the + // client. The client streams acknowledgements and ack deadline modifications + // back to the server. The server will close the stream and return the status + // on any error. 
The server may close the stream with status `OK` to reassign + // server-side resources, in which case, the client should re-establish the + // stream. `UNAVAILABLE` may also be returned in the case of a transient error + // (e.g., a server restart). These should also be retried by the client. Flow + // control can be achieved by configuring the underlying RPC channel. + StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) + // Modifies the `PushConfig` for a specified subscription. + // + // This may be used to change a push subscription to a pull one (signified by + // an empty `PushConfig`) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. Messages will accumulate for delivery + // continuously through the call regardless of changes to the `PushConfig`. + ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Lists the existing snapshots. + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) + // Creates a snapshot from the requested subscription. + // If the snapshot already exists, returns `ALREADY_EXISTS`. + // If the requested subscription doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this snapshot on the same project as the subscription, conforming + // to the + // [resource name format](https://cloud.google.com/pubsub/docs/overview#names). + // The generated name is populated in the returned Snapshot object. + // Note that for REST API requests, you must specify a name in the request. + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Updates an existing snapshot. Note that certain properties of a snapshot + // are not modifiable. 
+ // NOTE: The style guide requires body: "snapshot" instead of body: "*". + // Keeping the latter for internal consistency in V1, however it should be + // corrected in V2. See + // https://cloud.google.com/apis/design/standard_methods#update for details. + UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Removes an existing snapshot. All messages retained in the snapshot + // are immediately dropped. After a snapshot is deleted, a new one may be + // created with the same name, but the new one has no association with the old + // snapshot or its subscription, unless the same subscription is specified. + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Seeks an existing subscription to a point in time or to a given snapshot, + // whichever is provided in the request. + Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) +} + +type subscriberClient struct { + cc *grpc.ClientConn +} + +func NewSubscriberClient(cc *grpc.ClientConn) SubscriberClient { + return &subscriberClient{cc} +} + +func (c *subscriberClient) CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSubscription", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/GetSubscription", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSubscription", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) { + out := new(ListSubscriptionsResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSubscriptions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSubscription", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyAckDeadline", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Acknowledge", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) { + out := new(PullResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Pull", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Subscriber_serviceDesc.Streams[0], c.cc, "/google.pubsub.v1.Subscriber/StreamingPull", opts...) + if err != nil { + return nil, err + } + x := &subscriberStreamingPullClient{stream} + return x, nil +} + +type Subscriber_StreamingPullClient interface { + Send(*StreamingPullRequest) error + Recv() (*StreamingPullResponse, error) + grpc.ClientStream +} + +type subscriberStreamingPullClient struct { + grpc.ClientStream +} + +func (x *subscriberStreamingPullClient) Send(m *StreamingPullRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *subscriberStreamingPullClient) Recv() (*StreamingPullResponse, error) { + m := new(StreamingPullResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *subscriberClient) ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyPushConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSnapshots", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSnapshot", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSnapshot", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSnapshot", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) { + out := new(SeekResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Seek", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Subscriber service + +type SubscriberServer interface { + // Creates a subscription to a given topic. + // If the subscription already exists, returns `ALREADY_EXISTS`. + // If the corresponding topic doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic, conforming + // to the + // [resource name format](https://cloud.google.com/pubsub/docs/overview#names). + // The generated name is populated in the returned Subscription object. 
+ // Note that for REST API requests, you must specify a name in the request. + CreateSubscription(context.Context, *Subscription) (*Subscription, error) + // Gets the configuration details of a subscription. + GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error) + // Updates an existing subscription. Note that certain properties of a + // subscription, such as its topic, are not modifiable. + // NOTE: The style guide requires body: "subscription" instead of body: "*". + // Keeping the latter for internal consistency in V1, however it should be + // corrected in V2. See + // https://cloud.google.com/apis/design/standard_methods#update for details. + UpdateSubscription(context.Context, *UpdateSubscriptionRequest) (*Subscription, error) + // Lists matching subscriptions. + ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) + // Deletes an existing subscription. All messages retained in the subscription + // are immediately dropped. Calls to `Pull` after deletion will return + // `NOT_FOUND`. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription or its topic unless the same topic is specified. + DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*google_protobuf2.Empty, error) + // Modifies the ack deadline for a specific message. This method is useful + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. Note that this does not modify the + // subscription-level `ackDeadlineSeconds` used for subsequent messages. + ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*google_protobuf2.Empty, error) + // Acknowledges the messages associated with the `ack_ids` in the + // `AcknowledgeRequest`. 
The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + Acknowledge(context.Context, *AcknowledgeRequest) (*google_protobuf2.Empty, error) + // Pulls messages from the server. Returns an empty list if there are no + // messages available in the backlog. The server may return `UNAVAILABLE` if + // there are too many concurrent pull requests pending for the given + // subscription. + Pull(context.Context, *PullRequest) (*PullResponse, error) + // (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will + // respond with UNIMPLEMENTED errors unless you have been invited to test + // this feature. Contact cloud-pubsub@google.com with any questions. + // + // Establishes a stream with the server, which sends messages down to the + // client. The client streams acknowledgements and ack deadline modifications + // back to the server. The server will close the stream and return the status + // on any error. The server may close the stream with status `OK` to reassign + // server-side resources, in which case, the client should re-establish the + // stream. `UNAVAILABLE` may also be returned in the case of a transient error + // (e.g., a server restart). These should also be retried by the client. Flow + // control can be achieved by configuring the underlying RPC channel. + StreamingPull(Subscriber_StreamingPullServer) error + // Modifies the `PushConfig` for a specified subscription. + // + // This may be used to change a push subscription to a pull one (signified by + // an empty `PushConfig`) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. Messages will accumulate for delivery + // continuously through the call regardless of changes to the `PushConfig`. 
+ ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*google_protobuf2.Empty, error) + // Lists the existing snapshots. + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) + // Creates a snapshot from the requested subscription. + // If the snapshot already exists, returns `ALREADY_EXISTS`. + // If the requested subscription doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this snapshot on the same project as the subscription, conforming + // to the + // [resource name format](https://cloud.google.com/pubsub/docs/overview#names). + // The generated name is populated in the returned Snapshot object. + // Note that for REST API requests, you must specify a name in the request. + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*Snapshot, error) + // Updates an existing snapshot. Note that certain properties of a snapshot + // are not modifiable. + // NOTE: The style guide requires body: "snapshot" instead of body: "*". + // Keeping the latter for internal consistency in V1, however it should be + // corrected in V2. See + // https://cloud.google.com/apis/design/standard_methods#update for details. + UpdateSnapshot(context.Context, *UpdateSnapshotRequest) (*Snapshot, error) + // Removes an existing snapshot. All messages retained in the snapshot + // are immediately dropped. After a snapshot is deleted, a new one may be + // created with the same name, but the new one has no association with the old + // snapshot or its subscription, unless the same subscription is specified. + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*google_protobuf2.Empty, error) + // Seeks an existing subscription to a point in time or to a given snapshot, + // whichever is provided in the request. 
+ Seek(context.Context, *SeekRequest) (*SeekResponse, error) +} + +func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) { + s.RegisterService(&_Subscriber_serviceDesc, srv) +} + +func _Subscriber_CreateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Subscription) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).CreateSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/CreateSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).CreateSubscription(ctx, req.(*Subscription)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_GetSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).GetSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/GetSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).GetSubscription(ctx, req.(*GetSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_UpdateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).UpdateSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.pubsub.v1.Subscriber/UpdateSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).UpdateSubscription(ctx, req.(*UpdateSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ListSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ListSubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/ListSubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ListSubscriptions(ctx, req.(*ListSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_DeleteSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).DeleteSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/DeleteSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).DeleteSubscription(ctx, req.(*DeleteSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ModifyAckDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ModifyAckDeadlineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SubscriberServer).ModifyAckDeadline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/ModifyAckDeadline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ModifyAckDeadline(ctx, req.(*ModifyAckDeadlineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_Acknowledge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AcknowledgeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).Acknowledge(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/Acknowledge", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).Acknowledge(ctx, req.(*AcknowledgeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_Pull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PullRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).Pull(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/Pull", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).Pull(ctx, req.(*PullRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_StreamingPull_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SubscriberServer).StreamingPull(&subscriberStreamingPullServer{stream}) +} + +type Subscriber_StreamingPullServer interface { + Send(*StreamingPullResponse) error + Recv() (*StreamingPullRequest, error) + grpc.ServerStream +} + 
+type subscriberStreamingPullServer struct { + grpc.ServerStream +} + +func (x *subscriberStreamingPullServer) Send(m *StreamingPullResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *subscriberStreamingPullServer) Recv() (*StreamingPullRequest, error) { + m := new(StreamingPullRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Subscriber_ModifyPushConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ModifyPushConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ModifyPushConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/ModifyPushConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ModifyPushConfig(ctx, req.(*ModifyPushConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { 
+ return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/CreateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_UpdateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).UpdateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/UpdateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).UpdateSnapshot(ctx, req.(*UpdateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).DeleteSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/DeleteSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_Seek_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SeekRequest) + if err := dec(in); 
err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).Seek(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Subscriber/Seek", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).Seek(ctx, req.(*SeekRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Subscriber_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.v1.Subscriber", + HandlerType: (*SubscriberServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSubscription", + Handler: _Subscriber_CreateSubscription_Handler, + }, + { + MethodName: "GetSubscription", + Handler: _Subscriber_GetSubscription_Handler, + }, + { + MethodName: "UpdateSubscription", + Handler: _Subscriber_UpdateSubscription_Handler, + }, + { + MethodName: "ListSubscriptions", + Handler: _Subscriber_ListSubscriptions_Handler, + }, + { + MethodName: "DeleteSubscription", + Handler: _Subscriber_DeleteSubscription_Handler, + }, + { + MethodName: "ModifyAckDeadline", + Handler: _Subscriber_ModifyAckDeadline_Handler, + }, + { + MethodName: "Acknowledge", + Handler: _Subscriber_Acknowledge_Handler, + }, + { + MethodName: "Pull", + Handler: _Subscriber_Pull_Handler, + }, + { + MethodName: "ModifyPushConfig", + Handler: _Subscriber_ModifyPushConfig_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _Subscriber_ListSnapshots_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _Subscriber_CreateSnapshot_Handler, + }, + { + MethodName: "UpdateSnapshot", + Handler: _Subscriber_UpdateSnapshot_Handler, + }, + { + MethodName: "DeleteSnapshot", + Handler: _Subscriber_DeleteSnapshot_Handler, + }, + { + MethodName: "Seek", + Handler: _Subscriber_Seek_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingPull", + Handler: _Subscriber_StreamingPull_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: 
"google/pubsub/v1/pubsub.proto", +} + +// Client API for Publisher service + +type PublisherClient interface { + // Creates the given topic with the given name. + CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) + // Updates an existing topic. Note that certain properties of a topic are not + // modifiable. Options settings follow the style guide: + // NOTE: The style guide requires body: "topic" instead of body: "*". + // Keeping the latter for internal consistency in V1, however it should be + // corrected in V2. See + // https://cloud.google.com/apis/design/standard_methods#update for details. + UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error) + // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic + // does not exist. The message payload must not be empty; it must contain + // either a non-empty data field, or at least one attribute. + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) + // Gets the configuration of a topic. + GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) + // Lists matching topics. + ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) + // Lists the name of the subscriptions for this topic. + ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) + // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic + // does not exist. After a topic is deleted, a new topic may be created with + // the same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted, but their `topic` field is set to `_deleted-topic_`. 
+ DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) +} + +type publisherClient struct { + cc *grpc.ClientConn +} + +func NewPublisherClient(cc *grpc.ClientConn) PublisherClient { + return &publisherClient{cc} +} + +func (c *publisherClient) CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/CreateTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/UpdateTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) { + out := new(PublishResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/Publish", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/GetTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) { + out := new(ListTopicsResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopics", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) { + out := new(ListTopicSubscriptionsResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopicSubscriptions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1.Publisher/DeleteTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Publisher service + +type PublisherServer interface { + // Creates the given topic with the given name. + CreateTopic(context.Context, *Topic) (*Topic, error) + // Updates an existing topic. Note that certain properties of a topic are not + // modifiable. Options settings follow the style guide: + // NOTE: The style guide requires body: "topic" instead of body: "*". + // Keeping the latter for internal consistency in V1, however it should be + // corrected in V2. See + // https://cloud.google.com/apis/design/standard_methods#update for details. + UpdateTopic(context.Context, *UpdateTopicRequest) (*Topic, error) + // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic + // does not exist. The message payload must not be empty; it must contain + // either a non-empty data field, or at least one attribute. + Publish(context.Context, *PublishRequest) (*PublishResponse, error) + // Gets the configuration of a topic. + GetTopic(context.Context, *GetTopicRequest) (*Topic, error) + // Lists matching topics. + ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error) + // Lists the name of the subscriptions for this topic. 
+ ListTopicSubscriptions(context.Context, *ListTopicSubscriptionsRequest) (*ListTopicSubscriptionsResponse, error) + // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic + // does not exist. After a topic is deleted, a new topic may be created with + // the same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted, but their `topic` field is set to `_deleted-topic_`. + DeleteTopic(context.Context, *DeleteTopicRequest) (*google_protobuf2.Empty, error) +} + +func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) { + s.RegisterService(&_Publisher_serviceDesc, srv) +} + +func _Publisher_CreateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Topic) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).CreateTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/CreateTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).CreateTopic(ctx, req.(*Topic)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_UpdateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).UpdateTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/UpdateTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).UpdateTopic(ctx, req.(*UpdateTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Publisher_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/Publish", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_GetTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).GetTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/GetTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).GetTopic(ctx, req.(*GetTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/ListTopics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopics(ctx, req.(*ListTopicsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopicSubscriptions_Handler(srv interface{}, ctx context.Context, 
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopicSubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/ListTopicSubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopicSubscriptions(ctx, req.(*ListTopicSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Publisher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.v1.Publisher", + HandlerType: (*PublisherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateTopic", + Handler: _Publisher_CreateTopic_Handler, + }, + { + MethodName: "UpdateTopic", + Handler: _Publisher_UpdateTopic_Handler, + }, + { + MethodName: "Publish", + Handler: _Publisher_Publish_Handler, + }, + { + MethodName: "GetTopic", + Handler: _Publisher_GetTopic_Handler, + }, + { + MethodName: "ListTopics", + Handler: _Publisher_ListTopics_Handler, + }, + { + MethodName: "ListTopicSubscriptions", + Handler: _Publisher_ListTopicSubscriptions_Handler, + }, + { + MethodName: "DeleteTopic", + Handler: 
_Publisher_DeleteTopic_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/pubsub/v1/pubsub.proto", +} + +func init() { proto.RegisterFile("google/pubsub/v1/pubsub.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2011 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0xcb, 0x73, 0xdb, 0xc6, + 0x19, 0xcf, 0x92, 0x7a, 0x50, 0x1f, 0xf4, 0xf2, 0x56, 0xb2, 0x69, 0xf8, 0x25, 0xc1, 0x8a, 0x45, + 0x33, 0x36, 0x29, 0x33, 0x13, 0x37, 0xb6, 0x2a, 0x67, 0x24, 0xcb, 0x75, 0xdc, 0xb1, 0x1b, 0x15, + 0x72, 0xdb, 0x99, 0x1e, 0xca, 0x01, 0x89, 0x35, 0x8d, 0x90, 0x04, 0x10, 0x00, 0x54, 0xad, 0xb4, + 0x9e, 0x49, 0x93, 0x4e, 0x67, 0x3a, 0xf5, 0xa1, 0x69, 0x6e, 0x9d, 0x1c, 0x3a, 0xd3, 0x5b, 0x8f, + 0x9d, 0xe9, 0xb5, 0xff, 0x44, 0xff, 0x85, 0x1e, 0x7b, 0x6f, 0x8f, 0x99, 0x7d, 0x00, 0xc4, 0x63, + 0x41, 0x8a, 0xb2, 0x7d, 0x03, 0xf6, 0xfb, 0x76, 0xbf, 0xdf, 0xf7, 0xfe, 0x80, 0x85, 0x4b, 0x1d, + 0xc7, 0xe9, 0xf4, 0x48, 0xdd, 0x1d, 0xb4, 0xfc, 0x41, 0xab, 0x7e, 0x74, 0x4b, 0x3c, 0xd5, 0x5c, + 0xcf, 0x09, 0x1c, 0xbc, 0xcc, 0xc9, 0x35, 0xb1, 0x78, 0x74, 0x4b, 0xbd, 0x28, 0x36, 0x18, 0xae, + 0x55, 0x37, 0x6c, 0xdb, 0x09, 0x8c, 0xc0, 0x72, 0x6c, 0x9f, 0xf3, 0xab, 0x97, 0xc3, 0xe3, 0xe8, + 0x5b, 0x6b, 0xf0, 0xac, 0x6e, 0x0e, 0x3c, 0xc6, 0x20, 0xe8, 0x17, 0xd2, 0x74, 0xd2, 0x77, 0x83, + 0x63, 0x41, 0x5c, 0x4b, 0x13, 0x9f, 0x59, 0xa4, 0x67, 0x36, 0xfb, 0x86, 0xdf, 0x15, 0x1c, 0x57, + 0xd2, 0x1c, 0x81, 0xd5, 0x27, 0x7e, 0x60, 0xf4, 0x5d, 0xce, 0xa0, 0x7d, 0x83, 0x60, 0xfa, 0xa9, + 0xe3, 0x5a, 0x6d, 0x8c, 0x61, 0xca, 0x36, 0xfa, 0xa4, 0x8c, 0xd6, 0x50, 0x65, 0x4e, 0x67, 0xcf, + 0x78, 0x1b, 0x66, 0x7a, 0x46, 0x8b, 0xf4, 0xfc, 0x72, 0x61, 0xad, 0x58, 0x51, 0x1a, 0x57, 0x6b, + 0x69, 0xf5, 0x6a, 0x6c, 0x73, 0xed, 0x31, 0xe3, 0x7a, 0x60, 0x07, 0xde, 0xb1, 0x2e, 0xb6, 0xa8, + 0x77, 0x40, 0x89, 0x2d, 0xe3, 0x65, 0x28, 0x76, 0xc9, 0xb1, 0x38, 0x9e, 0x3e, 0xe2, 0x15, 0x98, + 0x3e, 0x32, 0x7a, 0x03, 0x52, 0x2e, 
0xb0, 0x35, 0xfe, 0x72, 0xb7, 0xf0, 0x21, 0xd2, 0xbe, 0x2e, + 0xc0, 0xc2, 0x01, 0x13, 0xf1, 0x84, 0xf8, 0xbe, 0xd1, 0x21, 0x14, 0x9d, 0x69, 0x04, 0x06, 0xdb, + 0x3e, 0xaf, 0xb3, 0x67, 0xfc, 0x09, 0x80, 0x11, 0x04, 0x9e, 0xd5, 0x1a, 0x04, 0x24, 0x44, 0x58, + 0xcf, 0x22, 0x4c, 0x1c, 0x54, 0xdb, 0x8d, 0x76, 0x70, 0xb4, 0xb1, 0x23, 0xf0, 0x25, 0x80, 0x3e, + 0x67, 0x6b, 0x5a, 0x66, 0xb9, 0xc8, 0x50, 0xcd, 0x89, 0x95, 0x47, 0x26, 0xde, 0x81, 0x79, 0x77, + 0xd0, 0xea, 0x59, 0xfe, 0xf3, 0x26, 0x35, 0x63, 0x79, 0x6a, 0x0d, 0x55, 0x94, 0x86, 0x1a, 0x49, + 0x14, 0x36, 0xae, 0x3d, 0x0d, 0x6d, 0xac, 0x2b, 0x82, 0x9f, 0xae, 0xa8, 0x3b, 0xb0, 0x94, 0x12, + 0x3e, 0x91, 0x4d, 0x36, 0x61, 0xe9, 0x21, 0x09, 0x98, 0xb9, 0x75, 0xf2, 0xd9, 0x80, 0xf8, 0x01, + 0x65, 0x0e, 0xe8, 0xbb, 0x38, 0x80, 0xbf, 0x68, 0x5f, 0x20, 0xc0, 0x3f, 0x75, 0x4d, 0x23, 0x20, + 0x09, 0xe6, 0x9b, 0x71, 0x66, 0xa5, 0x71, 0x2e, 0xc7, 0x95, 0xe2, 0x14, 0xbc, 0x0d, 0xca, 0x80, + 0x1d, 0xc2, 0xc2, 0x89, 0xc1, 0x91, 0xe9, 0xfa, 0x43, 0x1a, 0x71, 0x4f, 0x0c, 0xbf, 0xab, 0x03, + 0x67, 0xa7, 0xcf, 0x5a, 0x1b, 0x16, 0x0f, 0xb8, 0xe6, 0x23, 0xa1, 0xe2, 0x6d, 0x28, 0x09, 0xf3, + 0x86, 0xfe, 0xbb, 0x32, 0xc6, 0x7f, 0x7a, 0xb4, 0x41, 0x6b, 0xc0, 0x52, 0x24, 0xc4, 0x77, 0x1d, + 0xdb, 0x27, 0xf8, 0x0a, 0x28, 0x43, 0x07, 0xfa, 0x65, 0xb4, 0x56, 0xac, 0xcc, 0xe9, 0x10, 0x79, + 0xd0, 0xd7, 0x2c, 0x38, 0xf3, 0xd8, 0xf2, 0xb9, 0x15, 0xfd, 0x10, 0x5b, 0x19, 0x66, 0x5d, 0xcf, + 0xf9, 0x94, 0xb4, 0x03, 0x81, 0x2e, 0x7c, 0xc5, 0x17, 0x60, 0xce, 0xa5, 0x87, 0xf9, 0xd6, 0xe7, + 0xdc, 0x23, 0xd3, 0x7a, 0x89, 0x2e, 0x1c, 0x5a, 0x9f, 0x13, 0x1a, 0x2d, 0x8c, 0x18, 0x38, 0x5d, + 0x62, 0x87, 0xd1, 0x42, 0x57, 0x9e, 0xd2, 0x05, 0xad, 0x0f, 0x38, 0x2e, 0x4a, 0x20, 0xac, 0xc3, + 0x0c, 0x53, 0x9d, 0x83, 0x1b, 0xe1, 0x06, 0xc1, 0x86, 0xaf, 0xc1, 0x92, 0x4d, 0x5e, 0x04, 0xcd, + 0x98, 0x28, 0x1e, 0x1a, 0x0b, 0x74, 0xf9, 0x20, 0x12, 0xf7, 0x19, 0x5c, 0x8a, 0xc4, 0x1d, 0x0e, + 0x5a, 0x7e, 0xdb, 0xb3, 0x5c, 0x56, 0x68, 0x46, 0x7b, 0xe0, 0x75, 0x34, 
0xb4, 0xe1, 0x72, 0x9e, + 0x48, 0xa1, 0xed, 0x06, 0x2c, 0xf8, 0x71, 0x82, 0xf0, 0x48, 0x72, 0xf1, 0xc4, 0x2a, 0x56, 0x01, + 0xef, 0x93, 0x1e, 0x49, 0xc5, 0xb5, 0x3c, 0x09, 0xfe, 0x59, 0x84, 0xf9, 0x38, 0x26, 0x69, 0x79, + 0x8b, 0xb6, 0x16, 0xe2, 0x26, 0xd9, 0x01, 0xc5, 0x1d, 0xf8, 0xcf, 0x9b, 0x6d, 0xc7, 0x7e, 0x66, + 0x75, 0x44, 0x96, 0x5f, 0x94, 0xc5, 0xa5, 0xff, 0xfc, 0x3e, 0xe3, 0xd1, 0xc1, 0x8d, 0x9e, 0xf1, + 0x16, 0xac, 0x18, 0xed, 0x6e, 0xd3, 0x24, 0x86, 0xd9, 0xb3, 0x6c, 0xd2, 0xf4, 0x49, 0xdb, 0xb1, + 0x4d, 0xbf, 0x3c, 0xcd, 0x8c, 0x8b, 0x8d, 0x76, 0x77, 0x5f, 0x90, 0x0e, 0x39, 0x05, 0x37, 0x60, + 0xd5, 0x23, 0x81, 0x61, 0xd9, 0x4d, 0xa3, 0xdd, 0x25, 0x66, 0x33, 0x4a, 0x89, 0xd9, 0x35, 0x54, + 0x29, 0xe9, 0xdf, 0xe3, 0xc4, 0x5d, 0x4a, 0x13, 0x59, 0xe0, 0xe3, 0x9f, 0x83, 0x1a, 0x46, 0xba, + 0x47, 0x02, 0x62, 0x53, 0x1d, 0x9b, 0x61, 0xef, 0x28, 0x97, 0x18, 0xe6, 0xf3, 0x99, 0x6c, 0xdd, + 0x17, 0x0c, 0x7a, 0x59, 0x6c, 0xd6, 0xc3, 0xbd, 0x21, 0x05, 0xef, 0x45, 0x25, 0x7f, 0x8e, 0x05, + 0x68, 0x35, 0xab, 0x78, 0xdc, 0xae, 0x6f, 0xba, 0xf2, 0xff, 0x0b, 0x01, 0x0c, 0x0d, 0x8b, 0xaf, + 0xc2, 0x02, 0xf3, 0x05, 0xb1, 0x4d, 0xd7, 0xb1, 0xec, 0x30, 0x41, 0xe7, 0xe9, 0xe2, 0x03, 0xb1, + 0x86, 0x1f, 0x4b, 0xfa, 0xc0, 0x8d, 0x51, 0xfe, 0x1a, 0xd5, 0x04, 0x5e, 0xb7, 0x4c, 0xb7, 0x61, + 0x49, 0x27, 0x6d, 0x62, 0x1d, 0x45, 0xce, 0xc2, 0xab, 0x30, 0x43, 0x23, 0xc2, 0x32, 0xc3, 0x10, + 0x35, 0xda, 0xdd, 0x47, 0x26, 0xbe, 0x03, 0xb3, 0xc2, 0x0b, 0xa2, 0xba, 0x8e, 0xad, 0x7d, 0x21, + 0xbf, 0xf6, 0x03, 0x38, 0xfb, 0x90, 0x04, 0x71, 0x3f, 0x84, 0xd9, 0xa0, 0xc1, 0x7c, 0x3c, 0xb9, + 0x42, 0x7b, 0xc5, 0xd7, 0xb4, 0x6f, 0x11, 0x9c, 0xe7, 0x0d, 0x42, 0x76, 0xc2, 0x9e, 0xe4, 0x04, + 0xa5, 0x71, 0x79, 0x74, 0x18, 0x24, 0x25, 0xbc, 0x5e, 0xf3, 0x70, 0xa1, 0x4c, 0xcb, 0x8a, 0xb4, + 0x88, 0xbd, 0x9d, 0x52, 0xfd, 0x07, 0x04, 0xe7, 0x25, 0x22, 0x45, 0x11, 0xdb, 0x97, 0x15, 0xb1, + 0xf1, 0x16, 0x39, 0x65, 0x91, 0xfb, 0x08, 0xce, 0xf3, 0x22, 0x77, 0x5a, 0xef, 0xfe, 0x06, 0xce, + 0x3d, 0x71, 
0x4c, 0xeb, 0xd9, 0x71, 0xac, 0x3e, 0x9d, 0x7c, 0x7b, 0xba, 0xfa, 0x15, 0x26, 0xab, + 0x7e, 0xda, 0x57, 0x08, 0x94, 0x83, 0x41, 0xaf, 0x37, 0x89, 0xc8, 0x9b, 0x80, 0x3d, 0x12, 0x0c, + 0x3c, 0xbb, 0x69, 0xf5, 0xfb, 0xc4, 0xb4, 0x8c, 0x80, 0xf4, 0x8e, 0x99, 0xe4, 0x92, 0x7e, 0x86, + 0x53, 0x1e, 0x0d, 0x09, 0x78, 0x1d, 0xe6, 0xfb, 0xc6, 0x8b, 0x61, 0x95, 0x2c, 0x32, 0x67, 0x2b, + 0x7d, 0xe3, 0x45, 0x58, 0x1d, 0xb5, 0x5f, 0xc2, 0x3c, 0x07, 0x21, 0x5c, 0xf8, 0x63, 0x38, 0xe3, + 0x89, 0xa4, 0x1c, 0xee, 0xe3, 0x6e, 0x5c, 0xcf, 0xaa, 0x96, 0xca, 0x5f, 0x7d, 0xd9, 0x4b, 0x2e, + 0xf8, 0x34, 0x60, 0xca, 0xdc, 0xc8, 0xbb, 0xc3, 0x72, 0x3e, 0x89, 0xca, 0xe7, 0x60, 0x96, 0x97, + 0x04, 0xbf, 0x3c, 0xc5, 0x5a, 0xe2, 0x0c, 0xab, 0x09, 0x7e, 0x6e, 0xf7, 0x28, 0xe6, 0x75, 0x0f, + 0xed, 0x27, 0x80, 0x77, 0xdb, 0x5d, 0xdb, 0xf9, 0x55, 0x8f, 0x98, 0x9d, 0xd3, 0x82, 0x28, 0xc4, + 0x41, 0x68, 0xbf, 0x2d, 0xc0, 0xca, 0x61, 0xe0, 0x11, 0xa3, 0x6f, 0xd9, 0x9d, 0x49, 0xbd, 0x99, + 0x77, 0x2a, 0xbe, 0x0d, 0xe7, 0xfa, 0xcc, 0x66, 0x32, 0xed, 0x8a, 0x95, 0x69, 0x7d, 0x95, 0x93, + 0xd3, 0xed, 0xf1, 0x83, 0xec, 0xbe, 0xa4, 0xed, 0x56, 0x92, 0xfb, 0x76, 0xb9, 0xb8, 0x1d, 0xb8, + 0xe0, 0x33, 0x1d, 0x9a, 0x23, 0xda, 0x71, 0x99, 0xb3, 0xec, 0x66, 0xcd, 0xda, 0x81, 0xd5, 0x94, + 0x09, 0xde, 0x52, 0x2c, 0x7d, 0x02, 0xab, 0xf7, 0x3d, 0x42, 0x8b, 0xb1, 0x6d, 0xb8, 0xfe, 0x73, + 0x27, 0x08, 0x8d, 0x2d, 0x9b, 0x58, 0xd2, 0x0e, 0x28, 0x48, 0x0a, 0xc0, 0x2b, 0x04, 0xab, 0xa2, + 0xbc, 0xa7, 0x4e, 0xbc, 0x0d, 0x25, 0x5f, 0x2c, 0x89, 0xb2, 0xae, 0x4a, 0x8a, 0x58, 0xb8, 0x29, + 0xe2, 0x7d, 0xbd, 0x72, 0xfe, 0x5f, 0x04, 0xa5, 0xf0, 0xcc, 0x09, 0xa6, 0xb0, 0x6d, 0x50, 0xc8, + 0x0b, 0xd7, 0xf2, 0x08, 0xff, 0xd6, 0x2a, 0x8e, 0xfd, 0xd6, 0x02, 0xce, 0x4e, 0x17, 0xf0, 0xbd, + 0x68, 0x88, 0x99, 0x62, 0x8e, 0xb9, 0x96, 0xaf, 0xe6, 0x9b, 0x1e, 0x60, 0x7a, 0xb0, 0xc2, 0x5a, + 0x89, 0x38, 0xfe, 0x2d, 0x77, 0xae, 0x63, 0x58, 0x4d, 0x49, 0x13, 0x51, 0xfa, 0x21, 0xcc, 0x85, + 0xee, 0x0b, 0xa3, 0x73, 0x94, 0xaf, 0x87, 0xcc, 
0x27, 0x6e, 0x54, 0xef, 0xc3, 0xaa, 0x68, 0x54, + 0xa9, 0x28, 0x53, 0x53, 0x51, 0x36, 0x37, 0x8c, 0x24, 0xed, 0x8f, 0x08, 0x94, 0x43, 0x42, 0xba, + 0x93, 0x14, 0x94, 0x2d, 0x98, 0x62, 0x21, 0x50, 0x18, 0x17, 0x02, 0x1f, 0xbf, 0xa3, 0x33, 0x4e, + 0x7c, 0x31, 0x86, 0x80, 0x99, 0xec, 0xe3, 0x77, 0x86, 0x18, 0xf6, 0x4a, 0x30, 0x13, 0x18, 0x5e, + 0x87, 0x04, 0xda, 0x22, 0xcc, 0x73, 0x30, 0xdc, 0x68, 0x8d, 0xff, 0x2d, 0x03, 0x88, 0xb6, 0xdb, + 0x22, 0x1e, 0xfe, 0x3d, 0x02, 0x2c, 0x52, 0x33, 0x8e, 0x67, 0x4c, 0xe3, 0x57, 0xc7, 0xd0, 0xb5, + 0xad, 0x2f, 0xff, 0xfd, 0x9f, 0x6f, 0x0a, 0x55, 0xf5, 0xdd, 0xfa, 0xd1, 0xad, 0xfa, 0xaf, 0x69, + 0x0a, 0xec, 0x88, 0x50, 0xf0, 0xeb, 0xd5, 0x7a, 0x62, 0x6a, 0xa8, 0x57, 0x5f, 0xde, 0x45, 0x55, + 0xfc, 0x67, 0xc4, 0xbe, 0xfd, 0x13, 0x28, 0x2a, 0x59, 0x29, 0xf2, 0x91, 0x70, 0x2c, 0x9e, 0x0f, + 0x18, 0x9e, 0x3a, 0xbe, 0xc9, 0xf0, 0xc4, 0xe5, 0x8f, 0xc2, 0x85, 0xff, 0x1a, 0xfd, 0x66, 0x48, + 0xe0, 0x7a, 0x2f, 0x2b, 0x2d, 0x77, 0xd6, 0x1c, 0x0b, 0x6d, 0x87, 0x41, 0xfb, 0x7e, 0xa3, 0x91, + 0x81, 0x56, 0x3b, 0x89, 0xdd, 0xbe, 0x45, 0xfc, 0x73, 0x3f, 0x31, 0xd7, 0x61, 0xc9, 0x17, 0x4d, + 0xde, 0xbc, 0xa9, 0xbe, 0x77, 0x22, 0x5e, 0x1e, 0x3e, 0x5a, 0x8d, 0xa1, 0xad, 0xe0, 0x6b, 0x0c, + 0xad, 0xc0, 0x16, 0xc3, 0xf8, 0x32, 0x09, 0x12, 0xff, 0x09, 0x85, 0x1f, 0xb4, 0xe3, 0x2c, 0x98, + 0x3b, 0x11, 0xaa, 0x67, 0x33, 0xe9, 0xf0, 0xa0, 0xef, 0x06, 0xc7, 0xa1, 0x53, 0xab, 0x13, 0x3a, + 0xf5, 0x6f, 0x08, 0xce, 0x64, 0x06, 0x1b, 0x99, 0xc5, 0xf2, 0xa6, 0x9f, 0x5c, 0x40, 0x3f, 0x62, + 0x80, 0xf6, 0xb5, 0x8f, 0x26, 0x02, 0x74, 0xb7, 0x9f, 0x96, 0x43, 0xfd, 0xfa, 0x35, 0x02, 0x25, + 0x36, 0xf3, 0xe0, 0x8d, 0x2c, 0xbe, 0xec, 0x48, 0x94, 0x8b, 0x6c, 0x9f, 0x21, 0xbb, 0xa7, 0xdd, + 0x99, 0x0c, 0x99, 0x31, 0x94, 0x40, 0x31, 0xfd, 0x0e, 0xc1, 0x14, 0x9d, 0x13, 0xf0, 0x25, 0xd9, + 0xac, 0x1c, 0x8d, 0x50, 0xb2, 0x90, 0x8f, 0x8f, 0x17, 0x61, 0xc8, 0x6b, 0x8d, 0xc9, 0xd0, 0xb8, + 0x83, 0x5e, 0x8f, 0xc2, 0x30, 0x61, 0x21, 0x31, 0xb6, 0x60, 0x59, 0xeb, 0x93, 0x8c, 
0x76, 0xea, + 0xe6, 0x58, 0x3e, 0x0e, 0xb0, 0x82, 0xb6, 0x10, 0xcd, 0xfd, 0xe5, 0xf4, 0x47, 0x06, 0xbe, 0x9e, + 0x17, 0x25, 0x99, 0x0f, 0x91, 0x5c, 0x57, 0x3c, 0x62, 0xca, 0xdf, 0xd7, 0xee, 0x9d, 0x26, 0x48, + 0x86, 0x62, 0xa8, 0x21, 0x5e, 0x21, 0x58, 0x48, 0xb4, 0x46, 0x99, 0x25, 0x64, 0x9d, 0x5a, 0x66, + 0x09, 0x69, 0x8f, 0xd5, 0xaa, 0x0c, 0xed, 0x06, 0xd6, 0xf2, 0xf3, 0x3d, 0x12, 0xfe, 0x15, 0x82, + 0xc5, 0xe4, 0x98, 0x87, 0x25, 0x72, 0xa4, 0x83, 0xa0, 0x3a, 0xa2, 0x71, 0x6b, 0x37, 0x18, 0x86, + 0x6b, 0xea, 0xba, 0xbc, 0x99, 0x84, 0xf2, 0x45, 0x41, 0x7c, 0x85, 0x60, 0x31, 0x39, 0x1a, 0xca, + 0x50, 0x48, 0x87, 0xc7, 0x91, 0x28, 0x44, 0xb5, 0x69, 0x54, 0xb9, 0xdf, 0xc2, 0xd1, 0x6a, 0x1c, + 0x9c, 0x2f, 0x10, 0x2c, 0x26, 0x67, 0x08, 0x19, 0x1c, 0xe9, 0x94, 0x91, 0x1b, 0x42, 0x37, 0x19, + 0x94, 0xcd, 0xea, 0xbb, 0x09, 0x28, 0x79, 0x28, 0x58, 0xda, 0xd2, 0x19, 0x40, 0x96, 0xb6, 0xb1, + 0x41, 0x45, 0xda, 0xa9, 0x62, 0xa3, 0xc3, 0x69, 0xd3, 0xd6, 0x27, 0xa4, 0x7b, 0x17, 0x55, 0x1b, + 0x7f, 0x99, 0x85, 0x39, 0xf1, 0x33, 0x9b, 0x78, 0xf8, 0x53, 0x50, 0x78, 0x24, 0xf0, 0x9b, 0x99, + 0xbc, 0x7f, 0xc4, 0x6a, 0x1e, 0x41, 0xbb, 0xce, 0xd0, 0x5c, 0x55, 0x2f, 0x4b, 0xa3, 0x82, 0xff, + 0x59, 0x16, 0x3e, 0x78, 0x09, 0x4a, 0xec, 0xb2, 0x40, 0x56, 0x4a, 0xb3, 0x77, 0x09, 0xf9, 0x82, + 0xeb, 0x4c, 0xf0, 0xf5, 0xc6, 0x06, 0x13, 0xcc, 0x04, 0xd5, 0x46, 0x8a, 0xff, 0x12, 0xc1, 0xac, + 0x50, 0x1c, 0xaf, 0x49, 0xff, 0x7f, 0xc5, 0x6e, 0x11, 0xd4, 0xf5, 0x11, 0x1c, 0xc2, 0x11, 0x0d, + 0x86, 0xe0, 0x86, 0xb6, 0x39, 0x44, 0x20, 0x17, 0x2e, 0xae, 0x66, 0x28, 0x08, 0x07, 0x4a, 0xe1, + 0xd5, 0x0a, 0x5e, 0x97, 0xce, 0x55, 0x27, 0xd3, 0x7e, 0x93, 0xc9, 0x5e, 0xc7, 0x57, 0xc6, 0xc8, + 0xa6, 0x81, 0x0f, 0xc3, 0xcb, 0x01, 0x7c, 0x55, 0x5e, 0x71, 0x12, 0xb7, 0x14, 0xea, 0xc6, 0x68, + 0x26, 0xa1, 0x7e, 0x12, 0x82, 0xac, 0x26, 0x89, 0x7b, 0x85, 0x7f, 0x20, 0x38, 0x2b, 0xff, 0x7b, + 0x8f, 0xeb, 0x23, 0x24, 0x49, 0xa7, 0xa4, 0xad, 0x93, 0x6f, 0x10, 0x30, 0x93, 0x33, 0x67, 0xbe, + 0xa5, 0x52, 0x13, 0x53, 
0x00, 0x4a, 0xec, 0x06, 0x40, 0x16, 0xac, 0xd9, 0x0b, 0x82, 0xdc, 0x4a, + 0x21, 0x4c, 0x55, 0x1d, 0xe7, 0xad, 0xbd, 0x63, 0x58, 0x69, 0x3b, 0xfd, 0x8c, 0xac, 0x3d, 0x85, + 0xff, 0x9d, 0x3d, 0xa0, 0xc7, 0x1e, 0xa0, 0x5f, 0xdc, 0x16, 0x0c, 0x1d, 0xa7, 0x67, 0xd8, 0x9d, + 0x9a, 0xe3, 0x75, 0xea, 0x1d, 0x62, 0x33, 0xa1, 0x75, 0x4e, 0x32, 0x5c, 0xcb, 0x1f, 0x5e, 0x1c, + 0x6f, 0xf3, 0xa7, 0xff, 0x23, 0xf4, 0xf7, 0xc2, 0xd9, 0x87, 0x7c, 0xef, 0xfd, 0x9e, 0x33, 0x30, + 0x69, 0x4c, 0x1f, 0x0e, 0x5a, 0xb5, 0x9f, 0xdd, 0x6a, 0xcd, 0xb0, 0xed, 0xef, 0x7f, 0x17, 0x00, + 0x00, 0xff, 0xff, 0x68, 0xa2, 0x62, 0x98, 0x76, 0x1e, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1beta2/pubsub.pb.go b/vendor/google.golang.org/genproto/googleapis/pubsub/v1beta2/pubsub.pb.go new file mode 100644 index 000000000..75d044a6c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/pubsub/v1beta2/pubsub.pb.go @@ -0,0 +1,1459 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/pubsub/v1beta2/pubsub.proto + +/* +Package pubsub is a generated protocol buffer package. 
+ +It is generated from these files: + google/pubsub/v1beta2/pubsub.proto + +It has these top-level messages: + Topic + PubsubMessage + GetTopicRequest + PublishRequest + PublishResponse + ListTopicsRequest + ListTopicsResponse + ListTopicSubscriptionsRequest + ListTopicSubscriptionsResponse + DeleteTopicRequest + Subscription + PushConfig + ReceivedMessage + GetSubscriptionRequest + ListSubscriptionsRequest + ListSubscriptionsResponse + DeleteSubscriptionRequest + ModifyPushConfigRequest + PullRequest + PullResponse + ModifyAckDeadlineRequest + AcknowledgeRequest +*/ +package pubsub + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A topic resource. +type Topic struct { + // Name of the topic. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *Topic) Reset() { *m = Topic{} } +func (m *Topic) String() string { return proto.CompactTextString(m) } +func (*Topic) ProtoMessage() {} +func (*Topic) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Topic) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// A message data and its attributes. +type PubsubMessage struct { + // The message payload. For JSON requests, the value of this field must be + // base64-encoded. 
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // Optional attributes for this message. + Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // ID of this message assigned by the server at publication time. Guaranteed + // to be unique within the topic. This value may be read by a subscriber + // that receives a PubsubMessage via a Pull call or a push delivery. It must + // not be populated by a publisher in a Publish call. + MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId" json:"message_id,omitempty"` +} + +func (m *PubsubMessage) Reset() { *m = PubsubMessage{} } +func (m *PubsubMessage) String() string { return proto.CompactTextString(m) } +func (*PubsubMessage) ProtoMessage() {} +func (*PubsubMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *PubsubMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *PubsubMessage) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *PubsubMessage) GetMessageId() string { + if m != nil { + return m.MessageId + } + return "" +} + +// Request for the GetTopic method. +type GetTopicRequest struct { + // The name of the topic to get. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` +} + +func (m *GetTopicRequest) Reset() { *m = GetTopicRequest{} } +func (m *GetTopicRequest) String() string { return proto.CompactTextString(m) } +func (*GetTopicRequest) ProtoMessage() {} +func (*GetTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GetTopicRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +// Request for the Publish method. +type PublishRequest struct { + // The messages in the request will be published on this topic. 
+ Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + // The messages to publish. + Messages []*PubsubMessage `protobuf:"bytes,2,rep,name=messages" json:"messages,omitempty"` +} + +func (m *PublishRequest) Reset() { *m = PublishRequest{} } +func (m *PublishRequest) String() string { return proto.CompactTextString(m) } +func (*PublishRequest) ProtoMessage() {} +func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *PublishRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *PublishRequest) GetMessages() []*PubsubMessage { + if m != nil { + return m.Messages + } + return nil +} + +// Response for the Publish method. +type PublishResponse struct { + // The server-assigned ID of each published message, in the same order as + // the messages in the request. IDs are guaranteed to be unique within + // the topic. + MessageIds []string `protobuf:"bytes,1,rep,name=message_ids,json=messageIds" json:"message_ids,omitempty"` +} + +func (m *PublishResponse) Reset() { *m = PublishResponse{} } +func (m *PublishResponse) String() string { return proto.CompactTextString(m) } +func (*PublishResponse) ProtoMessage() {} +func (*PublishResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *PublishResponse) GetMessageIds() []string { + if m != nil { + return m.MessageIds + } + return nil +} + +// Request for the ListTopics method. +type ListTopicsRequest struct { + // The name of the cloud project that topics belong to. + Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` + // Maximum number of topics to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last ListTopicsResponse; indicates that this is + // a continuation of a prior ListTopics call, and that the system should + // return the next page of data. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListTopicsRequest) Reset() { *m = ListTopicsRequest{} } +func (m *ListTopicsRequest) String() string { return proto.CompactTextString(m) } +func (*ListTopicsRequest) ProtoMessage() {} +func (*ListTopicsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListTopicsRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *ListTopicsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTopicsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the ListTopics method. +type ListTopicsResponse struct { + // The resulting topics. + Topics []*Topic `protobuf:"bytes,1,rep,name=topics" json:"topics,omitempty"` + // If not empty, indicates that there may be more topics that match the + // request; this value should be passed in a new ListTopicsRequest. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListTopicsResponse) Reset() { *m = ListTopicsResponse{} } +func (m *ListTopicsResponse) String() string { return proto.CompactTextString(m) } +func (*ListTopicsResponse) ProtoMessage() {} +func (*ListTopicsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ListTopicsResponse) GetTopics() []*Topic { + if m != nil { + return m.Topics + } + return nil +} + +func (m *ListTopicsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the ListTopicSubscriptions method. +type ListTopicSubscriptionsRequest struct { + // The name of the topic that subscriptions are attached to. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + // Maximum number of subscription names to return. 
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last ListTopicSubscriptionsResponse; indicates + // that this is a continuation of a prior ListTopicSubscriptions call, and + // that the system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListTopicSubscriptionsRequest) Reset() { *m = ListTopicSubscriptionsRequest{} } +func (m *ListTopicSubscriptionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListTopicSubscriptionsRequest) ProtoMessage() {} +func (*ListTopicSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListTopicSubscriptionsRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *ListTopicSubscriptionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTopicSubscriptionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the ListTopicSubscriptions method. +type ListTopicSubscriptionsResponse struct { + // The names of the subscriptions that match the request. + Subscriptions []string `protobuf:"bytes,1,rep,name=subscriptions" json:"subscriptions,omitempty"` + // If not empty, indicates that there may be more subscriptions that match + // the request; this value should be passed in a new + // ListTopicSubscriptionsRequest to get more subscriptions. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListTopicSubscriptionsResponse) Reset() { *m = ListTopicSubscriptionsResponse{} } +func (m *ListTopicSubscriptionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListTopicSubscriptionsResponse) ProtoMessage() {} +func (*ListTopicSubscriptionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ListTopicSubscriptionsResponse) GetSubscriptions() []string { + if m != nil { + return m.Subscriptions + } + return nil +} + +func (m *ListTopicSubscriptionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the DeleteTopic method. +type DeleteTopicRequest struct { + // Name of the topic to delete. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` +} + +func (m *DeleteTopicRequest) Reset() { *m = DeleteTopicRequest{} } +func (m *DeleteTopicRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTopicRequest) ProtoMessage() {} +func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *DeleteTopicRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +// A subscription resource. +type Subscription struct { + // Name of the subscription. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of the topic from which this subscription is receiving messages. + // This will be present if and only if the subscription has not been detached + // from its topic. + Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"` + // If push delivery is used with this subscription, this field is + // used to configure it. An empty pushConfig signifies that the subscriber + // will pull and ack messages using API methods. 
+ PushConfig *PushConfig `protobuf:"bytes,4,opt,name=push_config,json=pushConfig" json:"push_config,omitempty"` + // This value is the maximum time after a subscriber receives a message + // before the subscriber should acknowledge the message. After message + // delivery but before the ack deadline expires and before the message is + // acknowledged, it is an outstanding message and will not be delivered + // again during that time (on a best-effort basis). + // + // For pull delivery this value + // is used as the initial value for the ack deadline. It may be overridden + // for a specific message by calling ModifyAckDeadline. + // + // For push delivery, this value is also used to set the request timeout for + // the call to the push endpoint. + // + // If the subscriber never acknowledges the message, the Pub/Sub + // system will eventually redeliver the message. + AckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds" json:"ack_deadline_seconds,omitempty"` +} + +func (m *Subscription) Reset() { *m = Subscription{} } +func (m *Subscription) String() string { return proto.CompactTextString(m) } +func (*Subscription) ProtoMessage() {} +func (*Subscription) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *Subscription) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Subscription) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *Subscription) GetPushConfig() *PushConfig { + if m != nil { + return m.PushConfig + } + return nil +} + +func (m *Subscription) GetAckDeadlineSeconds() int32 { + if m != nil { + return m.AckDeadlineSeconds + } + return 0 +} + +// Configuration for a push delivery endpoint. +type PushConfig struct { + // A URL locating the endpoint to which messages should be pushed. + // For example, a Webhook endpoint might use "https://example.com/push". 
+ PushEndpoint string `protobuf:"bytes,1,opt,name=push_endpoint,json=pushEndpoint" json:"push_endpoint,omitempty"` + // Endpoint configuration attributes. + // + // Every endpoint has a set of API supported attributes that can be used to + // control different aspects of the message delivery. + // + // The currently supported attribute is `x-goog-version`, which you can + // use to change the format of the push message. This attribute + // indicates the version of the data expected by the endpoint. This + // controls the shape of the envelope (i.e. its fields and metadata). + // The endpoint version is based on the version of the Pub/Sub + // API. + // + // If not present during the CreateSubscription call, it will default to + // the version of the API used to make such call. If not present during a + // ModifyPushConfig call, its value will not be changed. GetSubscription + // calls will always return a valid version, even if the subscription was + // created without this attribute. + // + // The possible values for this attribute are: + // + // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. + // * `v1beta2`: uses the push format defined in the v1beta2 Pub/Sub API. + // + Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *PushConfig) Reset() { *m = PushConfig{} } +func (m *PushConfig) String() string { return proto.CompactTextString(m) } +func (*PushConfig) ProtoMessage() {} +func (*PushConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *PushConfig) GetPushEndpoint() string { + if m != nil { + return m.PushEndpoint + } + return "" +} + +func (m *PushConfig) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// A message and its corresponding acknowledgment ID. 
+type ReceivedMessage struct { + // This ID can be used to acknowledge the received message. + AckId string `protobuf:"bytes,1,opt,name=ack_id,json=ackId" json:"ack_id,omitempty"` + // The message. + Message *PubsubMessage `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *ReceivedMessage) Reset() { *m = ReceivedMessage{} } +func (m *ReceivedMessage) String() string { return proto.CompactTextString(m) } +func (*ReceivedMessage) ProtoMessage() {} +func (*ReceivedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *ReceivedMessage) GetAckId() string { + if m != nil { + return m.AckId + } + return "" +} + +func (m *ReceivedMessage) GetMessage() *PubsubMessage { + if m != nil { + return m.Message + } + return nil +} + +// Request for the GetSubscription method. +type GetSubscriptionRequest struct { + // The name of the subscription to get. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` +} + +func (m *GetSubscriptionRequest) Reset() { *m = GetSubscriptionRequest{} } +func (m *GetSubscriptionRequest) String() string { return proto.CompactTextString(m) } +func (*GetSubscriptionRequest) ProtoMessage() {} +func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *GetSubscriptionRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +// Request for the ListSubscriptions method. +type ListSubscriptionsRequest struct { + // The name of the cloud project that subscriptions belong to. + Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` + // Maximum number of subscriptions to return. 
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last ListSubscriptionsResponse; indicates that + // this is a continuation of a prior ListSubscriptions call, and that the + // system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListSubscriptionsRequest) Reset() { *m = ListSubscriptionsRequest{} } +func (m *ListSubscriptionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSubscriptionsRequest) ProtoMessage() {} +func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *ListSubscriptionsRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *ListSubscriptionsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListSubscriptionsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for the ListSubscriptions method. +type ListSubscriptionsResponse struct { + // The subscriptions that match the request. + Subscriptions []*Subscription `protobuf:"bytes,1,rep,name=subscriptions" json:"subscriptions,omitempty"` + // If not empty, indicates that there may be more subscriptions that match + // the request; this value should be passed in a new ListSubscriptionsRequest + // to get more subscriptions. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListSubscriptionsResponse) Reset() { *m = ListSubscriptionsResponse{} } +func (m *ListSubscriptionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSubscriptionsResponse) ProtoMessage() {} +func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *ListSubscriptionsResponse) GetSubscriptions() []*Subscription { + if m != nil { + return m.Subscriptions + } + return nil +} + +func (m *ListSubscriptionsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for the DeleteSubscription method. +type DeleteSubscriptionRequest struct { + // The subscription to delete. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` +} + +func (m *DeleteSubscriptionRequest) Reset() { *m = DeleteSubscriptionRequest{} } +func (m *DeleteSubscriptionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSubscriptionRequest) ProtoMessage() {} +func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *DeleteSubscriptionRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +// Request for the ModifyPushConfig method. +type ModifyPushConfigRequest struct { + // The name of the subscription. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // The push configuration for future deliveries. + // + // An empty pushConfig indicates that the Pub/Sub system should + // stop pushing messages from the given subscription and allow + // messages to be pulled and acknowledged - effectively pausing + // the subscription if Pull is not called. 
+ PushConfig *PushConfig `protobuf:"bytes,2,opt,name=push_config,json=pushConfig" json:"push_config,omitempty"` +} + +func (m *ModifyPushConfigRequest) Reset() { *m = ModifyPushConfigRequest{} } +func (m *ModifyPushConfigRequest) String() string { return proto.CompactTextString(m) } +func (*ModifyPushConfigRequest) ProtoMessage() {} +func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ModifyPushConfigRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *ModifyPushConfigRequest) GetPushConfig() *PushConfig { + if m != nil { + return m.PushConfig + } + return nil +} + +// Request for the Pull method. +type PullRequest struct { + // The subscription from which messages should be pulled. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // If this is specified as true the system will respond immediately even if + // it is not able to return a message in the Pull response. Otherwise the + // system is allowed to wait until at least one message is available rather + // than returning no messages. The client may cancel the request if it does + // not wish to wait any longer for the response. + ReturnImmediately bool `protobuf:"varint,2,opt,name=return_immediately,json=returnImmediately" json:"return_immediately,omitempty"` + // The maximum number of messages returned for this request. The Pub/Sub + // system may return fewer than the number specified. 
+ MaxMessages int32 `protobuf:"varint,3,opt,name=max_messages,json=maxMessages" json:"max_messages,omitempty"` +} + +func (m *PullRequest) Reset() { *m = PullRequest{} } +func (m *PullRequest) String() string { return proto.CompactTextString(m) } +func (*PullRequest) ProtoMessage() {} +func (*PullRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *PullRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *PullRequest) GetReturnImmediately() bool { + if m != nil { + return m.ReturnImmediately + } + return false +} + +func (m *PullRequest) GetMaxMessages() int32 { + if m != nil { + return m.MaxMessages + } + return 0 +} + +// Response for the Pull method. +type PullResponse struct { + // Received Pub/Sub messages. The Pub/Sub system will return zero messages if + // there are no more available in the backlog. The Pub/Sub system may return + // fewer than the maxMessages requested even if there are more messages + // available in the backlog. + ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` +} + +func (m *PullResponse) Reset() { *m = PullResponse{} } +func (m *PullResponse) String() string { return proto.CompactTextString(m) } +func (*PullResponse) ProtoMessage() {} +func (*PullResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *PullResponse) GetReceivedMessages() []*ReceivedMessage { + if m != nil { + return m.ReceivedMessages + } + return nil +} + +// Request for the ModifyAckDeadline method. +type ModifyAckDeadlineRequest struct { + // The name of the subscription. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // The acknowledgment ID. 
+ AckId string `protobuf:"bytes,2,opt,name=ack_id,json=ackId" json:"ack_id,omitempty"` + // The new ack deadline with respect to the time this request was sent to the + // Pub/Sub system. Must be >= 0. For example, if the value is 10, the new ack + // deadline will expire 10 seconds after the ModifyAckDeadline call was made. + // Specifying zero may immediately make the message available for another pull + // request. + AckDeadlineSeconds int32 `protobuf:"varint,3,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds" json:"ack_deadline_seconds,omitempty"` +} + +func (m *ModifyAckDeadlineRequest) Reset() { *m = ModifyAckDeadlineRequest{} } +func (m *ModifyAckDeadlineRequest) String() string { return proto.CompactTextString(m) } +func (*ModifyAckDeadlineRequest) ProtoMessage() {} +func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *ModifyAckDeadlineRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *ModifyAckDeadlineRequest) GetAckId() string { + if m != nil { + return m.AckId + } + return "" +} + +func (m *ModifyAckDeadlineRequest) GetAckDeadlineSeconds() int32 { + if m != nil { + return m.AckDeadlineSeconds + } + return 0 +} + +// Request for the Acknowledge method. +type AcknowledgeRequest struct { + // The subscription whose message is being acknowledged. + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // The acknowledgment ID for the messages being acknowledged that was returned + // by the Pub/Sub system in the Pull response. Must not be empty. 
+ AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds" json:"ack_ids,omitempty"` +} + +func (m *AcknowledgeRequest) Reset() { *m = AcknowledgeRequest{} } +func (m *AcknowledgeRequest) String() string { return proto.CompactTextString(m) } +func (*AcknowledgeRequest) ProtoMessage() {} +func (*AcknowledgeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *AcknowledgeRequest) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *AcknowledgeRequest) GetAckIds() []string { + if m != nil { + return m.AckIds + } + return nil +} + +func init() { + proto.RegisterType((*Topic)(nil), "google.pubsub.v1beta2.Topic") + proto.RegisterType((*PubsubMessage)(nil), "google.pubsub.v1beta2.PubsubMessage") + proto.RegisterType((*GetTopicRequest)(nil), "google.pubsub.v1beta2.GetTopicRequest") + proto.RegisterType((*PublishRequest)(nil), "google.pubsub.v1beta2.PublishRequest") + proto.RegisterType((*PublishResponse)(nil), "google.pubsub.v1beta2.PublishResponse") + proto.RegisterType((*ListTopicsRequest)(nil), "google.pubsub.v1beta2.ListTopicsRequest") + proto.RegisterType((*ListTopicsResponse)(nil), "google.pubsub.v1beta2.ListTopicsResponse") + proto.RegisterType((*ListTopicSubscriptionsRequest)(nil), "google.pubsub.v1beta2.ListTopicSubscriptionsRequest") + proto.RegisterType((*ListTopicSubscriptionsResponse)(nil), "google.pubsub.v1beta2.ListTopicSubscriptionsResponse") + proto.RegisterType((*DeleteTopicRequest)(nil), "google.pubsub.v1beta2.DeleteTopicRequest") + proto.RegisterType((*Subscription)(nil), "google.pubsub.v1beta2.Subscription") + proto.RegisterType((*PushConfig)(nil), "google.pubsub.v1beta2.PushConfig") + proto.RegisterType((*ReceivedMessage)(nil), "google.pubsub.v1beta2.ReceivedMessage") + proto.RegisterType((*GetSubscriptionRequest)(nil), "google.pubsub.v1beta2.GetSubscriptionRequest") + proto.RegisterType((*ListSubscriptionsRequest)(nil), 
"google.pubsub.v1beta2.ListSubscriptionsRequest") + proto.RegisterType((*ListSubscriptionsResponse)(nil), "google.pubsub.v1beta2.ListSubscriptionsResponse") + proto.RegisterType((*DeleteSubscriptionRequest)(nil), "google.pubsub.v1beta2.DeleteSubscriptionRequest") + proto.RegisterType((*ModifyPushConfigRequest)(nil), "google.pubsub.v1beta2.ModifyPushConfigRequest") + proto.RegisterType((*PullRequest)(nil), "google.pubsub.v1beta2.PullRequest") + proto.RegisterType((*PullResponse)(nil), "google.pubsub.v1beta2.PullResponse") + proto.RegisterType((*ModifyAckDeadlineRequest)(nil), "google.pubsub.v1beta2.ModifyAckDeadlineRequest") + proto.RegisterType((*AcknowledgeRequest)(nil), "google.pubsub.v1beta2.AcknowledgeRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Subscriber service + +type SubscriberClient interface { + // Creates a subscription to a given topic for a given subscriber. + // If the subscription already exists, returns ALREADY_EXISTS. + // If the corresponding topic doesn't exist, returns NOT_FOUND. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic. + CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) + // Gets the configuration details of a subscription. + GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) + // Lists matching subscriptions. + ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) + // Deletes an existing subscription. 
All pending messages in the subscription + // are immediately dropped. Calls to Pull after deletion will return + // NOT_FOUND. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription, or its topic unless the same topic is specified. + DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + // Modifies the ack deadline for a specific message. This method is useful to + // indicate that more time is needed to process a message by the subscriber, + // or to make the message available for redelivery if the processing was + // interrupted. + ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + // Acknowledges the messages associated with the ack tokens in the + // AcknowledgeRequest. The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + // Pulls messages from the server. Returns an empty list if there are no + // messages available in the backlog. The server may return UNAVAILABLE if + // there are too many concurrent pull requests pending for the given + // subscription. + Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) + // Modifies the PushConfig for a specified subscription. + // + // This may be used to change a push subscription to a pull one (signified + // by an empty PushConfig) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. 
Messages will accumulate for + // delivery continuously through the call regardless of changes to the + // PushConfig. + ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) +} + +type subscriberClient struct { + cc *grpc.ClientConn +} + +func NewSubscriberClient(cc *grpc.ClientConn) SubscriberClient { + return &subscriberClient{cc} +} + +func (c *subscriberClient) CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Subscriber/CreateSubscription", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Subscriber/GetSubscription", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) { + out := new(ListSubscriptionsResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Subscriber/ListSubscriptions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Subscriber/DeleteSubscription", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Subscriber/ModifyAckDeadline", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Subscriber/Acknowledge", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) { + out := new(PullResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Subscriber/Pull", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Subscriber/ModifyPushConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Subscriber service + +type SubscriberServer interface { + // Creates a subscription to a given topic for a given subscriber. + // If the subscription already exists, returns ALREADY_EXISTS. + // If the corresponding topic doesn't exist, returns NOT_FOUND. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic. + CreateSubscription(context.Context, *Subscription) (*Subscription, error) + // Gets the configuration details of a subscription. 
+ GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error) + // Lists matching subscriptions. + ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) + // Deletes an existing subscription. All pending messages in the subscription + // are immediately dropped. Calls to Pull after deletion will return + // NOT_FOUND. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription, or its topic unless the same topic is specified. + DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*google_protobuf.Empty, error) + // Modifies the ack deadline for a specific message. This method is useful to + // indicate that more time is needed to process a message by the subscriber, + // or to make the message available for redelivery if the processing was + // interrupted. + ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*google_protobuf.Empty, error) + // Acknowledges the messages associated with the ack tokens in the + // AcknowledgeRequest. The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + Acknowledge(context.Context, *AcknowledgeRequest) (*google_protobuf.Empty, error) + // Pulls messages from the server. Returns an empty list if there are no + // messages available in the backlog. The server may return UNAVAILABLE if + // there are too many concurrent pull requests pending for the given + // subscription. + Pull(context.Context, *PullRequest) (*PullResponse, error) + // Modifies the PushConfig for a specified subscription. 
+ // + // This may be used to change a push subscription to a pull one (signified + // by an empty PushConfig) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. Messages will accumulate for + // delivery continuously through the call regardless of changes to the + // PushConfig. + ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*google_protobuf.Empty, error) +} + +func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) { + s.RegisterService(&_Subscriber_serviceDesc, srv) +} + +func _Subscriber_CreateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Subscription) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).CreateSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Subscriber/CreateSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).CreateSubscription(ctx, req.(*Subscription)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_GetSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).GetSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Subscriber/GetSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).GetSubscription(ctx, req.(*GetSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ListSubscriptions_Handler(srv interface{}, ctx context.Context, 
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ListSubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Subscriber/ListSubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ListSubscriptions(ctx, req.(*ListSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_DeleteSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).DeleteSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Subscriber/DeleteSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).DeleteSubscription(ctx, req.(*DeleteSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ModifyAckDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ModifyAckDeadlineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ModifyAckDeadline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Subscriber/ModifyAckDeadline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ModifyAckDeadline(ctx, req.(*ModifyAckDeadlineRequest)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _Subscriber_Acknowledge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AcknowledgeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).Acknowledge(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Subscriber/Acknowledge", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).Acknowledge(ctx, req.(*AcknowledgeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_Pull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PullRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).Pull(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Subscriber/Pull", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).Pull(ctx, req.(*PullRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Subscriber_ModifyPushConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ModifyPushConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SubscriberServer).ModifyPushConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Subscriber/ModifyPushConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SubscriberServer).ModifyPushConfig(ctx, req.(*ModifyPushConfigRequest)) + } + return interceptor(ctx, in, info, handler) 
+} + +var _Subscriber_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.v1beta2.Subscriber", + HandlerType: (*SubscriberServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSubscription", + Handler: _Subscriber_CreateSubscription_Handler, + }, + { + MethodName: "GetSubscription", + Handler: _Subscriber_GetSubscription_Handler, + }, + { + MethodName: "ListSubscriptions", + Handler: _Subscriber_ListSubscriptions_Handler, + }, + { + MethodName: "DeleteSubscription", + Handler: _Subscriber_DeleteSubscription_Handler, + }, + { + MethodName: "ModifyAckDeadline", + Handler: _Subscriber_ModifyAckDeadline_Handler, + }, + { + MethodName: "Acknowledge", + Handler: _Subscriber_Acknowledge_Handler, + }, + { + MethodName: "Pull", + Handler: _Subscriber_Pull_Handler, + }, + { + MethodName: "ModifyPushConfig", + Handler: _Subscriber_ModifyPushConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/pubsub/v1beta2/pubsub.proto", +} + +// Client API for Publisher service + +type PublisherClient interface { + // Creates the given topic with the given name. + CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) + // Adds one or more messages to the topic. Returns NOT_FOUND if the topic does + // not exist. + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) + // Gets the configuration of a topic. + GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) + // Lists matching topics. + ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) + // Lists the name of the subscriptions for this topic. + ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) + // Deletes the topic with the given name. Returns NOT_FOUND if the topic does + // not exist. 
After a topic is deleted, a new topic may be created with the + // same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted. + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) +} + +type publisherClient struct { + cc *grpc.ClientConn +} + +func NewPublisherClient(cc *grpc.ClientConn) PublisherClient { + return &publisherClient{cc} +} + +func (c *publisherClient) CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Publisher/CreateTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) { + out := new(PublishResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Publisher/Publish", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Publisher/GetTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) { + out := new(ListTopicsResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Publisher/ListTopics", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) { + out := new(ListTopicSubscriptionsResponse) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Publisher/ListTopicSubscriptions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/google.pubsub.v1beta2.Publisher/DeleteTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Publisher service + +type PublisherServer interface { + // Creates the given topic with the given name. + CreateTopic(context.Context, *Topic) (*Topic, error) + // Adds one or more messages to the topic. Returns NOT_FOUND if the topic does + // not exist. + Publish(context.Context, *PublishRequest) (*PublishResponse, error) + // Gets the configuration of a topic. + GetTopic(context.Context, *GetTopicRequest) (*Topic, error) + // Lists matching topics. + ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error) + // Lists the name of the subscriptions for this topic. + ListTopicSubscriptions(context.Context, *ListTopicSubscriptionsRequest) (*ListTopicSubscriptionsResponse, error) + // Deletes the topic with the given name. Returns NOT_FOUND if the topic does + // not exist. After a topic is deleted, a new topic may be created with the + // same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted. 
+ DeleteTopic(context.Context, *DeleteTopicRequest) (*google_protobuf.Empty, error) +} + +func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) { + s.RegisterService(&_Publisher_serviceDesc, srv) +} + +func _Publisher_CreateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Topic) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).CreateTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Publisher/CreateTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).CreateTopic(ctx, req.(*Topic)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Publisher/Publish", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_GetTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).GetTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Publisher/GetTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ + return srv.(PublisherServer).GetTopic(ctx, req.(*GetTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Publisher/ListTopics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopics(ctx, req.(*ListTopicsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopicSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopicSubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Publisher/ListTopicSubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopicSubscriptions(ctx, req.(*ListTopicSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1beta2.Publisher/DeleteTopic", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(PublisherServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Publisher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.v1beta2.Publisher", + HandlerType: (*PublisherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateTopic", + Handler: _Publisher_CreateTopic_Handler, + }, + { + MethodName: "Publish", + Handler: _Publisher_Publish_Handler, + }, + { + MethodName: "GetTopic", + Handler: _Publisher_GetTopic_Handler, + }, + { + MethodName: "ListTopics", + Handler: _Publisher_ListTopics_Handler, + }, + { + MethodName: "ListTopicSubscriptions", + Handler: _Publisher_ListTopicSubscriptions_Handler, + }, + { + MethodName: "DeleteTopic", + Handler: _Publisher_DeleteTopic_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/pubsub/v1beta2/pubsub.proto", +} + +func init() { proto.RegisterFile("google/pubsub/v1beta2/pubsub.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1107 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x5f, 0x6f, 0xdb, 0x54, + 0x14, 0x97, 0x93, 0xfe, 0xcb, 0x71, 0x4a, 0xda, 0xab, 0xad, 0x73, 0x53, 0x06, 0x9d, 0x37, 0x4a, + 0x86, 0xb4, 0x64, 0x0b, 0x45, 0x42, 0x8c, 0x7f, 0xed, 0x56, 0x4d, 0x91, 0xa8, 0xc8, 0xdc, 0x3e, + 0xa0, 0x09, 0x2d, 0x72, 0xec, 0x5b, 0xc7, 0xc4, 0xb1, 0x3d, 0x5f, 0xbb, 0x34, 0x7b, 0x43, 0x08, + 0xc1, 0x13, 0x1f, 0x85, 0xaf, 0xc0, 0x03, 0x1f, 0x82, 0xaf, 0x83, 0x7c, 0xef, 0xb5, 0x63, 0xa7, + 0xbe, 0xa9, 0xdb, 0x89, 0x37, 0xdf, 0x73, 0xcf, 0xff, 0x73, 0x7e, 0xe7, 0x1e, 0x83, 0x6a, 0x79, + 0x9e, 0xe5, 0xe0, 0x8e, 0x1f, 0x0d, 0x49, 0x34, 0xec, 0x9c, 0x3f, 0x19, 0xe2, 0x50, 0xef, 0xf2, + 0x63, 0xdb, 0x0f, 0xbc, 0xd0, 0x43, 0xb7, 0x19, 0x4f, 0x9b, 0x13, 0x39, 0x4f, 0x73, 0x27, 0x11, + 0x8d, 0x99, 0x86, 0xd1, 0x59, 0x07, 0x4f, 0xfc, 0x70, 0xca, 0x64, 0xd4, 0x1d, 0x58, 0x3e, 0xf5, + 0x7c, 0xdb, 0x40, 0x08, 
0x96, 0x5c, 0x7d, 0x82, 0x15, 0x69, 0x57, 0x6a, 0xd5, 0x34, 0xfa, 0xad, + 0xfe, 0x2b, 0xc1, 0x7a, 0x9f, 0x2a, 0x3b, 0xc6, 0x84, 0xe8, 0x16, 0x8e, 0xb9, 0x4c, 0x3d, 0xd4, + 0x29, 0x57, 0x5d, 0xa3, 0xdf, 0xe8, 0x14, 0x40, 0x0f, 0xc3, 0xc0, 0x1e, 0x46, 0x21, 0x26, 0x4a, + 0x65, 0xb7, 0xda, 0x92, 0xbb, 0xfb, 0xed, 0x42, 0x5f, 0xda, 0x39, 0x6d, 0xed, 0x83, 0x54, 0xec, + 0xc8, 0x0d, 0x83, 0xa9, 0x96, 0xd1, 0x83, 0xee, 0x02, 0x4c, 0x18, 0xdb, 0xc0, 0x36, 0x95, 0x2a, + 0xf5, 0xaa, 0xc6, 0x29, 0x3d, 0xb3, 0xf9, 0x15, 0x34, 0xe6, 0xa4, 0xd1, 0x06, 0x54, 0xc7, 0x78, + 0xca, 0x03, 0x88, 0x3f, 0xd1, 0x2d, 0x58, 0x3e, 0xd7, 0x9d, 0x08, 0x2b, 0x15, 0x4a, 0x63, 0x87, + 0x2f, 0x2a, 0x9f, 0x4b, 0xea, 0xc7, 0xd0, 0x78, 0x81, 0x43, 0x1a, 0xb9, 0x86, 0xdf, 0x44, 0x98, + 0x84, 0x31, 0x73, 0x18, 0x9f, 0xb9, 0x02, 0x76, 0x50, 0x47, 0xf0, 0x5e, 0x3f, 0x1a, 0x3a, 0x36, + 0x19, 0x2d, 0xe4, 0x43, 0xdf, 0xc2, 0x1a, 0x77, 0x2e, 0x49, 0xc1, 0x83, 0x32, 0x29, 0xd0, 0x52, + 0x29, 0xb5, 0x0b, 0x8d, 0xd4, 0x12, 0xf1, 0x3d, 0x97, 0x60, 0xf4, 0x21, 0xc8, 0xb3, 0x1c, 0x10, + 0x45, 0xda, 0xad, 0xb6, 0x6a, 0x1a, 0xa4, 0x49, 0x20, 0xaa, 0x0d, 0x9b, 0xdf, 0xd9, 0x84, 0xc5, + 0x41, 0x12, 0x07, 0x15, 0x58, 0xf5, 0x03, 0xef, 0x27, 0x6c, 0x84, 0xdc, 0xc5, 0xe4, 0x88, 0x76, + 0xa0, 0xe6, 0xc7, 0xca, 0x88, 0xfd, 0x96, 0xe5, 0x64, 0x59, 0x5b, 0x8b, 0x09, 0x27, 0xf6, 0x5b, + 0x1c, 0x27, 0x9c, 0x5e, 0x86, 0xde, 0x18, 0xbb, 0x49, 0xc2, 0x63, 0xca, 0x69, 0x4c, 0x50, 0x03, + 0x40, 0x59, 0x53, 0xdc, 0xc3, 0x7d, 0x58, 0xa1, 0xf1, 0x33, 0xe7, 0xe4, 0xee, 0xfb, 0x82, 0xa0, + 0x59, 0xa6, 0x39, 0x2f, 0xda, 0x83, 0x86, 0x8b, 0x2f, 0xc2, 0x41, 0xc6, 0x1e, 0xab, 0xd0, 0x7a, + 0x4c, 0xee, 0xa7, 0x36, 0xdf, 0xc0, 0xdd, 0xd4, 0xe6, 0x49, 0x34, 0x24, 0x46, 0x60, 0xfb, 0xa1, + 0xed, 0xb9, 0x64, 0x71, 0x2d, 0xde, 0x25, 0x4c, 0x17, 0x3e, 0x10, 0x99, 0xe4, 0x21, 0x3f, 0x80, + 0x75, 0x92, 0xbd, 0xe0, 0x65, 0xc9, 0x13, 0x4b, 0x87, 0xf8, 0x09, 0xa0, 0xe7, 0xd8, 0xc1, 0x21, + 0x2e, 0xd1, 0x8b, 0x7f, 0x49, 0x50, 0xcf, 0xfa, 0x54, 0x84, 
0xd9, 0x99, 0x68, 0x25, 0x9b, 0x92, + 0x43, 0x90, 0xfd, 0x88, 0x8c, 0x06, 0x86, 0xe7, 0x9e, 0xd9, 0x96, 0xb2, 0xb4, 0x2b, 0xb5, 0xe4, + 0xee, 0x3d, 0x61, 0x87, 0x92, 0xd1, 0x33, 0xca, 0xa8, 0x81, 0x9f, 0x7e, 0xa3, 0xc7, 0x70, 0x4b, + 0x37, 0xc6, 0x03, 0x13, 0xeb, 0xa6, 0x63, 0xbb, 0x78, 0x40, 0xb0, 0xe1, 0xb9, 0x26, 0x51, 0x96, + 0x69, 0x86, 0x91, 0x6e, 0x8c, 0x9f, 0xf3, 0xab, 0x13, 0x76, 0xa3, 0xfe, 0x23, 0x01, 0xcc, 0x94, + 0xa1, 0xfb, 0xb0, 0x4e, 0x9d, 0xc0, 0xae, 0xe9, 0x7b, 0xb6, 0x9b, 0xb4, 0x67, 0x3d, 0x26, 0x1e, + 0x71, 0x1a, 0x7a, 0x59, 0x30, 0x4d, 0x9e, 0x5c, 0xe9, 0xe8, 0xa2, 0x51, 0xf2, 0xae, 0xb3, 0x62, + 0x04, 0x0d, 0x0d, 0x1b, 0xd8, 0x3e, 0xc7, 0x66, 0x32, 0x06, 0x6f, 0xc3, 0x4a, 0x9c, 0x0a, 0xdb, + 0x4c, 0x0a, 0xa4, 0x1b, 0xe3, 0x9e, 0x89, 0xbe, 0x86, 0x55, 0x0e, 0x4e, 0xaa, 0xa5, 0xec, 0x0c, + 0x48, 0x84, 0xd4, 0x2f, 0x61, 0xeb, 0x05, 0x0e, 0xb3, 0x25, 0x4e, 0x1a, 0x42, 0x85, 0x7a, 0xb6, + 0xbf, 0x92, 0xcc, 0x65, 0x69, 0xaa, 0x0f, 0x4a, 0xdc, 0xba, 0x85, 0x40, 0xf9, 0x7f, 0x66, 0xc2, + 0x9f, 0x12, 0x6c, 0x17, 0x98, 0xe4, 0x40, 0xe9, 0x15, 0x01, 0x45, 0xee, 0xde, 0x17, 0xe4, 0x24, + 0x17, 0xf6, 0x0d, 0xd1, 0xf4, 0x0d, 0x6c, 0x33, 0x34, 0xdd, 0x34, 0x87, 0xbf, 0x48, 0x70, 0xe7, + 0xd8, 0x33, 0xed, 0xb3, 0x69, 0x06, 0x04, 0xe5, 0xe5, 0xe7, 0x71, 0x56, 0xb9, 0x01, 0xce, 0xd4, + 0x5f, 0x25, 0x90, 0xfb, 0x91, 0xe3, 0x5c, 0xc7, 0xee, 0x23, 0x40, 0x01, 0x0e, 0xa3, 0xc0, 0x1d, + 0xd8, 0x93, 0x09, 0x36, 0x6d, 0x3d, 0xc4, 0xce, 0x94, 0x9a, 0x5f, 0xd3, 0x36, 0xd9, 0x4d, 0x6f, + 0x76, 0x81, 0xee, 0x41, 0x7d, 0xa2, 0x5f, 0x0c, 0xd2, 0x17, 0xab, 0x4a, 0xeb, 0x2e, 0x4f, 0xf4, + 0x8b, 0xe3, 0xe4, 0x39, 0x32, 0xa0, 0xce, 0x9c, 0xe0, 0xd5, 0x3c, 0x81, 0xcd, 0x80, 0xa3, 0x60, + 0x26, 0xc7, 0x2a, 0xba, 0x27, 0x88, 0x6f, 0x0e, 0x35, 0xda, 0x46, 0x90, 0x27, 0x10, 0xf5, 0x77, + 0x09, 0x14, 0x96, 0xee, 0x83, 0xd9, 0xf4, 0xb8, 0x4e, 0xdc, 0x33, 0x20, 0x56, 0xb2, 0x40, 0x14, + 0x8d, 0xaa, 0xaa, 0x70, 0x54, 0xbd, 0x04, 0x74, 0x60, 0x8c, 0x5d, 0xef, 0x67, 0x07, 0x9b, 0xd6, + 
0xb5, 0x5c, 0xb8, 0x03, 0xab, 0xcc, 0x05, 0x36, 0xad, 0x6a, 0xda, 0x0a, 0xf5, 0x81, 0x74, 0xff, + 0x58, 0x01, 0xe0, 0x7d, 0x38, 0xc4, 0x01, 0x7a, 0x0d, 0xe8, 0x59, 0x80, 0xf5, 0x7c, 0x6f, 0xa2, + 0x32, 0x68, 0x68, 0x96, 0x61, 0x42, 0x98, 0xae, 0x34, 0x39, 0xd2, 0x23, 0x81, 0x5c, 0xf1, 0x90, + 0x29, 0x67, 0xe6, 0x9c, 0xad, 0x1c, 0x39, 0xc8, 0xa3, 0x8e, 0x40, 0x52, 0x34, 0x8f, 0x9a, 0x8f, + 0xcb, 0x0b, 0xf0, 0xfe, 0x7b, 0x9d, 0x3c, 0x94, 0x39, 0x6f, 0x44, 0x7a, 0x84, 0x53, 0xa0, 0xb9, + 0x95, 0x4a, 0xf0, 0x75, 0xb8, 0x7d, 0x14, 0xaf, 0xc3, 0xe8, 0x47, 0xd8, 0xbc, 0xd4, 0x89, 0xc2, + 0xb8, 0x44, 0x3d, 0x2b, 0xd4, 0xde, 0x07, 0x39, 0xd3, 0x5e, 0xe8, 0xa1, 0x40, 0xef, 0xe5, 0x16, + 0x14, 0x6a, 0xfc, 0x1e, 0x96, 0x62, 0x7c, 0x22, 0x55, 0x38, 0x5c, 0xd2, 0x09, 0x22, 0x2c, 0x6c, + 0x0e, 0xe0, 0xaf, 0x60, 0x63, 0x7e, 0xf2, 0xa1, 0xf6, 0xc2, 0xf8, 0x2f, 0x8d, 0x48, 0x91, 0xb3, + 0xdd, 0xbf, 0x97, 0xa0, 0xc6, 0x97, 0x5b, 0x1c, 0xa0, 0x1e, 0xc8, 0x0c, 0x09, 0xec, 0xcf, 0x63, + 0xe1, 0xce, 0xd8, 0x5c, 0x78, 0x8b, 0x7e, 0x80, 0x55, 0xae, 0x17, 0x7d, 0x24, 0x7e, 0x6b, 0x33, + 0xeb, 0x7b, 0x73, 0xef, 0x2a, 0x36, 0x9e, 0x8e, 0x3e, 0xac, 0x25, 0x7f, 0x08, 0x68, 0x4f, 0x8c, + 0xa3, 0xec, 0xda, 0x76, 0x85, 0xaf, 0x3a, 0xc0, 0x6c, 0x83, 0x46, 0xad, 0x05, 0x08, 0xc8, 0xed, + 0xf3, 0xcd, 0x87, 0x25, 0x38, 0xb9, 0xd3, 0xbf, 0x49, 0xb0, 0x55, 0xbc, 0xbe, 0xa2, 0xfd, 0xab, + 0xb4, 0x14, 0xe2, 0xf4, 0xb3, 0x6b, 0x4a, 0xa5, 0xc9, 0x93, 0x33, 0x5b, 0xad, 0xb0, 0xdd, 0x2f, + 0x6f, 0xbe, 0xa2, 0x0e, 0x3a, 0xb4, 0x60, 0xdb, 0xf0, 0x26, 0xc5, 0x7a, 0x0e, 0x65, 0xb6, 0x4f, + 0xf5, 0x63, 0x91, 0xbe, 0xf4, 0xea, 0x29, 0xe7, 0xb2, 0x3c, 0x47, 0x77, 0xad, 0xb6, 0x17, 0x58, + 0x1d, 0x0b, 0xbb, 0x54, 0x61, 0x87, 0x5d, 0xe9, 0xbe, 0x4d, 0xe6, 0x7e, 0xa5, 0x9f, 0xb2, 0xe3, + 0x70, 0x85, 0xf2, 0x7d, 0xfa, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x2d, 0xb0, 0x67, 0x71, + 0x0f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go 
b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go new file mode 100644 index 000000000..dbd73b37c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -0,0 +1,252 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/code.proto + +/* +Package code is a generated protocol buffer package. + +It is generated from these files: + google/rpc/code.proto + +It has these top-level messages: +*/ +package code + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The canonical error codes for Google APIs. +// +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. +// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. +type Code int32 + +const ( + // Not an error; returned on success + // + // HTTP Mapping: 200 OK + Code_OK Code = 0 + // The operation was cancelled, typically by the caller. + // + // HTTP Mapping: 499 Client Closed Request + Code_CANCELLED Code = 1 + // Unknown error. For example, this error may be returned when + // a `Status` value received from another address space belongs to + // an error space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. 
+ // + // HTTP Mapping: 500 Internal Server Error + Code_UNKNOWN Code = 2 + // The client specified an invalid argument. Note that this differs + // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // HTTP Mapping: 400 Bad Request + Code_INVALID_ARGUMENT Code = 3 + // The deadline expired before the operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + // + // HTTP Mapping: 504 Gateway Timeout + Code_DEADLINE_EXCEEDED Code = 4 + // Some requested entity (e.g., file or directory) was not found. + // + // Note to server developers: if a request is denied for an entire class + // of users, such as gradual feature rollout or undocumented whitelist, + // `NOT_FOUND` may be used. If a request is denied for some users within + // a class of users, such as user-based access control, `PERMISSION_DENIED` + // must be used. + // + // HTTP Mapping: 404 Not Found + Code_NOT_FOUND Code = 5 + // The entity that a client attempted to create (e.g., file or directory) + // already exists. + // + // HTTP Mapping: 409 Conflict + Code_ALREADY_EXISTS Code = 6 + // The caller does not have permission to execute the specified + // operation. `PERMISSION_DENIED` must not be used for rejections + // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` + // instead for those errors). `PERMISSION_DENIED` must not be + // used if the caller can not be identified (use `UNAUTHENTICATED` + // instead for those errors). This error code does not imply the + // request is valid or the requested entity exists or satisfies + // other pre-conditions. 
+ // + // HTTP Mapping: 403 Forbidden + Code_PERMISSION_DENIED Code = 7 + // The request does not have valid authentication credentials for the + // operation. + // + // HTTP Mapping: 401 Unauthorized + Code_UNAUTHENTICATED Code = 16 + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + // + // HTTP Mapping: 429 Too Many Requests + Code_RESOURCE_EXHAUSTED Code = 8 + // The operation was rejected because the system is not in a state + // required for the operation's execution. For example, the directory + // to be deleted is non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // Service implementors can use the following guidelines to decide + // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level + // (e.g., when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence). + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. + // + // HTTP Mapping: 400 Bad Request + Code_FAILED_PRECONDITION Code = 9 + // The operation was aborted, typically due to a concurrency issue such as + // a sequencer check failure or transaction abort. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 409 Conflict + Code_ABORTED Code = 10 + // The operation was attempted past the valid range. E.g., seeking or + // reading past end-of-file. 
+ // + // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate `INVALID_ARGUMENT` if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // `OUT_OF_RANGE` if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between `FAILED_PRECONDITION` and + // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an `OUT_OF_RANGE` error to detect when + // they are done. + // + // HTTP Mapping: 400 Bad Request + Code_OUT_OF_RANGE Code = 11 + // The operation is not implemented or is not supported/enabled in this + // service. + // + // HTTP Mapping: 501 Not Implemented + Code_UNIMPLEMENTED Code = 12 + // Internal errors. This means that some invariants expected by the + // underlying system have been broken. This error code is reserved + // for serious errors. + // + // HTTP Mapping: 500 Internal Server Error + Code_INTERNAL Code = 13 + // The service is currently unavailable. This is most likely a + // transient condition, which can be corrected by retrying with + // a backoff. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 503 Service Unavailable + Code_UNAVAILABLE Code = 14 + // Unrecoverable data loss or corruption. 
+ // + // HTTP Mapping: 500 Internal Server Error + Code_DATA_LOSS Code = 15 +) + +var Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELLED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", +} +var Code_value = map[string]int32{ + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, +} + +func (x Code) String() string { + return proto.EnumName(Code_name, int32(x)) +} +func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func init() { + proto.RegisterEnum("google.rpc.Code", Code_name, Code_value) +} + +func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 362 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x51, 0xcd, 0x6e, 0x93, 0x31, + 0x10, 0xa4, 0x69, 0x49, 0x9b, 0xcd, 0xdf, 0xd6, 0xa5, 0xf0, 0x0e, 0x1c, 0x92, 0x43, 0x8f, 0x9c, + 0x36, 0x9f, 0x37, 0xad, 0x55, 0x67, 0xfd, 0xc9, 0x3f, 0x25, 0x70, 0xb1, 0x4a, 0x1a, 0x7d, 0x42, + 0x2a, 0x75, 0xf4, 0xc1, 0x13, 0xf1, 0x12, 0xbc, 0x1e, 0x72, 0x8b, 0xe8, 0xc5, 0x87, 0x99, 0xf1, + 0xee, 0xce, 0x0c, 0x5c, 0x76, 0xa5, 0x74, 0x8f, 0xfb, 0x65, 0x7f, 0xd8, 0x2d, 0x77, 0xe5, 0x61, + 0xbf, 0x38, 0xf4, 0xe5, 0x57, 0x51, 0xf0, 0x02, 0x2f, 0xfa, 0xc3, 0xee, 0xe3, 0x9f, 0x01, 0x9c, + 0x34, 0xe5, 0x61, 0xaf, 0x86, 0x30, 0x70, 0xb7, 0xf8, 0x46, 0x4d, 0x61, 0xd4, 0x90, 0x34, 0x6c, + 0x2d, 0x6b, 
0x3c, 0x52, 0x63, 0x38, 0x4d, 0x72, 0x2b, 0xee, 0xb3, 0xe0, 0x40, 0xbd, 0x03, 0x34, + 0x72, 0x47, 0xd6, 0xe8, 0x4c, 0xfe, 0x3a, 0x6d, 0x58, 0x22, 0x1e, 0xab, 0x4b, 0x38, 0xd7, 0x4c, + 0xda, 0x1a, 0xe1, 0xcc, 0xdb, 0x86, 0x59, 0xb3, 0xc6, 0x93, 0x3a, 0x48, 0x5c, 0xcc, 0x6b, 0x97, + 0x44, 0xe3, 0x5b, 0xa5, 0x60, 0x46, 0xd6, 0x33, 0xe9, 0x2f, 0x99, 0xb7, 0x26, 0xc4, 0x80, 0xc3, + 0xfa, 0xb3, 0x65, 0xbf, 0x31, 0x21, 0x18, 0x27, 0x59, 0xb3, 0x18, 0xd6, 0x78, 0xaa, 0x2e, 0x60, + 0x9e, 0x84, 0x52, 0xbc, 0x61, 0x89, 0xa6, 0xa1, 0xc8, 0x1a, 0x51, 0xbd, 0x07, 0xe5, 0x39, 0xb8, + 0xe4, 0x9b, 0xba, 0xe5, 0x86, 0x52, 0xa8, 0xf8, 0x99, 0xfa, 0x00, 0x17, 0x6b, 0x32, 0x96, 0x75, + 0x6e, 0x3d, 0x37, 0x4e, 0xb4, 0x89, 0xc6, 0x09, 0x8e, 0xea, 0xe5, 0xb4, 0x72, 0xbe, 0xaa, 0x40, + 0x21, 0x4c, 0x5c, 0x8a, 0xd9, 0xad, 0xb3, 0x27, 0xb9, 0x66, 0x1c, 0xab, 0x73, 0x98, 0x26, 0x31, + 0x9b, 0xd6, 0x72, 0xb5, 0xc1, 0x1a, 0x27, 0x6a, 0x02, 0x67, 0x46, 0x22, 0x7b, 0x21, 0x8b, 0x53, + 0x35, 0x87, 0x71, 0x12, 0xba, 0x23, 0x63, 0x69, 0x65, 0x19, 0x67, 0xd5, 0x90, 0xa6, 0x48, 0xd9, + 0xba, 0x10, 0x70, 0xbe, 0xda, 0xc2, 0x6c, 0x57, 0x7e, 0x2c, 0x5e, 0xb3, 0x5c, 0x8d, 0x6a, 0x90, + 0x6d, 0x8d, 0xb8, 0x3d, 0xfa, 0x7a, 0xf5, 0x8f, 0xe8, 0xca, 0xe3, 0xfd, 0x53, 0xb7, 0x28, 0x7d, + 0xb7, 0xec, 0xf6, 0x4f, 0xcf, 0x05, 0x2c, 0x5f, 0xa8, 0xfb, 0xc3, 0xf7, 0x9f, 0xff, 0xab, 0xf9, + 0x54, 0x9f, 0xdf, 0x83, 0x63, 0xdf, 0x36, 0xdf, 0x86, 0xcf, 0xaa, 0xab, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x8e, 0x97, 0x77, 0xc2, 0xbf, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go new file mode 100644 index 000000000..068bfb2ed --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -0,0 +1,495 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/rpc/error_details.proto + +/* +Package errdetails is a generated protocol buffer package. + +It is generated from these files: + google/rpc/error_details.proto + +It has these top-level messages: + RetryInfo + DebugInfo + QuotaFailure + PreconditionFailure + BadRequest + RequestInfo + ResourceInfo + Help + LocalizedMessage +*/ +package errdetails + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/duration" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retires have been reached or a maximum retry delay cap has been +// reached. +type RetryInfo struct { + // Clients should wait at least this long between retrying the same request. 
+ RetryDelay *google_protobuf.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay" json:"retry_delay,omitempty"` +} + +func (m *RetryInfo) Reset() { *m = RetryInfo{} } +func (m *RetryInfo) String() string { return proto.CompactTextString(m) } +func (*RetryInfo) ProtoMessage() {} +func (*RetryInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *RetryInfo) GetRetryDelay() *google_protobuf.Duration { + if m != nil { + return m.RetryDelay + } + return nil +} + +// Describes additional debugging info. +type DebugInfo struct { + // The stack trace entries indicating where the error occurred. + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries" json:"stack_entries,omitempty"` + // Additional debugging information provided by the server. + Detail string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` +} + +func (m *DebugInfo) Reset() { *m = DebugInfo{} } +func (m *DebugInfo) String() string { return proto.CompactTextString(m) } +func (*DebugInfo) ProtoMessage() {} +func (*DebugInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *DebugInfo) GetStackEntries() []string { + if m != nil { + return m.StackEntries + } + return nil +} + +func (m *DebugInfo) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryDetail and Help types for other details about handling a +// quota failure. +type QuotaFailure struct { + // Describes all quota violations. 
+ Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations" json:"violations,omitempty"` +} + +func (m *QuotaFailure) Reset() { *m = QuotaFailure{} } +func (m *QuotaFailure) String() string { return proto.CompactTextString(m) } +func (*QuotaFailure) ProtoMessage() {} +func (*QuotaFailure) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *QuotaFailure) GetViolations() []*QuotaFailure_Violation { + if m != nil { + return m.Violations + } + return nil +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + // The subject on which the quota check failed. + // For example, "clientip:" or "project:". + Subject string `protobuf:"bytes,1,opt,name=subject" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. + // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *QuotaFailure_Violation) Reset() { *m = QuotaFailure_Violation{} } +func (m *QuotaFailure_Violation) String() string { return proto.CompactTextString(m) } +func (*QuotaFailure_Violation) ProtoMessage() {} +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *QuotaFailure_Violation) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *QuotaFailure_Violation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Describes what preconditions have failed. 
+// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +type PreconditionFailure struct { + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations" json:"violations,omitempty"` +} + +func (m *PreconditionFailure) Reset() { *m = PreconditionFailure{} } +func (m *PreconditionFailure) String() string { return proto.CompactTextString(m) } +func (*PreconditionFailure) ProtoMessage() {} +func (*PreconditionFailure) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { + if m != nil { + return m.Violations + } + return nil +} + +// A message type used to describe a single precondition failure. +type PreconditionFailure_Violation struct { + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation types. For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would + // indicate which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` +} + +func (m *PreconditionFailure_Violation) Reset() { *m = PreconditionFailure_Violation{} } +func (m *PreconditionFailure_Violation) String() string { return proto.CompactTextString(m) } +func (*PreconditionFailure_Violation) ProtoMessage() {} +func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 0} +} + +func (m *PreconditionFailure_Violation) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PreconditionFailure_Violation) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *PreconditionFailure_Violation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + // Describes all violations in a client request. + FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations" json:"field_violations,omitempty"` +} + +func (m *BadRequest) Reset() { *m = BadRequest{} } +func (m *BadRequest) String() string { return proto.CompactTextString(m) } +func (*BadRequest) ProtoMessage() {} +func (*BadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { + if m != nil { + return m.FieldViolations + } + return nil +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + // A path leading to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. E.g., "field_violations.field" would identify this field. + Field string `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` + // A description of why the request element is bad. 
+ Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *BadRequest_FieldViolation) Reset() { *m = BadRequest_FieldViolation{} } +func (m *BadRequest_FieldViolation) String() string { return proto.CompactTextString(m) } +func (*BadRequest_FieldViolation) ProtoMessage() {} +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +func (m *BadRequest_FieldViolation) GetField() string { + if m != nil { + return m.Field + } + return "" +} + +func (m *BadRequest_FieldViolation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. +type RequestInfo struct { + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"` + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData" json:"serving_data,omitempty"` +} + +func (m *RequestInfo) Reset() { *m = RequestInfo{} } +func (m *RequestInfo) String() string { return proto.CompactTextString(m) } +func (*RequestInfo) ProtoMessage() {} +func (*RequestInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *RequestInfo) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +func (m *RequestInfo) GetServingData() string { + if m != nil { + return m.ServingData + } + return "" +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + // A name for the type of resource being accessed, e.g. 
"sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". + ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType" json:"resource_type,omitempty"` + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName" json:"resource_name,omitempty"` + // The owner of the resource (optional). + // For example, "user:" or "project:". + Owner string `protobuf:"bytes,3,opt,name=owner" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` +} + +func (m *ResourceInfo) Reset() { *m = ResourceInfo{} } +func (m *ResourceInfo) String() string { return proto.CompactTextString(m) } +func (*ResourceInfo) ProtoMessage() {} +func (*ResourceInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ResourceInfo) GetResourceType() string { + if m != nil { + return m.ResourceType + } + return "" +} + +func (m *ResourceInfo) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *ResourceInfo) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *ResourceInfo) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Provides links to documentation or for performing an out of band action. 
+// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + // URL(s) pointing to additional information on handling the current error. + Links []*Help_Link `protobuf:"bytes,1,rep,name=links" json:"links,omitempty"` +} + +func (m *Help) Reset() { *m = Help{} } +func (m *Help) String() string { return proto.CompactTextString(m) } +func (*Help) ProtoMessage() {} +func (*Help) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Help) GetLinks() []*Help_Link { + if m != nil { + return m.Links + } + return nil +} + +// Describes a URL link. +type Help_Link struct { + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The URL of the link. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` +} + +func (m *Help_Link) Reset() { *m = Help_Link{} } +func (m *Help_Link) String() string { return proto.CompactTextString(m) } +func (*Help_Link) ProtoMessage() {} +func (*Help_Link) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +func (m *Help_Link) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Help_Link) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + // The locale used following the specification defined at + // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale" json:"locale,omitempty"` + // The localized error message in the above locale. 
+ Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *LocalizedMessage) Reset() { *m = LocalizedMessage{} } +func (m *LocalizedMessage) String() string { return proto.CompactTextString(m) } +func (*LocalizedMessage) ProtoMessage() {} +func (*LocalizedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *LocalizedMessage) GetLocale() string { + if m != nil { + return m.Locale + } + return "" +} + +func (m *LocalizedMessage) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func init() { + proto.RegisterType((*RetryInfo)(nil), "google.rpc.RetryInfo") + proto.RegisterType((*DebugInfo)(nil), "google.rpc.DebugInfo") + proto.RegisterType((*QuotaFailure)(nil), "google.rpc.QuotaFailure") + proto.RegisterType((*QuotaFailure_Violation)(nil), "google.rpc.QuotaFailure.Violation") + proto.RegisterType((*PreconditionFailure)(nil), "google.rpc.PreconditionFailure") + proto.RegisterType((*PreconditionFailure_Violation)(nil), "google.rpc.PreconditionFailure.Violation") + proto.RegisterType((*BadRequest)(nil), "google.rpc.BadRequest") + proto.RegisterType((*BadRequest_FieldViolation)(nil), "google.rpc.BadRequest.FieldViolation") + proto.RegisterType((*RequestInfo)(nil), "google.rpc.RequestInfo") + proto.RegisterType((*ResourceInfo)(nil), "google.rpc.ResourceInfo") + proto.RegisterType((*Help)(nil), "google.rpc.Help") + proto.RegisterType((*Help_Link)(nil), "google.rpc.Help.Link") + proto.RegisterType((*LocalizedMessage)(nil), "google.rpc.LocalizedMessage") +} + +func init() { proto.RegisterFile("google/rpc/error_details.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 595 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0x95, 0x9b, 0xb4, 0x9f, 0x7c, 0x93, 0xaf, 0x14, 0xf3, 0xa3, 0x10, 0x09, 0x14, 0x8c, 0x90, + 0x8a, 0x90, 0x1c, 0xa9, 0xec, 0xca, 0x02, 0x29, 0xb8, 0x7f, 
0x52, 0x81, 0x60, 0x21, 0x16, 0xb0, + 0xb0, 0x26, 0xf6, 0x8d, 0x35, 0x74, 0xe2, 0x31, 0x33, 0xe3, 0xa2, 0xf0, 0x14, 0xec, 0xd9, 0xb1, + 0xe2, 0x25, 0x78, 0x37, 0x34, 0x9e, 0x99, 0xc6, 0x6d, 0x0a, 0x62, 0x37, 0xe7, 0xcc, 0x99, 0xe3, + 0x73, 0xaf, 0xae, 0x2f, 0x3c, 0x28, 0x38, 0x2f, 0x18, 0x8e, 0x45, 0x95, 0x8d, 0x51, 0x08, 0x2e, + 0xd2, 0x1c, 0x15, 0xa1, 0x4c, 0x46, 0x95, 0xe0, 0x8a, 0x07, 0x60, 0xee, 0x23, 0x51, 0x65, 0x43, + 0xa7, 0x6d, 0x6e, 0x66, 0xf5, 0x7c, 0x9c, 0xd7, 0x82, 0x28, 0xca, 0x4b, 0xa3, 0x0d, 0x8f, 0xc0, + 0x4f, 0x50, 0x89, 0xe5, 0x49, 0x39, 0xe7, 0xc1, 0x3e, 0xf4, 0x84, 0x06, 0x69, 0x8e, 0x8c, 0x2c, + 0x07, 0xde, 0xc8, 0xdb, 0xed, 0xed, 0xdd, 0x8b, 0xac, 0x9d, 0xb3, 0x88, 0x62, 0x6b, 0x91, 0x40, + 0xa3, 0x8e, 0xb5, 0x38, 0x3c, 0x06, 0x3f, 0xc6, 0x59, 0x5d, 0x34, 0x46, 0x8f, 0xe0, 0x7f, 0xa9, + 0x48, 0x76, 0x96, 0x62, 0xa9, 0x04, 0x45, 0x39, 0xf0, 0x46, 0x9d, 0x5d, 0x3f, 0xe9, 0x37, 0xe4, + 0x81, 0xe1, 0x82, 0xbb, 0xb0, 0x65, 0x72, 0x0f, 0x36, 0x46, 0xde, 0xae, 0x9f, 0x58, 0x14, 0x7e, + 0xf7, 0xa0, 0xff, 0xb6, 0xe6, 0x8a, 0x1c, 0x12, 0xca, 0x6a, 0x81, 0xc1, 0x04, 0xe0, 0x9c, 0x72, + 0xd6, 0x7c, 0xd3, 0x58, 0xf5, 0xf6, 0xc2, 0x68, 0x55, 0x64, 0xd4, 0x56, 0x47, 0xef, 0x9d, 0x34, + 0x69, 0xbd, 0x1a, 0x1e, 0x81, 0x7f, 0x71, 0x11, 0x0c, 0xe0, 0x3f, 0x59, 0xcf, 0x3e, 0x61, 0xa6, + 0x9a, 0x1a, 0xfd, 0xc4, 0xc1, 0x60, 0x04, 0xbd, 0x1c, 0x65, 0x26, 0x68, 0xa5, 0x85, 0x36, 0x58, + 0x9b, 0x0a, 0x7f, 0x79, 0x70, 0x6b, 0x2a, 0x30, 0xe3, 0x65, 0x4e, 0x35, 0xe1, 0x42, 0x9e, 0x5c, + 0x13, 0xf2, 0x49, 0x3b, 0xe4, 0x35, 0x8f, 0xfe, 0x90, 0xf5, 0x63, 0x3b, 0x6b, 0x00, 0x5d, 0xb5, + 0xac, 0xd0, 0x06, 0x6d, 0xce, 0xed, 0xfc, 0x1b, 0x7f, 0xcd, 0xdf, 0x59, 0xcf, 0xff, 0xd3, 0x03, + 0x98, 0x90, 0x3c, 0xc1, 0xcf, 0x35, 0x4a, 0x15, 0x4c, 0x61, 0x67, 0x4e, 0x91, 0xe5, 0xe9, 0x5a, + 0xf8, 0xc7, 0xed, 0xf0, 0xab, 0x17, 0xd1, 0xa1, 0x96, 0xaf, 0x82, 0xdf, 0x98, 0x5f, 0xc2, 0x72, + 0x78, 0x0c, 0xdb, 0x97, 0x25, 0xc1, 0x6d, 0xd8, 0x6c, 0x44, 0xb6, 0x06, 0x03, 0xfe, 0xa1, 0xd5, + 
0x6f, 0xa0, 0x67, 0x3f, 0xda, 0x0c, 0xd5, 0x7d, 0x00, 0x61, 0x60, 0x4a, 0x9d, 0x97, 0x6f, 0x99, + 0x93, 0x3c, 0x78, 0x08, 0x7d, 0x89, 0xe2, 0x9c, 0x96, 0x45, 0x9a, 0x13, 0x45, 0x9c, 0xa1, 0xe5, + 0x62, 0xa2, 0x48, 0xf8, 0xcd, 0x83, 0x7e, 0x82, 0x92, 0xd7, 0x22, 0x43, 0x37, 0xa7, 0xc2, 0xe2, + 0xb4, 0xd5, 0xe5, 0xbe, 0x23, 0xdf, 0xe9, 0x6e, 0xb7, 0x45, 0x25, 0x59, 0xa0, 0x75, 0xbe, 0x10, + 0xbd, 0x26, 0x0b, 0xd4, 0x35, 0xf2, 0x2f, 0x25, 0x0a, 0xdb, 0x72, 0x03, 0xae, 0xd6, 0xd8, 0x5d, + 0xaf, 0x91, 0x43, 0xf7, 0x18, 0x59, 0x15, 0x3c, 0x85, 0x4d, 0x46, 0xcb, 0x33, 0xd7, 0xfc, 0x3b, + 0xed, 0xe6, 0x6b, 0x41, 0x74, 0x4a, 0xcb, 0xb3, 0xc4, 0x68, 0x86, 0xfb, 0xd0, 0xd5, 0xf0, 0xaa, + 0xbd, 0xb7, 0x66, 0x1f, 0xec, 0x40, 0xa7, 0x16, 0xee, 0x07, 0xd3, 0xc7, 0x30, 0x86, 0x9d, 0x53, + 0x9e, 0x11, 0x46, 0xbf, 0x62, 0xfe, 0x0a, 0xa5, 0x24, 0x05, 0xea, 0x3f, 0x91, 0x69, 0xce, 0xd5, + 0x6f, 0x91, 0x9e, 0xb3, 0x85, 0x91, 0xb8, 0x39, 0xb3, 0x70, 0xc2, 0x60, 0x3b, 0xe3, 0x8b, 0x56, + 0xc8, 0xc9, 0xcd, 0x03, 0xbd, 0x89, 0x62, 0xb3, 0x88, 0xa6, 0x7a, 0x55, 0x4c, 0xbd, 0x0f, 0x2f, + 0xac, 0xa0, 0xe0, 0x8c, 0x94, 0x45, 0xc4, 0x45, 0x31, 0x2e, 0xb0, 0x6c, 0x16, 0xc9, 0xd8, 0x5c, + 0x91, 0x8a, 0x4a, 0xb7, 0xc8, 0xec, 0x16, 0x7b, 0xbe, 0x3a, 0xfe, 0xd8, 0xe8, 0x24, 0xd3, 0x97, + 0xb3, 0xad, 0xe6, 0xc5, 0xb3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x15, 0x46, 0x2d, 0xf9, + 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 000000000..8867ae781 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +/* +Package status is a generated protocol buffer package. 
+ +It is generated from these files: + google/rpc/status.proto + +It has these top-level messages: + Status +*/ +package status + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` that can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. 
When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*google_protobuf.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 
0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, + 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/admin/database/v1/spanner_database_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/admin/database/v1/spanner_database_admin.pb.go new file mode 100644 index 000000000..c943c997e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/admin/database/v1/spanner_database_admin.pb.go @@ -0,0 +1,920 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/spanner/admin/database/v1/spanner_database_admin.proto + +/* +Package database is a generated protocol buffer package. + +It is generated from these files: + google/spanner/admin/database/v1/spanner_database_admin.proto + +It has these top-level messages: + Database + ListDatabasesRequest + ListDatabasesResponse + CreateDatabaseRequest + CreateDatabaseMetadata + GetDatabaseRequest + UpdateDatabaseDdlRequest + UpdateDatabaseDdlMetadata + DropDatabaseRequest + GetDatabaseDdlRequest + GetDatabaseDdlResponse +*/ +package database + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_iam_v11 "google.golang.org/genproto/googleapis/iam/v1" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Indicates the current state of the database. +type Database_State int32 + +const ( + // Not specified. + Database_STATE_UNSPECIFIED Database_State = 0 + // The database is still being created. Operations on the database may fail + // with `FAILED_PRECONDITION` in this state. + Database_CREATING Database_State = 1 + // The database is fully created and ready for use. + Database_READY Database_State = 2 +) + +var Database_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "CREATING", + 2: "READY", +} +var Database_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "CREATING": 1, + "READY": 2, +} + +func (x Database_State) String() string { + return proto.EnumName(Database_State_name, int32(x)) +} +func (Database_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// A Cloud Spanner database. +type Database struct { + // Required. The name of the database. Values are of the form + // `projects//instances//databases/`, + // where `` is as specified in the `CREATE DATABASE` + // statement. This name can be passed to other API methods to + // identify the database. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Output only. The current database state. 
+ State Database_State `protobuf:"varint,2,opt,name=state,enum=google.spanner.admin.database.v1.Database_State" json:"state,omitempty"` +} + +func (m *Database) Reset() { *m = Database{} } +func (m *Database) String() string { return proto.CompactTextString(m) } +func (*Database) ProtoMessage() {} +func (*Database) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Database) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Database) GetState() Database_State { + if m != nil { + return m.State + } + return Database_STATE_UNSPECIFIED +} + +// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +type ListDatabasesRequest struct { + // Required. The instance whose databases should be listed. + // Values are of the form `projects//instances/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Number of databases to be returned in the response. If 0 or less, + // defaults to the server's maximum allowed page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a + // previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListDatabasesRequest) Reset() { *m = ListDatabasesRequest{} } +func (m *ListDatabasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesRequest) ProtoMessage() {} +func (*ListDatabasesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ListDatabasesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListDatabasesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDatabasesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +type ListDatabasesResponse struct { + // Databases that matched the request. + Databases []*Database `protobuf:"bytes,1,rep,name=databases" json:"databases,omitempty"` + // `next_page_token` can be sent in a subsequent + // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more + // of the matching databases. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListDatabasesResponse) Reset() { *m = ListDatabasesResponse{} } +func (m *ListDatabasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDatabasesResponse) ProtoMessage() {} +func (*ListDatabasesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListDatabasesResponse) GetDatabases() []*Database { + if m != nil { + return m.Databases + } + return nil +} + +func (m *ListDatabasesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. 
+type CreateDatabaseRequest struct { + // Required. The name of the instance that will serve the new database. + // Values are of the form `projects//instances/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. A `CREATE DATABASE` statement, which specifies the ID of the + // new database. The database ID must conform to the regular expression + // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length. + CreateStatement string `protobuf:"bytes,2,opt,name=create_statement,json=createStatement" json:"create_statement,omitempty"` + // An optional list of DDL statements to run inside the newly created + // database. Statements can create tables, indexes, etc. These + // statements execute atomically with the creation of the database: + // if there is an error in any statement, the database is not created. + ExtraStatements []string `protobuf:"bytes,3,rep,name=extra_statements,json=extraStatements" json:"extra_statements,omitempty"` +} + +func (m *CreateDatabaseRequest) Reset() { *m = CreateDatabaseRequest{} } +func (m *CreateDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseRequest) ProtoMessage() {} +func (*CreateDatabaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *CreateDatabaseRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateDatabaseRequest) GetCreateStatement() string { + if m != nil { + return m.CreateStatement + } + return "" +} + +func (m *CreateDatabaseRequest) GetExtraStatements() []string { + if m != nil { + return m.ExtraStatements + } + return nil +} + +// Metadata type for the operation returned by +// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +type CreateDatabaseMetadata struct { + // The database being created. 
+ Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` +} + +func (m *CreateDatabaseMetadata) Reset() { *m = CreateDatabaseMetadata{} } +func (m *CreateDatabaseMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateDatabaseMetadata) ProtoMessage() {} +func (*CreateDatabaseMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *CreateDatabaseMetadata) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. +type GetDatabaseRequest struct { + // Required. The name of the requested database. Values are of the form + // `projects//instances//databases/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetDatabaseRequest) Reset() { *m = GetDatabaseRequest{} } +func (m *GetDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseRequest) ProtoMessage() {} +func (*GetDatabaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *GetDatabaseRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Enqueues the given DDL statements to be applied, in order but not +// necessarily all at once, to the database schema at some point (or +// points) in the future. The server checks that the statements +// are executable (syntactically valid, name tables that exist, etc.) +// before enqueueing them, but they may still fail upon +// later execution (e.g., if a statement from another batch of +// statements is applied first and it conflicts in some way, or if +// there is some data-related problem like a `NULL` value in a column to +// which `NOT NULL` would be added). If a statement fails, all +// subsequent statements in the batch are automatically cancelled. 
+// +// Each batch of statements is assigned a name which can be used with +// the [Operations][google.longrunning.Operations] API to monitor +// progress. See the +// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more +// details. +type UpdateDatabaseDdlRequest struct { + // Required. The database to update. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // DDL statements to be applied to the database. + Statements []string `protobuf:"bytes,2,rep,name=statements" json:"statements,omitempty"` + // If empty, the new update request is assigned an + // automatically-generated operation ID. Otherwise, `operation_id` + // is used to construct the name of the resulting + // [Operation][google.longrunning.Operation]. + // + // Specifying an explicit operation ID simplifies determining + // whether the statements were executed in the event that the + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, + // or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and + // `operation_id` fields can be combined to form the + // [name][google.longrunning.Operation.name] of the resulting + // [longrunning.Operation][google.longrunning.Operation]: `/operations/`. + // + // `operation_id` should be unique within the database, and must be + // a valid identifier: `[a-z][a-z0-9_]*`. Note that + // automatically-generated operation IDs always begin with an + // underscore. If the named operation already exists, + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns + // `ALREADY_EXISTS`. 
+ OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` +} + +func (m *UpdateDatabaseDdlRequest) Reset() { *m = UpdateDatabaseDdlRequest{} } +func (m *UpdateDatabaseDdlRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDatabaseDdlRequest) ProtoMessage() {} +func (*UpdateDatabaseDdlRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *UpdateDatabaseDdlRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *UpdateDatabaseDdlRequest) GetStatements() []string { + if m != nil { + return m.Statements + } + return nil +} + +func (m *UpdateDatabaseDdlRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +// Metadata type for the operation returned by +// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. +type UpdateDatabaseDdlMetadata struct { + // The database being modified. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` + // For an update this list contains all the statements. For an + // individual statement, this list contains only that statement. + Statements []string `protobuf:"bytes,2,rep,name=statements" json:"statements,omitempty"` + // Reports the commit timestamps of all statements that have + // succeeded so far, where `commit_timestamps[i]` is the commit + // timestamp for the statement `statements[i]`. 
+ CommitTimestamps []*google_protobuf3.Timestamp `protobuf:"bytes,3,rep,name=commit_timestamps,json=commitTimestamps" json:"commit_timestamps,omitempty"` +} + +func (m *UpdateDatabaseDdlMetadata) Reset() { *m = UpdateDatabaseDdlMetadata{} } +func (m *UpdateDatabaseDdlMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateDatabaseDdlMetadata) ProtoMessage() {} +func (*UpdateDatabaseDdlMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *UpdateDatabaseDdlMetadata) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *UpdateDatabaseDdlMetadata) GetStatements() []string { + if m != nil { + return m.Statements + } + return nil +} + +func (m *UpdateDatabaseDdlMetadata) GetCommitTimestamps() []*google_protobuf3.Timestamp { + if m != nil { + return m.CommitTimestamps + } + return nil +} + +// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. +type DropDatabaseRequest struct { + // Required. The database to be dropped. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` +} + +func (m *DropDatabaseRequest) Reset() { *m = DropDatabaseRequest{} } +func (m *DropDatabaseRequest) String() string { return proto.CompactTextString(m) } +func (*DropDatabaseRequest) ProtoMessage() {} +func (*DropDatabaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *DropDatabaseRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +type GetDatabaseDdlRequest struct { + // Required. The database whose schema we wish to get. 
+ Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` +} + +func (m *GetDatabaseDdlRequest) Reset() { *m = GetDatabaseDdlRequest{} } +func (m *GetDatabaseDdlRequest) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseDdlRequest) ProtoMessage() {} +func (*GetDatabaseDdlRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *GetDatabaseDdlRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +type GetDatabaseDdlResponse struct { + // A list of formatted DDL statements defining the schema of the database + // specified in the request. + Statements []string `protobuf:"bytes,1,rep,name=statements" json:"statements,omitempty"` +} + +func (m *GetDatabaseDdlResponse) Reset() { *m = GetDatabaseDdlResponse{} } +func (m *GetDatabaseDdlResponse) String() string { return proto.CompactTextString(m) } +func (*GetDatabaseDdlResponse) ProtoMessage() {} +func (*GetDatabaseDdlResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *GetDatabaseDdlResponse) GetStatements() []string { + if m != nil { + return m.Statements + } + return nil +} + +func init() { + proto.RegisterType((*Database)(nil), "google.spanner.admin.database.v1.Database") + proto.RegisterType((*ListDatabasesRequest)(nil), "google.spanner.admin.database.v1.ListDatabasesRequest") + proto.RegisterType((*ListDatabasesResponse)(nil), "google.spanner.admin.database.v1.ListDatabasesResponse") + proto.RegisterType((*CreateDatabaseRequest)(nil), "google.spanner.admin.database.v1.CreateDatabaseRequest") + proto.RegisterType((*CreateDatabaseMetadata)(nil), "google.spanner.admin.database.v1.CreateDatabaseMetadata") + proto.RegisterType((*GetDatabaseRequest)(nil), "google.spanner.admin.database.v1.GetDatabaseRequest") + proto.RegisterType((*UpdateDatabaseDdlRequest)(nil), 
"google.spanner.admin.database.v1.UpdateDatabaseDdlRequest") + proto.RegisterType((*UpdateDatabaseDdlMetadata)(nil), "google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata") + proto.RegisterType((*DropDatabaseRequest)(nil), "google.spanner.admin.database.v1.DropDatabaseRequest") + proto.RegisterType((*GetDatabaseDdlRequest)(nil), "google.spanner.admin.database.v1.GetDatabaseDdlRequest") + proto.RegisterType((*GetDatabaseDdlResponse)(nil), "google.spanner.admin.database.v1.GetDatabaseDdlResponse") + proto.RegisterEnum("google.spanner.admin.database.v1.Database_State", Database_State_name, Database_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DatabaseAdmin service + +type DatabaseAdminClient interface { + // Lists Cloud Spanner databases. + ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) + // Creates a new Cloud Spanner database and starts to prepare it for serving. + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track preparation of the database. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + // [response][google.longrunning.Operation.response] field type is + // [Database][google.spanner.admin.database.v1.Database], if successful. + CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets the state of a Cloud Spanner database. 
+ GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) + // Updates the schema of a Cloud Spanner database by + // creating/altering/dropping tables, columns, indexes, etc. The returned + // [long-running operation][google.longrunning.Operation] will have a name of + // the format `/operations/` and can be used to + // track execution of the schema change(s). The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + UpdateDatabaseDdl(ctx context.Context, in *UpdateDatabaseDdlRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Drops (aka deletes) a Cloud Spanner database. + DropDatabase(ctx context.Context, in *DropDatabaseRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Returns the schema of a Cloud Spanner database as a list of formatted + // DDL statements. This method does not show pending schema updates, those may + // be queried using the [Operations][google.longrunning.Operations] API. + GetDatabaseDdl(ctx context.Context, in *GetDatabaseDdlRequest, opts ...grpc.CallOption) (*GetDatabaseDdlResponse, error) + // Sets the access control policy on a database resource. Replaces any + // existing policy. + // + // Authorization requires `spanner.databases.setIamPolicy` permission on + // [resource][google.iam.v1.SetIamPolicyRequest.resource]. + SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Gets the access control policy for a database resource. Returns an empty + // policy if a database exists but does not have a policy set. + // + // Authorization requires `spanner.databases.getIamPolicy` permission on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. 
+ GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Returns permissions that the caller has on the specified database resource. + // + // Attempting this RPC on a non-existent Cloud Spanner database will result in + // a NOT_FOUND error if the user has `spanner.databases.list` permission on + // the containing Cloud Spanner instance. Otherwise returns an empty set of + // permissions. + TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +type databaseAdminClient struct { + cc *grpc.ClientConn +} + +func NewDatabaseAdminClient(cc *grpc.ClientConn) DatabaseAdminClient { + return &databaseAdminClient{cc} +} + +func (c *databaseAdminClient) ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) { + out := new(ListDatabasesResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) { + out := new(Database) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) UpdateDatabaseDdl(ctx context.Context, in *UpdateDatabaseDdlRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) DropDatabase(ctx context.Context, in *DropDatabaseRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) GetDatabaseDdl(ctx context.Context, in *GetDatabaseDdlRequest, opts ...grpc.CallOption) (*GetDatabaseDdlResponse, error) { + out := new(GetDatabaseDdlResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseAdminClient) TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) { + out := new(google_iam_v11.TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DatabaseAdmin service + +type DatabaseAdminServer interface { + // Lists Cloud Spanner databases. + ListDatabases(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) + // Creates a new Cloud Spanner database and starts to prepare it for serving. + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track preparation of the database. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + // [response][google.longrunning.Operation.response] field type is + // [Database][google.spanner.admin.database.v1.Database], if successful. + CreateDatabase(context.Context, *CreateDatabaseRequest) (*google_longrunning.Operation, error) + // Gets the state of a Cloud Spanner database. + GetDatabase(context.Context, *GetDatabaseRequest) (*Database, error) + // Updates the schema of a Cloud Spanner database by + // creating/altering/dropping tables, columns, indexes, etc. The returned + // [long-running operation][google.longrunning.Operation] will have a name of + // the format `/operations/` and can be used to + // track execution of the schema change(s). The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. 
+ UpdateDatabaseDdl(context.Context, *UpdateDatabaseDdlRequest) (*google_longrunning.Operation, error) + // Drops (aka deletes) a Cloud Spanner database. + DropDatabase(context.Context, *DropDatabaseRequest) (*google_protobuf2.Empty, error) + // Returns the schema of a Cloud Spanner database as a list of formatted + // DDL statements. This method does not show pending schema updates, those may + // be queried using the [Operations][google.longrunning.Operations] API. + GetDatabaseDdl(context.Context, *GetDatabaseDdlRequest) (*GetDatabaseDdlResponse, error) + // Sets the access control policy on a database resource. Replaces any + // existing policy. + // + // Authorization requires `spanner.databases.setIamPolicy` permission on + // [resource][google.iam.v1.SetIamPolicyRequest.resource]. + SetIamPolicy(context.Context, *google_iam_v11.SetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Gets the access control policy for a database resource. Returns an empty + // policy if a database exists but does not have a policy set. + // + // Authorization requires `spanner.databases.getIamPolicy` permission on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + GetIamPolicy(context.Context, *google_iam_v11.GetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Returns permissions that the caller has on the specified database resource. + // + // Attempting this RPC on a non-existent Cloud Spanner database will result in + // a NOT_FOUND error if the user has `spanner.databases.list` permission on + // the containing Cloud Spanner instance. Otherwise returns an empty set of + // permissions. 
+ TestIamPermissions(context.Context, *google_iam_v11.TestIamPermissionsRequest) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +func RegisterDatabaseAdminServer(s *grpc.Server, srv DatabaseAdminServer) { + s.RegisterService(&_DatabaseAdmin_serviceDesc, srv) +} + +func _DatabaseAdmin_ListDatabases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDatabasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).ListDatabases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).ListDatabases(ctx, req.(*ListDatabasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_CreateDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).CreateDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).CreateDatabase(ctx, req.(*CreateDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_GetDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(DatabaseAdminServer).GetDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).GetDatabase(ctx, req.(*GetDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_UpdateDatabaseDdl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDatabaseDdlRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).UpdateDatabaseDdl(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).UpdateDatabaseDdl(ctx, req.(*UpdateDatabaseDdlRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_DropDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DropDatabaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).DropDatabase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).DropDatabase(ctx, req.(*DropDatabaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_GetDatabaseDdl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(GetDatabaseDdlRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).GetDatabaseDdl(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).GetDatabaseDdl(ctx, req.(*GetDatabaseDdlRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).SetIamPolicy(ctx, req.(*google_iam_v11.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DatabaseAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).GetIamPolicy(ctx, req.(*google_iam_v11.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_DatabaseAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseAdminServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseAdminServer).TestIamPermissions(ctx, req.(*google_iam_v11.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DatabaseAdmin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.spanner.admin.database.v1.DatabaseAdmin", + HandlerType: (*DatabaseAdminServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListDatabases", + Handler: _DatabaseAdmin_ListDatabases_Handler, + }, + { + MethodName: "CreateDatabase", + Handler: _DatabaseAdmin_CreateDatabase_Handler, + }, + { + MethodName: "GetDatabase", + Handler: _DatabaseAdmin_GetDatabase_Handler, + }, + { + MethodName: "UpdateDatabaseDdl", + Handler: _DatabaseAdmin_UpdateDatabaseDdl_Handler, + }, + { + MethodName: "DropDatabase", + Handler: _DatabaseAdmin_DropDatabase_Handler, + }, + { + MethodName: "GetDatabaseDdl", + Handler: _DatabaseAdmin_GetDatabaseDdl_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _DatabaseAdmin_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _DatabaseAdmin_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _DatabaseAdmin_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/spanner/admin/database/v1/spanner_database_admin.proto", +} + +func init() { + proto.RegisterFile("google/spanner/admin/database/v1/spanner_database_admin.proto", fileDescriptor0) +} + 
+var fileDescriptor0 = []byte{ + // 1020 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xc7, 0x19, 0xa7, 0xa9, 0x92, 0x27, 0x2f, 0x4d, 0x06, 0x1c, 0xb9, 0x5b, 0x5a, 0xcc, 0x82, + 0x2a, 0xd7, 0x12, 0xbb, 0xd8, 0x69, 0x48, 0x30, 0x0a, 0x22, 0xb5, 0x5d, 0xd7, 0x12, 0xb4, 0x96, + 0xed, 0x22, 0xc1, 0xc5, 0x9a, 0xd8, 0xc3, 0xb2, 0xc5, 0xbb, 0xb3, 0xec, 0x8c, 0xab, 0xb6, 0xa8, + 0x17, 0x24, 0x0e, 0x9c, 0x01, 0x89, 0x1b, 0x88, 0x03, 0x07, 0x4e, 0x9c, 0x40, 0xe2, 0xc8, 0x57, + 0xe0, 0x2b, 0xf0, 0x41, 0xd0, 0xcc, 0xee, 0xd8, 0xeb, 0x75, 0x12, 0xdb, 0x1c, 0x7a, 0xf3, 0x3e, + 0xaf, 0xbf, 0xe7, 0xd9, 0xf9, 0x8f, 0x17, 0x8e, 0x1d, 0xc6, 0x9c, 0x21, 0xb5, 0x79, 0x40, 0x7c, + 0x9f, 0x86, 0x36, 0x19, 0x78, 0xae, 0x6f, 0x0f, 0x88, 0x20, 0xa7, 0x84, 0x53, 0xfb, 0x71, 0x49, + 0x7b, 0x7a, 0xda, 0xd6, 0x53, 0x21, 0x56, 0x10, 0x32, 0xc1, 0x70, 0x3e, 0x4a, 0xb7, 0xe2, 0x20, + 0x2b, 0xf2, 0xe9, 0x50, 0xeb, 0x71, 0xc9, 0x78, 0x35, 0x6e, 0x40, 0x02, 0xd7, 0x26, 0xbe, 0xcf, + 0x04, 0x11, 0x2e, 0xf3, 0x79, 0x94, 0x6f, 0x64, 0x93, 0xde, 0x91, 0xf8, 0x3c, 0x36, 0xdf, 0x88, + 0xcd, 0x2e, 0xf1, 0x24, 0x82, 0x4b, 0xbc, 0x5e, 0xc0, 0x86, 0x6e, 0xff, 0x69, 0xec, 0x37, 0xa6, + 0xfd, 0x53, 0xbe, 0x37, 0x62, 0xdf, 0x90, 0xf9, 0x4e, 0x38, 0xf2, 0x7d, 0xd7, 0x77, 0x6c, 0x16, + 0xd0, 0x70, 0xaa, 0xef, 0xb5, 0x38, 0x48, 0x3d, 0x9d, 0x8e, 0x3e, 0xb3, 0xa9, 0x17, 0x08, 0x5d, + 0xe1, 0xb5, 0xb4, 0x53, 0xb8, 0x1e, 0xe5, 0x82, 0x78, 0x41, 0x14, 0x60, 0xfe, 0x84, 0x60, 0xad, + 0x16, 0xcf, 0x88, 0x31, 0x5c, 0xf2, 0x89, 0x47, 0x73, 0x28, 0x8f, 0x0a, 0xeb, 0x6d, 0xf5, 0x1b, + 0xdf, 0x85, 0x55, 0x2e, 0x88, 0xa0, 0xb9, 0x4c, 0x1e, 0x15, 0xb6, 0xcb, 0x6f, 0x5b, 0xf3, 0xd6, + 0x64, 0xe9, 0x72, 0x56, 0x47, 0xe6, 0xb5, 0xa3, 0x74, 0xf3, 0x10, 0x56, 0xd5, 0x33, 0xce, 0xc2, + 0x6e, 0xa7, 0x7b, 0xd2, 0xad, 0xf7, 0x1e, 0xde, 0xef, 0xb4, 0xea, 0xd5, 0xe6, 0xdd, 0x66, 0xbd, + 0xb6, 0xf3, 0x12, 0xde, 0x84, 0xb5, 0x6a, 0xbb, 0x7e, 
0xd2, 0x6d, 0xde, 0x6f, 0xec, 0x20, 0xbc, + 0x0e, 0xab, 0xed, 0xfa, 0x49, 0xed, 0x93, 0x9d, 0x8c, 0xf9, 0x08, 0x5e, 0xf9, 0xd0, 0xe5, 0x42, + 0x57, 0xe5, 0x6d, 0xfa, 0xe5, 0x88, 0x72, 0x81, 0xf7, 0xe0, 0x72, 0x40, 0x42, 0xea, 0x8b, 0x18, + 0x37, 0x7e, 0xc2, 0xd7, 0x60, 0x3d, 0x20, 0x0e, 0xed, 0x71, 0xf7, 0x19, 0xcd, 0xad, 0xe4, 0x51, + 0x61, 0xb5, 0xbd, 0x26, 0x0d, 0x1d, 0xf7, 0x19, 0xc5, 0xd7, 0x01, 0x94, 0x53, 0xb0, 0x2f, 0xa8, + 0x9f, 0xbb, 0xa4, 0x12, 0x55, 0x78, 0x57, 0x1a, 0xcc, 0x6f, 0x11, 0x64, 0x53, 0xcd, 0x78, 0xc0, + 0x7c, 0x4e, 0xf1, 0x3d, 0x58, 0xd7, 0x33, 0xf2, 0x1c, 0xca, 0xaf, 0x14, 0x36, 0xca, 0xc5, 0xc5, + 0x57, 0xd1, 0x9e, 0x24, 0xe3, 0x9b, 0x70, 0xc5, 0xa7, 0x4f, 0x44, 0x2f, 0xc1, 0x91, 0x51, 0x1c, + 0x5b, 0xd2, 0xdc, 0x1a, 0xb3, 0x7c, 0x83, 0x20, 0x5b, 0x0d, 0x29, 0x11, 0x74, 0x5c, 0x65, 0xce, + 0xe4, 0xb7, 0x60, 0xa7, 0xaf, 0x12, 0x7a, 0x6a, 0xe5, 0x9e, 0x8c, 0x88, 0x4a, 0x5f, 0x89, 0xec, + 0x1d, 0x6d, 0x96, 0xa1, 0xf4, 0x89, 0x08, 0xc9, 0x24, 0x92, 0xe7, 0x56, 0xf2, 0x2b, 0x32, 0x54, + 0xd9, 0xc7, 0x91, 0xdc, 0xbc, 0x0d, 0x7b, 0xd3, 0x18, 0x1f, 0x51, 0x41, 0xe4, 0x38, 0xd8, 0x80, + 0x35, 0x3d, 0x56, 0x4c, 0x32, 0x7e, 0x36, 0x0b, 0x80, 0x1b, 0x54, 0xa4, 0xc9, 0xcf, 0x38, 0x60, + 0xe6, 0x53, 0xc8, 0x3d, 0x0c, 0x06, 0x89, 0xfa, 0xb5, 0xc1, 0x50, 0xc7, 0x5f, 0xd0, 0x01, 0xdf, + 0x00, 0x48, 0xc0, 0x67, 0x14, 0x7c, 0xc2, 0x82, 0x5f, 0x87, 0xcd, 0xb1, 0x56, 0x7a, 0xee, 0x40, + 0x1d, 0x85, 0xf5, 0xf6, 0xc6, 0xd8, 0xd6, 0x1c, 0x98, 0x3f, 0x23, 0xb8, 0x3a, 0xd3, 0x7b, 0x91, + 0xf1, 0xe6, 0x36, 0x6f, 0xc0, 0x6e, 0x9f, 0x79, 0x9e, 0x2b, 0x7a, 0x63, 0xc1, 0x45, 0x0b, 0xde, + 0x28, 0x1b, 0xfa, 0xd8, 0x68, 0x4d, 0x5a, 0x5d, 0x1d, 0xd2, 0xde, 0x89, 0x92, 0xc6, 0x06, 0x6e, + 0x96, 0xe0, 0xe5, 0x5a, 0xc8, 0x82, 0xf4, 0x22, 0x2f, 0x5a, 0xfd, 0x3e, 0x64, 0x13, 0xab, 0x5f, + 0x6c, 0x9b, 0xe6, 0x11, 0xec, 0xa5, 0x93, 0xe2, 0x93, 0x3f, 0x3d, 0x2a, 0x4a, 0x8f, 0x5a, 0xfe, + 0x61, 0x13, 0xb6, 0x74, 0xde, 0x89, 0x54, 0x00, 0xfe, 0x13, 0xc1, 0xd6, 0x94, 0x8a, 0xf0, 
0x3b, + 0xf3, 0xa5, 0x72, 0x96, 0xc6, 0x8d, 0xc3, 0xa5, 0xf3, 0x22, 0x68, 0xf3, 0xe0, 0xeb, 0x7f, 0xfe, + 0xfd, 0x2e, 0x63, 0xe3, 0xb7, 0xe4, 0x9d, 0xfa, 0x55, 0xa4, 0x8f, 0xe3, 0x20, 0x64, 0x8f, 0x68, + 0x5f, 0x70, 0xbb, 0x68, 0xbb, 0x3e, 0x17, 0xc4, 0xef, 0x53, 0x6e, 0x17, 0x9f, 0xdb, 0x13, 0x6d, + 0xfe, 0x82, 0x60, 0x7b, 0xfa, 0xb0, 0xe3, 0x05, 0x10, 0xce, 0x54, 0xa9, 0x71, 0x5d, 0x27, 0x26, + 0x6e, 0x6f, 0xeb, 0x81, 0x3e, 0x7d, 0xe6, 0x91, 0x22, 0x2c, 0x9b, 0xcb, 0x11, 0x56, 0x50, 0x11, + 0xff, 0x8a, 0x60, 0x23, 0xf1, 0xae, 0xf0, 0xed, 0xf9, 0x84, 0xb3, 0x52, 0x34, 0x96, 0xb8, 0xbd, + 0x52, 0xdb, 0x94, 0xaa, 0x3d, 0x87, 0x74, 0x02, 0x6a, 0x17, 0x9f, 0xe3, 0xdf, 0x11, 0xec, 0xce, + 0xc8, 0x0b, 0x57, 0xe6, 0x37, 0x3e, 0xef, 0x3e, 0x98, 0xb7, 0xd3, 0x0f, 0x14, 0x67, 0xa5, 0x7c, + 0xa0, 0x38, 0x75, 0xc5, 0x45, 0x58, 0xed, 0xc1, 0x60, 0x28, 0x77, 0xfb, 0x23, 0x82, 0xcd, 0xa4, + 0xde, 0xf0, 0xc1, 0x02, 0x6b, 0x9a, 0xd5, 0xa7, 0xb1, 0x37, 0x23, 0xf2, 0xba, 0xfc, 0x57, 0x36, + 0xdf, 0x55, 0x84, 0xfb, 0xc5, 0xd2, 0xd2, 0x84, 0xf8, 0x6f, 0x04, 0xdb, 0xd3, 0x12, 0x5d, 0xe4, + 0x6c, 0x9e, 0x79, 0x13, 0x18, 0x47, 0xcb, 0x27, 0xc6, 0xc2, 0x3a, 0x56, 0x03, 0x1c, 0xe2, 0xff, + 0xb7, 0x62, 0xfc, 0x3d, 0x82, 0xcd, 0x0e, 0x15, 0x4d, 0xe2, 0xb5, 0xd4, 0x87, 0x0e, 0x36, 0x35, + 0x89, 0x4b, 0x3c, 0xd9, 0x36, 0xe9, 0xd4, 0xb4, 0xd9, 0x54, 0x4c, 0xe4, 0x35, 0x9b, 0x0a, 0xa5, + 0x6a, 0xbe, 0xaf, 0x50, 0x42, 0xca, 0xd9, 0x28, 0xec, 0x2f, 0x84, 0x52, 0xe1, 0x89, 0x2e, 0xf2, + 0xb5, 0x4b, 0xac, 0xc6, 0x45, 0x58, 0x8d, 0x17, 0x82, 0xe5, 0xa4, 0xb0, 0xfe, 0x42, 0x80, 0xbb, + 0x94, 0x2b, 0x23, 0x0d, 0x3d, 0x97, 0x73, 0xf9, 0xdd, 0x87, 0x0b, 0xa9, 0xc6, 0xb3, 0x21, 0x1a, + 0xf1, 0xd6, 0x02, 0x91, 0xf1, 0x8b, 0x7d, 0xa0, 0xb0, 0x9b, 0x66, 0x6d, 0x79, 0x6c, 0x31, 0x53, + 0xb5, 0x82, 0x8a, 0x77, 0xfe, 0x40, 0xf0, 0x66, 0x9f, 0x79, 0x73, 0x4f, 0xda, 0x9d, 0xab, 0x9d, + 0xc8, 0x35, 0xf5, 0x27, 0xd2, 0x92, 0xba, 0x69, 0xa1, 0x4f, 0xef, 0xc5, 0xe9, 0x0e, 0x1b, 0x12, + 0xdf, 0xb1, 0x58, 0xe8, 0xd8, 
0x0e, 0xf5, 0x95, 0xaa, 0xec, 0xc8, 0x45, 0x02, 0x97, 0x9f, 0xff, + 0xcd, 0xff, 0x9e, 0xfe, 0xfd, 0x5b, 0xe6, 0x66, 0x23, 0x2a, 0x55, 0x1d, 0xb2, 0xd1, 0xc0, 0x8a, + 0x9b, 0x5a, 0xaa, 0xdb, 0xe4, 0x9b, 0xf5, 0xe3, 0xd2, 0xe9, 0x65, 0x55, 0x7d, 0xff, 0xbf, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xdc, 0xc4, 0xc8, 0x83, 0x50, 0x0c, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/admin/instance/v1/spanner_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/admin/instance/v1/spanner_instance_admin.pb.go new file mode 100644 index 000000000..f29d5abc7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/admin/instance/v1/spanner_instance_admin.pb.go @@ -0,0 +1,1281 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +/* +Package instance is a generated protocol buffer package. + +It is generated from these files: + google/spanner/admin/instance/v1/spanner_instance_admin.proto + +It has these top-level messages: + InstanceConfig + Instance + ListInstanceConfigsRequest + ListInstanceConfigsResponse + GetInstanceConfigRequest + GetInstanceRequest + CreateInstanceRequest + ListInstancesRequest + ListInstancesResponse + UpdateInstanceRequest + DeleteInstanceRequest + CreateInstanceMetadata + UpdateInstanceMetadata +*/ +package instance + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_iam_v11 "google.golang.org/genproto/googleapis/iam/v1" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf4 
"github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Indicates the current state of the instance. +type Instance_State int32 + +const ( + // Not specified. + Instance_STATE_UNSPECIFIED Instance_State = 0 + // The instance is still being created. Resources may not be + // available yet, and operations such as database creation may not + // work. + Instance_CREATING Instance_State = 1 + // The instance is fully created and ready to do work such as + // creating databases. + Instance_READY Instance_State = 2 +) + +var Instance_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "CREATING", + 2: "READY", +} +var Instance_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "CREATING": 1, + "READY": 2, +} + +func (x Instance_State) String() string { + return proto.EnumName(Instance_State_name, int32(x)) +} +func (Instance_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// A possible configuration for a Cloud Spanner instance. Configurations +// define the geographic placement of nodes and their replication. +type InstanceConfig struct { + // A unique identifier for the instance configuration. Values + // are of the form + // `projects//instanceConfigs/[a-z][-a-z0-9]*` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The name of this instance configuration as it appears in UIs. 
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` +} + +func (m *InstanceConfig) Reset() { *m = InstanceConfig{} } +func (m *InstanceConfig) String() string { return proto.CompactTextString(m) } +func (*InstanceConfig) ProtoMessage() {} +func (*InstanceConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *InstanceConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InstanceConfig) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// An isolated set of Cloud Spanner resources on which databases can be hosted. +type Instance struct { + // Required. A unique identifier for the instance, which cannot be changed + // after the instance is created. Values are of the form + // `projects//instances/[a-z][-a-z0-9]*[a-z0-9]`. The final + // segment of the name must be between 6 and 30 characters in length. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Required. The name of the instance's configuration. Values are of the form + // `projects//instanceConfigs/`. See + // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and + // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. + Config string `protobuf:"bytes,2,opt,name=config" json:"config,omitempty"` + // Required. The descriptive name for this instance as it appears in UIs. + // Must be unique per project and between 4 and 30 characters in length. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Required. The number of nodes allocated to this instance. + NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount" json:"node_count,omitempty"` + // Output only. The current instance state. 
For + // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], the state must be + // either omitted or set to `CREATING`. For + // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], the state must be + // either omitted or set to `READY`. + State Instance_State `protobuf:"varint,6,opt,name=state,enum=google.spanner.admin.instance.v1.Instance_State" json:"state,omitempty"` + // Cloud Labels are a flexible and lightweight mechanism for organizing cloud + // resources into groups that reflect a customer's organizational needs and + // deployment strategies. Cloud Labels can be used to filter collections of + // resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, + // firewall, load balancing, etc.). + // + // * Label keys must be between 1 and 63 characters long and must conform to + // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. + // * Label values must be between 0 and 63 characters long and must conform + // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. + // * No more than 64 labels can be associated with a given resource. + // + // See https://goo.gl/xmQnxf for more information on and examples of labels. + // + // If you plan to use labels in your own code, please note that additional + // characters may be allowed in the future. And so you are advised to use an + // internal label representation, such as JSON, which doesn't rely upon + // specific characters being disallowed. For example, representing labels + // as the string: name + "_" + value would prove problematic if we were to + // allow "_" in a future release. 
+ Labels map[string]string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Instance) Reset() { *m = Instance{} } +func (m *Instance) String() string { return proto.CompactTextString(m) } +func (*Instance) ProtoMessage() {} +func (*Instance) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Instance) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Instance) GetConfig() string { + if m != nil { + return m.Config + } + return "" +} + +func (m *Instance) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Instance) GetNodeCount() int32 { + if m != nil { + return m.NodeCount + } + return 0 +} + +func (m *Instance) GetState() Instance_State { + if m != nil { + return m.State + } + return Instance_STATE_UNSPECIFIED +} + +func (m *Instance) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// The request for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. +type ListInstanceConfigsRequest struct { + // Required. The name of the project for which a list of supported instance + // configurations is requested. Values are of the form + // `projects/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Number of instance configurations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token] + // from a previous [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse]. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListInstanceConfigsRequest) Reset() { *m = ListInstanceConfigsRequest{} } +func (m *ListInstanceConfigsRequest) String() string { return proto.CompactTextString(m) } +func (*ListInstanceConfigsRequest) ProtoMessage() {} +func (*ListInstanceConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListInstanceConfigsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListInstanceConfigsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListInstanceConfigsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The response for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. +type ListInstanceConfigsResponse struct { + // The list of requested instance configurations. + InstanceConfigs []*InstanceConfig `protobuf:"bytes,1,rep,name=instance_configs,json=instanceConfigs" json:"instance_configs,omitempty"` + // `next_page_token` can be sent in a subsequent + // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs] call to + // fetch more of the matching instance configurations. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListInstanceConfigsResponse) Reset() { *m = ListInstanceConfigsResponse{} } +func (m *ListInstanceConfigsResponse) String() string { return proto.CompactTextString(m) } +func (*ListInstanceConfigsResponse) ProtoMessage() {} +func (*ListInstanceConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ListInstanceConfigsResponse) GetInstanceConfigs() []*InstanceConfig { + if m != nil { + return m.InstanceConfigs + } + return nil +} + +func (m *ListInstanceConfigsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request for +// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. +type GetInstanceConfigRequest struct { + // Required. The name of the requested instance configuration. Values are of + // the form `projects//instanceConfigs/`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetInstanceConfigRequest) Reset() { *m = GetInstanceConfigRequest{} } +func (m *GetInstanceConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetInstanceConfigRequest) ProtoMessage() {} +func (*GetInstanceConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *GetInstanceConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance]. +type GetInstanceRequest struct { + // Required. The name of the requested instance. Values are of the form + // `projects//instances/`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetInstanceRequest) Reset() { *m = GetInstanceRequest{} } +func (m *GetInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*GetInstanceRequest) ProtoMessage() {} +func (*GetInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *GetInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. +type CreateInstanceRequest struct { + // Required. The name of the project in which to create the instance. Values + // are of the form `projects/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The ID of the instance to create. Valid identifiers are of the + // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 6 and 30 characters in + // length. + InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId" json:"instance_id,omitempty"` + // Required. The instance to create. The name may be omitted, but if + // specified must be `/instances/`. 
+ Instance *Instance `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` +} + +func (m *CreateInstanceRequest) Reset() { *m = CreateInstanceRequest{} } +func (m *CreateInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateInstanceRequest) ProtoMessage() {} +func (*CreateInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *CreateInstanceRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateInstanceRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +func (m *CreateInstanceRequest) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +// The request for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. +type ListInstancesRequest struct { + // Required. The name of the project for which a list of instances is + // requested. Values are of the form `projects/`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Number of instances to be returned in the response. If 0 or less, defaults + // to the server's maximum allowed page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token] from a + // previous [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // An expression for filtering the results of the request. Filter rules are + // case insensitive. The fields eligible for filtering are: + // + // * name + // * display_name + // * labels.key where key is the name of a label + // + // Some examples of using filters are: + // + // * name:* --> The instance has a name. 
+ // * name:Howl --> The instance's name contains the string "howl". + // * name:HOWL --> Equivalent to above. + // * NAME:howl --> Equivalent to above. + // * labels.env:* --> The instance has the label "env". + // * labels.env:dev --> The instance has the label "env" and the value of + // the label contains the string "dev". + // * name:howl labels.env:dev --> The instance's name contains "howl" and + // it has the label "env" with its value + // containing "dev". + Filter string `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListInstancesRequest) Reset() { *m = ListInstancesRequest{} } +func (m *ListInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*ListInstancesRequest) ProtoMessage() {} +func (*ListInstancesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ListInstancesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListInstancesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListInstancesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListInstancesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// The response for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. +type ListInstancesResponse struct { + // The list of requested instances. + Instances []*Instance `protobuf:"bytes,1,rep,name=instances" json:"instances,omitempty"` + // `next_page_token` can be sent in a subsequent + // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances] call to fetch more + // of the matching instances. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListInstancesResponse) Reset() { *m = ListInstancesResponse{} } +func (m *ListInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*ListInstancesResponse) ProtoMessage() {} +func (*ListInstancesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ListInstancesResponse) GetInstances() []*Instance { + if m != nil { + return m.Instances + } + return nil +} + +func (m *ListInstancesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request for [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. +type UpdateInstanceRequest struct { + // Required. The instance to update, which must always include the instance + // name. Otherwise, only fields mentioned in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included. + Instance *Instance `protobuf:"bytes,1,opt,name=instance" json:"instance,omitempty"` + // Required. A mask specifying which fields in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated. + // The field mask must always be specified; this prevents any future fields in + // [][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know + // about them. 
+ FieldMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask" json:"field_mask,omitempty"` +} + +func (m *UpdateInstanceRequest) Reset() { *m = UpdateInstanceRequest{} } +func (m *UpdateInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateInstanceRequest) ProtoMessage() {} +func (*UpdateInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *UpdateInstanceRequest) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +func (m *UpdateInstanceRequest) GetFieldMask() *google_protobuf3.FieldMask { + if m != nil { + return m.FieldMask + } + return nil +} + +// The request for [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance]. +type DeleteInstanceRequest struct { + // Required. The name of the instance to be deleted. Values are of the form + // `projects//instances/` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteInstanceRequest) Reset() { *m = DeleteInstanceRequest{} } +func (m *DeleteInstanceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteInstanceRequest) ProtoMessage() {} +func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *DeleteInstanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Metadata type for the operation returned by +// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. +type CreateInstanceMetadata struct { + // The instance being created. + Instance *Instance `protobuf:"bytes,1,opt,name=instance" json:"instance,omitempty"` + // The time at which the + // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] request was + // received. 
+ StartTime *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + CancelTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime" json:"cancel_time,omitempty"` + // The time at which this operation failed or was completed successfully. + EndTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` +} + +func (m *CreateInstanceMetadata) Reset() { *m = CreateInstanceMetadata{} } +func (m *CreateInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*CreateInstanceMetadata) ProtoMessage() {} +func (*CreateInstanceMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *CreateInstanceMetadata) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +func (m *CreateInstanceMetadata) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *CreateInstanceMetadata) GetCancelTime() *google_protobuf4.Timestamp { + if m != nil { + return m.CancelTime + } + return nil +} + +func (m *CreateInstanceMetadata) GetEndTime() *google_protobuf4.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +// Metadata type for the operation returned by +// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. +type UpdateInstanceMetadata struct { + // The desired end state of the update. + Instance *Instance `protobuf:"bytes,1,opt,name=instance" json:"instance,omitempty"` + // The time at which [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance] + // request was received. 
+ StartTime *google_protobuf4.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + CancelTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime" json:"cancel_time,omitempty"` + // The time at which this operation failed or was completed successfully. + EndTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` +} + +func (m *UpdateInstanceMetadata) Reset() { *m = UpdateInstanceMetadata{} } +func (m *UpdateInstanceMetadata) String() string { return proto.CompactTextString(m) } +func (*UpdateInstanceMetadata) ProtoMessage() {} +func (*UpdateInstanceMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *UpdateInstanceMetadata) GetInstance() *Instance { + if m != nil { + return m.Instance + } + return nil +} + +func (m *UpdateInstanceMetadata) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *UpdateInstanceMetadata) GetCancelTime() *google_protobuf4.Timestamp { + if m != nil { + return m.CancelTime + } + return nil +} + +func (m *UpdateInstanceMetadata) GetEndTime() *google_protobuf4.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func init() { + proto.RegisterType((*InstanceConfig)(nil), "google.spanner.admin.instance.v1.InstanceConfig") + proto.RegisterType((*Instance)(nil), "google.spanner.admin.instance.v1.Instance") + proto.RegisterType((*ListInstanceConfigsRequest)(nil), "google.spanner.admin.instance.v1.ListInstanceConfigsRequest") + proto.RegisterType((*ListInstanceConfigsResponse)(nil), "google.spanner.admin.instance.v1.ListInstanceConfigsResponse") + proto.RegisterType((*GetInstanceConfigRequest)(nil), 
"google.spanner.admin.instance.v1.GetInstanceConfigRequest") + proto.RegisterType((*GetInstanceRequest)(nil), "google.spanner.admin.instance.v1.GetInstanceRequest") + proto.RegisterType((*CreateInstanceRequest)(nil), "google.spanner.admin.instance.v1.CreateInstanceRequest") + proto.RegisterType((*ListInstancesRequest)(nil), "google.spanner.admin.instance.v1.ListInstancesRequest") + proto.RegisterType((*ListInstancesResponse)(nil), "google.spanner.admin.instance.v1.ListInstancesResponse") + proto.RegisterType((*UpdateInstanceRequest)(nil), "google.spanner.admin.instance.v1.UpdateInstanceRequest") + proto.RegisterType((*DeleteInstanceRequest)(nil), "google.spanner.admin.instance.v1.DeleteInstanceRequest") + proto.RegisterType((*CreateInstanceMetadata)(nil), "google.spanner.admin.instance.v1.CreateInstanceMetadata") + proto.RegisterType((*UpdateInstanceMetadata)(nil), "google.spanner.admin.instance.v1.UpdateInstanceMetadata") + proto.RegisterEnum("google.spanner.admin.instance.v1.Instance_State", Instance_State_name, Instance_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for InstanceAdmin service + +type InstanceAdminClient interface { + // Lists the supported instance configurations for a given project. + ListInstanceConfigs(ctx context.Context, in *ListInstanceConfigsRequest, opts ...grpc.CallOption) (*ListInstanceConfigsResponse, error) + // Gets information about a particular instance configuration. + GetInstanceConfig(ctx context.Context, in *GetInstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfig, error) + // Lists all instances in the given project. 
+ ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) + // Gets information about a particular instance. + GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) + // Creates an instance and begins preparing it to begin serving. The + // returned [long-running operation][google.longrunning.Operation] + // can be used to track the progress of preparing the new + // instance. The instance name is assigned by the caller. If the + // named instance already exists, `CreateInstance` returns + // `ALREADY_EXISTS`. + // + // Immediately upon completion of this request: + // + // * The instance is readable via the API, with all requested attributes + // but no allocated resources. Its state is `CREATING`. + // + // Until completion of the returned operation: + // + // * Cancelling the operation renders the instance immediately unreadable + // via the API. + // * The instance can be deleted. + // * All other attempts to modify the instance are rejected. + // + // Upon completion of the returned operation: + // + // * Billing for all successfully-allocated resources begins (some types + // may have lower than the requested levels). + // * Databases can be created in the instance. + // * The instance's allocated resource levels are readable via the API. + // * The instance's state becomes `READY`. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track creation of the instance. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. 
+ CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Updates an instance, and begins allocating or releasing resources + // as requested. The returned [long-running + // operation][google.longrunning.Operation] can be used to track the + // progress of updating the instance. If the named instance does not + // exist, returns `NOT_FOUND`. + // + // Immediately upon completion of this request: + // + // * For resource types for which a decrease in the instance's allocation + // has been requested, billing is based on the newly-requested level. + // + // Until completion of the returned operation: + // + // * Cancelling the operation sets its metadata's + // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins + // restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, + // after which point it terminates with a `CANCELLED` status. + // * All other attempts to modify the instance are rejected. + // * Reading the instance via the API continues to give the pre-request + // resource levels. + // + // Upon completion of the returned operation: + // + // * Billing begins for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources are available for serving the instance's + // tables. + // * The instance's new resource levels are readable via the API. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track the instance modification. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. 
+ // The [response][google.longrunning.Operation.response] field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + // + // Authorization requires `spanner.instances.update` permission on + // resource [name][google.spanner.admin.instance.v1.Instance.name]. + UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes an instance. + // + // Immediately upon completion of the request: + // + // * Billing ceases for all of the instance's reserved resources. + // + // Soon afterward: + // + // * The instance and *all of its databases* immediately and + // irrevocably disappear from the API. All data in the databases + // is permanently deleted. + DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) + // Sets the access control policy on an instance resource. Replaces any + // existing policy. + // + // Authorization requires `spanner.instances.setIamPolicy` on + // [resource][google.iam.v1.SetIamPolicyRequest.resource]. + SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Gets the access control policy for an instance resource. Returns an empty + // policy if an instance exists but does not have a policy set. + // + // Authorization requires `spanner.instances.getIamPolicy` on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Returns permissions that the caller has on the specified instance resource. + // + // Attempting this RPC on a non-existent Cloud Spanner instance resource will + // result in a NOT_FOUND error if the user has `spanner.instances.list` + // permission on the containing Google Cloud Project. Otherwise returns an + // empty set of permissions. 
+ TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +type instanceAdminClient struct { + cc *grpc.ClientConn +} + +func NewInstanceAdminClient(cc *grpc.ClientConn) InstanceAdminClient { + return &instanceAdminClient{cc} +} + +func (c *instanceAdminClient) ListInstanceConfigs(ctx context.Context, in *ListInstanceConfigsRequest, opts ...grpc.CallOption) (*ListInstanceConfigsResponse, error) { + out := new(ListInstanceConfigsResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) GetInstanceConfig(ctx context.Context, in *GetInstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfig, error) { + out := new(InstanceConfig) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) { + out := new(ListInstancesResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) { + out := new(Instance) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceAdminClient) TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) { + out := new(google_iam_v11.TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for InstanceAdmin service + +type InstanceAdminServer interface { + // Lists the supported instance configurations for a given project. + ListInstanceConfigs(context.Context, *ListInstanceConfigsRequest) (*ListInstanceConfigsResponse, error) + // Gets information about a particular instance configuration. + GetInstanceConfig(context.Context, *GetInstanceConfigRequest) (*InstanceConfig, error) + // Lists all instances in the given project. + ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error) + // Gets information about a particular instance. + GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) + // Creates an instance and begins preparing it to begin serving. The + // returned [long-running operation][google.longrunning.Operation] + // can be used to track the progress of preparing the new + // instance. The instance name is assigned by the caller. If the + // named instance already exists, `CreateInstance` returns + // `ALREADY_EXISTS`. + // + // Immediately upon completion of this request: + // + // * The instance is readable via the API, with all requested attributes + // but no allocated resources. Its state is `CREATING`. + // + // Until completion of the returned operation: + // + // * Cancelling the operation renders the instance immediately unreadable + // via the API. + // * The instance can be deleted. + // * All other attempts to modify the instance are rejected. 
+ // + // Upon completion of the returned operation: + // + // * Billing for all successfully-allocated resources begins (some types + // may have lower than the requested levels). + // * Databases can be created in the instance. + // * The instance's allocated resource levels are readable via the API. + // * The instance's state becomes `READY`. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track creation of the instance. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + CreateInstance(context.Context, *CreateInstanceRequest) (*google_longrunning.Operation, error) + // Updates an instance, and begins allocating or releasing resources + // as requested. The returned [long-running + // operation][google.longrunning.Operation] can be used to track the + // progress of updating the instance. If the named instance does not + // exist, returns `NOT_FOUND`. + // + // Immediately upon completion of this request: + // + // * For resource types for which a decrease in the instance's allocation + // has been requested, billing is based on the newly-requested level. + // + // Until completion of the returned operation: + // + // * Cancelling the operation sets its metadata's + // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins + // restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, + // after which point it terminates with a `CANCELLED` status. + // * All other attempts to modify the instance are rejected. 
+ // * Reading the instance via the API continues to give the pre-request + // resource levels. + // + // Upon completion of the returned operation: + // + // * Billing begins for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources are available for serving the instance's + // tables. + // * The instance's new resource levels are readable via the API. + // + // The returned [long-running operation][google.longrunning.Operation] will + // have a name of the format `/operations/` and + // can be used to track the instance modification. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Instance][google.spanner.admin.instance.v1.Instance], if successful. + // + // Authorization requires `spanner.instances.update` permission on + // resource [name][google.spanner.admin.instance.v1.Instance.name]. + UpdateInstance(context.Context, *UpdateInstanceRequest) (*google_longrunning.Operation, error) + // Deletes an instance. + // + // Immediately upon completion of the request: + // + // * Billing ceases for all of the instance's reserved resources. + // + // Soon afterward: + // + // * The instance and *all of its databases* immediately and + // irrevocably disappear from the API. All data in the databases + // is permanently deleted. + DeleteInstance(context.Context, *DeleteInstanceRequest) (*google_protobuf2.Empty, error) + // Sets the access control policy on an instance resource. Replaces any + // existing policy. + // + // Authorization requires `spanner.instances.setIamPolicy` on + // [resource][google.iam.v1.SetIamPolicyRequest.resource]. + SetIamPolicy(context.Context, *google_iam_v11.SetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Gets the access control policy for an instance resource. 
Returns an empty + // policy if an instance exists but does not have a policy set. + // + // Authorization requires `spanner.instances.getIamPolicy` on + // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + GetIamPolicy(context.Context, *google_iam_v11.GetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Returns permissions that the caller has on the specified instance resource. + // + // Attempting this RPC on a non-existent Cloud Spanner instance resource will + // result in a NOT_FOUND error if the user has `spanner.instances.list` + // permission on the containing Google Cloud Project. Otherwise returns an + // empty set of permissions. + TestIamPermissions(context.Context, *google_iam_v11.TestIamPermissionsRequest) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +func RegisterInstanceAdminServer(s *grpc.Server, srv InstanceAdminServer) { + s.RegisterService(&_InstanceAdmin_serviceDesc, srv) +} + +func _InstanceAdmin_ListInstanceConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInstanceConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).ListInstanceConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).ListInstanceConfigs(ctx, req.(*ListInstanceConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_GetInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(InstanceAdminServer).GetInstanceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).GetInstanceConfig(ctx, req.(*GetInstanceConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_ListInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInstancesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).ListInstances(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).ListInstances(ctx, req.(*ListInstancesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_GetInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).GetInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).GetInstance(ctx, req.(*GetInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_CreateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(CreateInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).CreateInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).CreateInstance(ctx, req.(*CreateInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_UpdateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).UpdateInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).UpdateInstance(ctx, req.(*UpdateInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_DeleteInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteInstanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).DeleteInstance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).DeleteInstance(ctx, req.(*DeleteInstanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_SetIamPolicy_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).SetIamPolicy(ctx, req.(*google_iam_v11.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceAdminServer).GetIamPolicy(ctx, req.(*google_iam_v11.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceAdminServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(InstanceAdminServer).TestIamPermissions(ctx, req.(*google_iam_v11.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _InstanceAdmin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.spanner.admin.instance.v1.InstanceAdmin", + HandlerType: (*InstanceAdminServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListInstanceConfigs", + Handler: _InstanceAdmin_ListInstanceConfigs_Handler, + }, + { + MethodName: "GetInstanceConfig", + Handler: _InstanceAdmin_GetInstanceConfig_Handler, + }, + { + MethodName: "ListInstances", + Handler: _InstanceAdmin_ListInstances_Handler, + }, + { + MethodName: "GetInstance", + Handler: _InstanceAdmin_GetInstance_Handler, + }, + { + MethodName: "CreateInstance", + Handler: _InstanceAdmin_CreateInstance_Handler, + }, + { + MethodName: "UpdateInstance", + Handler: _InstanceAdmin_UpdateInstance_Handler, + }, + { + MethodName: "DeleteInstance", + Handler: _InstanceAdmin_DeleteInstance_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _InstanceAdmin_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _InstanceAdmin_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _InstanceAdmin_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/spanner/admin/instance/v1/spanner_instance_admin.proto", +} + +func init() { + proto.RegisterFile("google/spanner/admin/instance/v1/spanner_instance_admin.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 1198 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x57, 0xcf, 0x8f, 0xdb, 0xc4, + 0x17, 0xff, 0xce, 0x6e, 0xb3, 0xdd, 0xbc, 0x6c, 0xb7, 0xdb, 0xf9, 0x36, 0x55, 0x48, 0x29, 0x4d, + 0x5d, 0x54, 0xd2, 0x80, 0x6c, 0x1a, 0xe8, 0xaf, 0x2d, 0x7b, 0xd8, 0xa6, 0xd9, 0x34, 0x52, 0xbb, + 0xac, 0x9c, 0x14, 0x09, 0x38, 0x44, 0xd3, 0x64, 0x36, 0x98, 0xb5, 0xc7, 0xc6, 0x33, 0xa9, 0xd8, + 0xa2, 0x5e, 0x2a, 
0x0e, 0x20, 0x21, 0x71, 0x40, 0x42, 0xa8, 0x17, 0x24, 0x8e, 0x20, 0x71, 0xe0, + 0xc0, 0x3f, 0xc0, 0x9f, 0xc0, 0xbf, 0x80, 0xf8, 0x3b, 0xd0, 0x8c, 0x3d, 0x69, 0xec, 0x24, 0x9b, + 0xa4, 0xa2, 0x27, 0x6e, 0x9e, 0xf7, 0xf3, 0xe3, 0xcf, 0x7b, 0x7e, 0xcf, 0x03, 0x5b, 0x7d, 0xdf, + 0xef, 0xbb, 0xd4, 0xe2, 0x01, 0x61, 0x8c, 0x86, 0x16, 0xe9, 0x79, 0x0e, 0xb3, 0x1c, 0xc6, 0x05, + 0x61, 0x5d, 0x6a, 0x3d, 0xba, 0xa2, 0x35, 0x1d, 0x2d, 0xeb, 0x28, 0x13, 0x33, 0x08, 0x7d, 0xe1, + 0xe3, 0x52, 0xe4, 0x6e, 0xc6, 0x46, 0x66, 0xa4, 0xd3, 0xa6, 0xe6, 0xa3, 0x2b, 0xc5, 0x57, 0xe3, + 0x04, 0x24, 0x70, 0x2c, 0xc2, 0x98, 0x2f, 0x88, 0x70, 0x7c, 0xc6, 0x23, 0xff, 0x62, 0x7e, 0x54, + 0x3b, 0x10, 0x9f, 0xc4, 0xe2, 0xd7, 0x62, 0xb1, 0x43, 0x3c, 0x09, 0xc1, 0x21, 0x5e, 0x27, 0xf0, + 0x5d, 0xa7, 0x7b, 0x18, 0xeb, 0x8b, 0x49, 0x7d, 0x42, 0x77, 0x31, 0xd6, 0xb9, 0x3e, 0xeb, 0x87, + 0x03, 0xc6, 0x1c, 0xd6, 0xb7, 0xfc, 0x80, 0x86, 0x89, 0xbc, 0x67, 0x63, 0x23, 0x75, 0x7a, 0x38, + 0xd8, 0xb7, 0xa8, 0x17, 0x08, 0x1d, 0xa1, 0x94, 0x56, 0xee, 0x3b, 0xd4, 0xed, 0x75, 0x3c, 0xc2, + 0x0f, 0x62, 0x8b, 0xf3, 0x69, 0x0b, 0xe1, 0x78, 0x94, 0x0b, 0xe2, 0x05, 0x91, 0x81, 0xd1, 0x80, + 0xf5, 0x66, 0x4c, 0x42, 0xcd, 0x67, 0xfb, 0x4e, 0x1f, 0x63, 0x38, 0xc6, 0x88, 0x47, 0x0b, 0xa8, + 0x84, 0xca, 0x59, 0x5b, 0x3d, 0xe3, 0x0b, 0xb0, 0xd6, 0x73, 0x78, 0xe0, 0x92, 0xc3, 0x8e, 0xd2, + 0x2d, 0x29, 0x5d, 0x2e, 0x96, 0xed, 0x12, 0x8f, 0x1a, 0x5f, 0x2e, 0xc3, 0xaa, 0x8e, 0x34, 0x31, + 0xc6, 0x19, 0x58, 0xe9, 0xaa, 0x0c, 0xb1, 0x77, 0x7c, 0x1a, 0x8b, 0xbd, 0x3c, 0x16, 0x1b, 0x9f, + 0x03, 0x60, 0x7e, 0x8f, 0x76, 0xba, 0xfe, 0x80, 0x89, 0x42, 0xa6, 0x84, 0xca, 0x19, 0x3b, 0x2b, + 0x25, 0x35, 0x29, 0xc0, 0x3b, 0x90, 0xe1, 0x82, 0x08, 0x5a, 0x58, 0x29, 0xa1, 0xf2, 0x7a, 0xf5, + 0x6d, 0x73, 0x56, 0xad, 0x4d, 0x0d, 0xd4, 0x6c, 0x49, 0x3f, 0x3b, 0x72, 0xc7, 0xbb, 0xb0, 0xe2, + 0x92, 0x87, 0xd4, 0xe5, 0x85, 0xe3, 0xa5, 0xe5, 0x72, 0xae, 0x7a, 0x6d, 0x81, 0x40, 0xf7, 0x94, + 0x63, 0x9d, 0x89, 0xf0, 0xd0, 0x8e, 0xa3, 0x14, 0x6f, 
0x42, 0x6e, 0x44, 0x8c, 0x37, 0x60, 0xf9, + 0x80, 0x1e, 0xc6, 0x9c, 0xc8, 0x47, 0x7c, 0x1a, 0x32, 0x8f, 0x88, 0x3b, 0xd0, 0x7c, 0x46, 0x87, + 0xcd, 0xa5, 0x1b, 0xc8, 0xb8, 0x0e, 0x19, 0x05, 0x0d, 0xe7, 0xe1, 0x54, 0xab, 0xbd, 0xdd, 0xae, + 0x77, 0x1e, 0xec, 0xb6, 0xf6, 0xea, 0xb5, 0xe6, 0x4e, 0xb3, 0x7e, 0x67, 0xe3, 0x7f, 0x78, 0x0d, + 0x56, 0x6b, 0x76, 0x7d, 0xbb, 0xdd, 0xdc, 0x6d, 0x6c, 0x20, 0x9c, 0x85, 0x8c, 0x5d, 0xdf, 0xbe, + 0xf3, 0xe1, 0xc6, 0x92, 0x11, 0x40, 0xf1, 0x9e, 0xc3, 0x45, 0xb2, 0xa6, 0xdc, 0xa6, 0x9f, 0x0d, + 0x28, 0x17, 0xb2, 0x06, 0x01, 0x09, 0x29, 0x13, 0x31, 0x8a, 0xf8, 0x84, 0xcf, 0x42, 0x36, 0x20, + 0x7d, 0xda, 0xe1, 0xce, 0xe3, 0x08, 0x4c, 0xc6, 0x5e, 0x95, 0x82, 0x96, 0xf3, 0x58, 0xb1, 0xaf, + 0x94, 0xc2, 0x3f, 0xa0, 0x2c, 0x2e, 0x8f, 0x32, 0x6f, 0x4b, 0x81, 0xf1, 0x13, 0x82, 0xb3, 0x13, + 0x53, 0xf2, 0xc0, 0x67, 0x9c, 0xe2, 0x8f, 0x61, 0x63, 0xf8, 0x45, 0x46, 0x25, 0xe7, 0x05, 0xa4, + 0xf8, 0x5d, 0xa0, 0x50, 0x51, 0x50, 0xfb, 0xa4, 0x93, 0x4c, 0x82, 0x2f, 0xc1, 0x49, 0x46, 0x3f, + 0x17, 0x9d, 0x11, 0x80, 0x11, 0x97, 0x27, 0xa4, 0x78, 0x6f, 0x08, 0xd2, 0x84, 0x42, 0x83, 0xa6, + 0x20, 0x6a, 0x52, 0x26, 0x34, 0xab, 0x51, 0x06, 0x3c, 0x62, 0x7f, 0x94, 0xe5, 0x0f, 0x08, 0xf2, + 0xb5, 0x90, 0x12, 0x41, 0xd3, 0xd6, 0xd3, 0xc8, 0x3e, 0x0f, 0xb9, 0x21, 0x21, 0x4e, 0x2f, 0xc6, + 0x0b, 0x5a, 0xd4, 0xec, 0xe1, 0x1d, 0x58, 0xd5, 0x27, 0x45, 0x77, 0xae, 0x5a, 0x99, 0x9f, 0x29, + 0x7b, 0xe8, 0x6b, 0x3c, 0x45, 0x70, 0x7a, 0xb4, 0x32, 0x2f, 0xb3, 0x0d, 0x64, 0xcc, 0x7d, 0xc7, + 0x15, 0x34, 0x2c, 0x1c, 0x8b, 0x62, 0x46, 0x27, 0xe3, 0x6b, 0x04, 0xf9, 0x14, 0x88, 0xb8, 0x31, + 0xee, 0x42, 0x56, 0x43, 0xd5, 0x1d, 0xb1, 0xc8, 0x7b, 0x3e, 0x77, 0x9e, 0xbb, 0x0b, 0x9e, 0x21, + 0xc8, 0x3f, 0x08, 0x7a, 0x13, 0x6a, 0x35, 0x4a, 0x39, 0x7a, 0x71, 0xca, 0xf1, 0x4d, 0x80, 0xe7, + 0x33, 0x58, 0x81, 0xc8, 0x55, 0x8b, 0x3a, 0x92, 0x1e, 0xc2, 0xe6, 0x8e, 0x34, 0xb9, 0x4f, 0xf8, + 0x81, 0x9d, 0xdd, 0xd7, 0x8f, 0xc6, 0x9b, 0x90, 0xbf, 0x43, 0x5d, 0x3a, 0x8e, 0x6d, 0x52, 
0xd7, + 0x7d, 0xbb, 0x04, 0x67, 0x92, 0x5d, 0x77, 0x9f, 0x0a, 0xd2, 0x23, 0x82, 0xfc, 0x9b, 0xaf, 0xc2, + 0x05, 0x09, 0x45, 0x47, 0xae, 0x8c, 0xa9, 0xaf, 0xd2, 0xd6, 0xfb, 0xc4, 0xce, 0x2a, 0x6b, 0x79, + 0xc6, 0xb7, 0x20, 0xd7, 0x95, 0x31, 0xdc, 0xc8, 0x77, 0x79, 0xa6, 0x2f, 0x44, 0xe6, 0xca, 0xf9, + 0x2a, 0xac, 0x52, 0xd6, 0x8b, 0x3c, 0x8f, 0xcd, 0xf4, 0x3c, 0x4e, 0x59, 0x4f, 0x9e, 0x14, 0x23, + 0xc9, 0xda, 0xfe, 0xc7, 0x19, 0xa9, 0xfe, 0xbd, 0x06, 0x27, 0xf4, 0x5b, 0x6c, 0xcb, 0xf7, 0xc3, + 0x7f, 0x20, 0xf8, 0xff, 0x84, 0x51, 0x8d, 0xdf, 0x9b, 0x4d, 0xc7, 0xf4, 0xa5, 0x52, 0xdc, 0x7a, + 0x41, 0xef, 0x68, 0x0c, 0x18, 0xd6, 0xd3, 0x3f, 0xff, 0xfa, 0x6e, 0xe9, 0x32, 0x7e, 0x43, 0xfe, + 0x20, 0x7d, 0x11, 0x4d, 0xa2, 0xad, 0x20, 0xf4, 0x3f, 0xa5, 0x5d, 0xc1, 0xad, 0xca, 0x13, 0x2b, + 0x3d, 0xf3, 0x7f, 0x43, 0x70, 0x6a, 0x6c, 0x98, 0xe3, 0xcd, 0xd9, 0x28, 0xa6, 0x6d, 0x80, 0xe2, + 0xc2, 0x8b, 0x28, 0x05, 0x5a, 0x7e, 0x92, 0x23, 0x90, 0xd3, 0x88, 0xad, 0xca, 0x13, 0xfc, 0x0b, + 0x82, 0x13, 0x89, 0x31, 0x88, 0xaf, 0x2d, 0x46, 0xdb, 0x90, 0xee, 0xeb, 0x0b, 0xfb, 0xc5, 0x44, + 0x5f, 0x56, 0x98, 0x2f, 0xe2, 0x0b, 0xb3, 0x88, 0xe6, 0xf8, 0x19, 0x82, 0xdc, 0x08, 0x5b, 0xf8, + 0xdd, 0x85, 0xc8, 0xd5, 0x48, 0x17, 0xf8, 0xca, 0x52, 0xe0, 0xa6, 0x11, 0xaa, 0xa8, 0xfc, 0x1e, + 0xc1, 0x7a, 0x72, 0xf6, 0xe1, 0x39, 0x38, 0x99, 0xb8, 0xa3, 0x8b, 0xe7, 0xb4, 0xe3, 0xc8, 0x4f, + 0xb8, 0xf9, 0xbe, 0xfe, 0x09, 0x37, 0xde, 0x52, 0xa8, 0x2e, 0x19, 0xb3, 0x29, 0xdb, 0x44, 0x15, + 0xfc, 0x23, 0x82, 0xf5, 0xe4, 0x08, 0x9a, 0x07, 0xd8, 0xc4, 0x85, 0x34, 0x0b, 0xd8, 0x55, 0x05, + 0xcc, 0xaa, 0x56, 0x14, 0xb0, 0x61, 0xb8, 0xa3, 0x78, 0x93, 0x08, 0xbf, 0x41, 0xb0, 0x9e, 0x5c, + 0x32, 0xf3, 0x20, 0x9c, 0xb8, 0x96, 0x8a, 0x67, 0xc6, 0x66, 0x50, 0x5d, 0x5e, 0x4d, 0x74, 0x25, + 0x2b, 0x73, 0x54, 0xf2, 0x2b, 0x04, 0x6b, 0x2d, 0x2a, 0x9a, 0xc4, 0xdb, 0x53, 0x17, 0x23, 0x6c, + 0xe8, 0x98, 0x0e, 0xf1, 0x64, 0xe6, 0x51, 0xa5, 0xce, 0x9b, 0x4f, 0xd9, 0x44, 0x5a, 0x63, 0x4b, + 0xa5, 0xbd, 0x6e, 0x54, 0x55, 
0xda, 0x90, 0x72, 0x7f, 0x10, 0x76, 0xa7, 0x93, 0xc1, 0x47, 0x22, + 0x4b, 0x66, 0x24, 0x94, 0xc6, 0x51, 0x50, 0x1a, 0x2f, 0x0d, 0x4a, 0x3f, 0x05, 0xe5, 0x57, 0x04, + 0xb8, 0x4d, 0xb9, 0x12, 0xd2, 0xd0, 0x73, 0x38, 0x97, 0xf7, 0x41, 0x5c, 0x4e, 0x25, 0x1b, 0x37, + 0xd1, 0xb0, 0x2e, 0xcf, 0x61, 0x19, 0xcf, 0x84, 0x9a, 0x82, 0xba, 0x65, 0xdc, 0x98, 0x0f, 0xaa, + 0x18, 0x8b, 0xb4, 0x89, 0x2a, 0xb7, 0x7f, 0x47, 0xf0, 0x7a, 0xd7, 0xf7, 0x66, 0x36, 0xd2, 0xed, + 0x57, 0x5a, 0x91, 0x2a, 0xb1, 0x95, 0xf6, 0x64, 0xfb, 0xec, 0xa1, 0x8f, 0xee, 0xc6, 0xee, 0x7d, + 0xdf, 0x25, 0xac, 0x6f, 0xfa, 0x61, 0xdf, 0xea, 0x53, 0xa6, 0x9a, 0xcb, 0x8a, 0x54, 0x24, 0x70, + 0xf8, 0xf4, 0xfb, 0xff, 0x2d, 0xfd, 0xfc, 0xf3, 0xd2, 0xa5, 0x46, 0x14, 0xaa, 0xe6, 0xfa, 0x83, + 0x9e, 0x19, 0x27, 0x35, 0x55, 0xb6, 0xe7, 0x37, 0xb6, 0x0f, 0xae, 0x3c, 0x5c, 0x51, 0xd1, 0xdf, + 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x53, 0x3b, 0xf8, 0x5c, 0x10, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go new file mode 100644 index 000000000..cb5e86eb9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go @@ -0,0 +1,438 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/spanner/v1/keys.proto + +/* +Package spanner is a generated protocol buffer package. 
+ +It is generated from these files: + google/spanner/v1/keys.proto + google/spanner/v1/mutation.proto + google/spanner/v1/query_plan.proto + google/spanner/v1/result_set.proto + google/spanner/v1/spanner.proto + google/spanner/v1/transaction.proto + google/spanner/v1/type.proto + +It has these top-level messages: + KeyRange + KeySet + Mutation + PlanNode + QueryPlan + ResultSet + PartialResultSet + ResultSetMetadata + ResultSetStats + CreateSessionRequest + Session + GetSessionRequest + DeleteSessionRequest + ExecuteSqlRequest + ReadRequest + BeginTransactionRequest + CommitRequest + CommitResponse + RollbackRequest + TransactionOptions + Transaction + TransactionSelector + Type + StructType +*/ +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// KeyRange represents a range of rows in a table or index. +// +// A range has a start key and an end key. These keys can be open or +// closed, indicating if the range includes rows with that key. +// +// Keys are represented by lists, where the ith value in the list +// corresponds to the ith component of the table or index primary key. +// Individual values are encoded as described [here][google.spanner.v1.TypeCode]. 
+// +// For example, consider the following table definition: +// +// CREATE TABLE UserEvents ( +// UserName STRING(MAX), +// EventDate STRING(10) +// ) PRIMARY KEY(UserName, EventDate); +// +// The following keys name rows in this table: +// +// ["Bob", "2014-09-23"] +// ["Alfred", "2015-06-12"] +// +// Since the `UserEvents` table's `PRIMARY KEY` clause names two +// columns, each `UserEvents` key has two elements; the first is the +// `UserName`, and the second is the `EventDate`. +// +// Key ranges with multiple components are interpreted +// lexicographically by component using the table or index key's declared +// sort order. For example, the following range returns all events for +// user `"Bob"` that occurred in the year 2015: +// +// "start_closed": ["Bob", "2015-01-01"] +// "end_closed": ["Bob", "2015-12-31"] +// +// Start and end keys can omit trailing key components. This affects the +// inclusion and exclusion of rows that exactly match the provided key +// components: if the key is closed, then rows that exactly match the +// provided components are included; if the key is open, then rows +// that exactly match are not included. 
+// +// For example, the following range includes all events for `"Bob"` that +// occurred during and after the year 2000: +// +// "start_closed": ["Bob", "2000-01-01"] +// "end_closed": ["Bob"] +// +// The next example retrieves all events for `"Bob"`: +// +// "start_closed": ["Bob"] +// "end_closed": ["Bob"] +// +// To retrieve events before the year 2000: +// +// "start_closed": ["Bob"] +// "end_open": ["Bob", "2000-01-01"] +// +// The following range includes all rows in the table: +// +// "start_closed": [] +// "end_closed": [] +// +// This range returns all users whose `UserName` begins with any +// character from A to C: +// +// "start_closed": ["A"] +// "end_open": ["D"] +// +// This range returns all users whose `UserName` begins with B: +// +// "start_closed": ["B"] +// "end_open": ["C"] +// +// Key ranges honor column sort order. For example, suppose a table is +// defined as follows: +// +// CREATE TABLE DescendingSortedTable { +// Key INT64, +// ... +// ) PRIMARY KEY(Key DESC); +// +// The following range retrieves all rows with key values between 1 +// and 100 inclusive: +// +// "start_closed": ["100"] +// "end_closed": ["1"] +// +// Note that 100 is passed as the start, and 1 is passed as the end, +// because `Key` is a descending column in the schema. +type KeyRange struct { + // The start key must be provided. It can be either closed or open. + // + // Types that are valid to be assigned to StartKeyType: + // *KeyRange_StartClosed + // *KeyRange_StartOpen + StartKeyType isKeyRange_StartKeyType `protobuf_oneof:"start_key_type"` + // The end key must be provided. It can be either closed or open. 
+ // + // Types that are valid to be assigned to EndKeyType: + // *KeyRange_EndClosed + // *KeyRange_EndOpen + EndKeyType isKeyRange_EndKeyType `protobuf_oneof:"end_key_type"` +} + +func (m *KeyRange) Reset() { *m = KeyRange{} } +func (m *KeyRange) String() string { return proto.CompactTextString(m) } +func (*KeyRange) ProtoMessage() {} +func (*KeyRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isKeyRange_StartKeyType interface { + isKeyRange_StartKeyType() +} +type isKeyRange_EndKeyType interface { + isKeyRange_EndKeyType() +} + +type KeyRange_StartClosed struct { + StartClosed *google_protobuf1.ListValue `protobuf:"bytes,1,opt,name=start_closed,json=startClosed,oneof"` +} +type KeyRange_StartOpen struct { + StartOpen *google_protobuf1.ListValue `protobuf:"bytes,2,opt,name=start_open,json=startOpen,oneof"` +} +type KeyRange_EndClosed struct { + EndClosed *google_protobuf1.ListValue `protobuf:"bytes,3,opt,name=end_closed,json=endClosed,oneof"` +} +type KeyRange_EndOpen struct { + EndOpen *google_protobuf1.ListValue `protobuf:"bytes,4,opt,name=end_open,json=endOpen,oneof"` +} + +func (*KeyRange_StartClosed) isKeyRange_StartKeyType() {} +func (*KeyRange_StartOpen) isKeyRange_StartKeyType() {} +func (*KeyRange_EndClosed) isKeyRange_EndKeyType() {} +func (*KeyRange_EndOpen) isKeyRange_EndKeyType() {} + +func (m *KeyRange) GetStartKeyType() isKeyRange_StartKeyType { + if m != nil { + return m.StartKeyType + } + return nil +} +func (m *KeyRange) GetEndKeyType() isKeyRange_EndKeyType { + if m != nil { + return m.EndKeyType + } + return nil +} + +func (m *KeyRange) GetStartClosed() *google_protobuf1.ListValue { + if x, ok := m.GetStartKeyType().(*KeyRange_StartClosed); ok { + return x.StartClosed + } + return nil +} + +func (m *KeyRange) GetStartOpen() *google_protobuf1.ListValue { + if x, ok := m.GetStartKeyType().(*KeyRange_StartOpen); ok { + return x.StartOpen + } + return nil +} + +func (m *KeyRange) GetEndClosed() 
*google_protobuf1.ListValue { + if x, ok := m.GetEndKeyType().(*KeyRange_EndClosed); ok { + return x.EndClosed + } + return nil +} + +func (m *KeyRange) GetEndOpen() *google_protobuf1.ListValue { + if x, ok := m.GetEndKeyType().(*KeyRange_EndOpen); ok { + return x.EndOpen + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*KeyRange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _KeyRange_OneofMarshaler, _KeyRange_OneofUnmarshaler, _KeyRange_OneofSizer, []interface{}{ + (*KeyRange_StartClosed)(nil), + (*KeyRange_StartOpen)(nil), + (*KeyRange_EndClosed)(nil), + (*KeyRange_EndOpen)(nil), + } +} + +func _KeyRange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*KeyRange) + // start_key_type + switch x := m.StartKeyType.(type) { + case *KeyRange_StartClosed: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StartClosed); err != nil { + return err + } + case *KeyRange_StartOpen: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StartOpen); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("KeyRange.StartKeyType has unexpected type %T", x) + } + // end_key_type + switch x := m.EndKeyType.(type) { + case *KeyRange_EndClosed: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndClosed); err != nil { + return err + } + case *KeyRange_EndOpen: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndOpen); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("KeyRange.EndKeyType has unexpected type %T", x) + } + return nil +} + +func _KeyRange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*KeyRange) + switch tag { + case 1: // start_key_type.start_closed + if wire != proto.WireBytes { + return 
true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.ListValue) + err := b.DecodeMessage(msg) + m.StartKeyType = &KeyRange_StartClosed{msg} + return true, err + case 2: // start_key_type.start_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.ListValue) + err := b.DecodeMessage(msg) + m.StartKeyType = &KeyRange_StartOpen{msg} + return true, err + case 3: // end_key_type.end_closed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.ListValue) + err := b.DecodeMessage(msg) + m.EndKeyType = &KeyRange_EndClosed{msg} + return true, err + case 4: // end_key_type.end_open + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.ListValue) + err := b.DecodeMessage(msg) + m.EndKeyType = &KeyRange_EndOpen{msg} + return true, err + default: + return false, nil + } +} + +func _KeyRange_OneofSizer(msg proto.Message) (n int) { + m := msg.(*KeyRange) + // start_key_type + switch x := m.StartKeyType.(type) { + case *KeyRange_StartClosed: + s := proto.Size(x.StartClosed) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *KeyRange_StartOpen: + s := proto.Size(x.StartOpen) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // end_key_type + switch x := m.EndKeyType.(type) { + case *KeyRange_EndClosed: + s := proto.Size(x.EndClosed) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *KeyRange_EndOpen: + s := proto.Size(x.EndOpen) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `KeySet` defines a collection of Cloud Spanner 
keys and/or key ranges. All +// the keys are expected to be in the same table or index. The keys need +// not be sorted in any particular way. +// +// If the same key is specified multiple times in the set (for example +// if two ranges, two keys, or a key and a range overlap), Cloud Spanner +// behaves as if the key were only specified once. +type KeySet struct { + // A list of specific keys. Entries in `keys` should have exactly as + // many elements as there are columns in the primary or index key + // with which this `KeySet` is used. Individual key values are + // encoded as described [here][google.spanner.v1.TypeCode]. + Keys []*google_protobuf1.ListValue `protobuf:"bytes,1,rep,name=keys" json:"keys,omitempty"` + // A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more information about + // key range specifications. + Ranges []*KeyRange `protobuf:"bytes,2,rep,name=ranges" json:"ranges,omitempty"` + // For convenience `all` can be set to `true` to indicate that this + // `KeySet` matches all keys in the table or index. Note that any keys + // specified in `keys` or `ranges` are only yielded once. 
+ All bool `protobuf:"varint,3,opt,name=all" json:"all,omitempty"` +} + +func (m *KeySet) Reset() { *m = KeySet{} } +func (m *KeySet) String() string { return proto.CompactTextString(m) } +func (*KeySet) ProtoMessage() {} +func (*KeySet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *KeySet) GetKeys() []*google_protobuf1.ListValue { + if m != nil { + return m.Keys + } + return nil +} + +func (m *KeySet) GetRanges() []*KeyRange { + if m != nil { + return m.Ranges + } + return nil +} + +func (m *KeySet) GetAll() bool { + if m != nil { + return m.All + } + return false +} + +func init() { + proto.RegisterType((*KeyRange)(nil), "google.spanner.v1.KeyRange") + proto.RegisterType((*KeySet)(nil), "google.spanner.v1.KeySet") +} + +func init() { proto.RegisterFile("google/spanner/v1/keys.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 356 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x6b, 0xf2, 0x30, + 0x18, 0xc7, 0xdf, 0x56, 0xf1, 0xd5, 0x28, 0xe2, 0x5b, 0x78, 0x59, 0x71, 0x3b, 0x88, 0xa7, 0x9d, + 0x52, 0x9c, 0x87, 0x0d, 0x3c, 0x0c, 0xea, 0x61, 0x03, 0x07, 0x93, 0x0a, 0x1e, 0x76, 0x91, 0x68, + 0x9f, 0x85, 0x62, 0x96, 0x84, 0x26, 0x95, 0xf5, 0xb4, 0xaf, 0xb2, 0xf3, 0x3e, 0xe5, 0x48, 0x9a, + 0x8e, 0x81, 0xb0, 0x79, 0x4b, 0xf8, 0x3f, 0xbf, 0xe7, 0x97, 0x3e, 0x4f, 0xd1, 0x05, 0x15, 0x82, + 0x32, 0x88, 0x94, 0x24, 0x9c, 0x43, 0x1e, 0x1d, 0x26, 0xd1, 0x1e, 0x4a, 0x85, 0x65, 0x2e, 0xb4, + 0x08, 0xfe, 0x55, 0x29, 0x76, 0x29, 0x3e, 0x4c, 0x86, 0x35, 0x40, 0x64, 0x16, 0x11, 0xce, 0x85, + 0x26, 0x3a, 0x13, 0xdc, 0x01, 0x5f, 0xa9, 0xbd, 0x6d, 0x8b, 0xe7, 0x48, 0xe9, 0xbc, 0xd8, 0xe9, + 0x2a, 0x1d, 0xbf, 0xfb, 0xa8, 0xbd, 0x80, 0x32, 0x21, 0x9c, 0x42, 0x70, 0x8b, 0x7a, 0x4a, 0x93, + 0x5c, 0x6f, 0x76, 0x4c, 0x28, 0x48, 0x43, 0x6f, 0xe4, 0x5d, 0x76, 0xaf, 0x86, 0xd8, 0x29, 0xeb, + 0x0e, 0xf8, 0x21, 0x53, 0x7a, 0x4d, 0x58, 0x01, 0xf7, 0x7f, 0x92, 0xae, 0x25, 0xe6, 
0x16, 0x08, + 0x66, 0x08, 0x55, 0x0d, 0x84, 0x04, 0x1e, 0xfa, 0x27, 0xe0, 0x1d, 0x5b, 0xff, 0x28, 0x81, 0x1b, + 0x18, 0x78, 0x5a, 0xbb, 0x1b, 0xbf, 0xc2, 0x5e, 0xd2, 0x01, 0x9e, 0x3a, 0xf3, 0x35, 0x6a, 0x1b, + 0xd8, 0x7a, 0x9b, 0x27, 0xa0, 0x7f, 0x81, 0xa7, 0xc6, 0x1a, 0x0f, 0x50, 0xbf, 0x7a, 0xf2, 0x1e, + 0xca, 0x8d, 0x2e, 0x25, 0xc4, 0x7d, 0xd4, 0x33, 0xad, 0xea, 0xfb, 0xf8, 0x0d, 0xb5, 0x16, 0x50, + 0xae, 0x40, 0x07, 0x18, 0x35, 0xcd, 0x26, 0x42, 0x6f, 0xd4, 0xf8, 0x59, 0x90, 0xd8, 0xba, 0x60, + 0x8a, 0x5a, 0xb9, 0x19, 0xac, 0x0a, 0x7d, 0x4b, 0x9c, 0xe3, 0xa3, 0xe5, 0xe1, 0x7a, 0xf8, 0x89, + 0x2b, 0x0d, 0x06, 0xa8, 0x41, 0x18, 0xb3, 0xdf, 0xdf, 0x4e, 0xcc, 0x31, 0x7e, 0x45, 0xff, 0x77, + 0xe2, 0xe5, 0x98, 0x8d, 0x3b, 0x0b, 0x28, 0xd5, 0xd2, 0xd8, 0x97, 0xde, 0xd3, 0x8d, 0xcb, 0xa9, + 0x60, 0x84, 0x53, 0x2c, 0x72, 0x1a, 0x51, 0xe0, 0xf6, 0x6d, 0x51, 0x15, 0x11, 0x99, 0xa9, 0x6f, + 0x7f, 0xd5, 0xcc, 0x1d, 0x3f, 0xfc, 0xb3, 0xbb, 0x0a, 0x9d, 0x33, 0x51, 0xa4, 0x78, 0xe5, 0x04, + 0xeb, 0xc9, 0xb6, 0x65, 0xf1, 0xe9, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x78, 0x80, 0x47, + 0x93, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go new file mode 100644 index 000000000..2d1e60427 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/spanner/v1/mutation.proto + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A modification to one or more Cloud Spanner rows. 
Mutations can be +// applied to a Cloud Spanner database by sending them in a +// [Commit][google.spanner.v1.Spanner.Commit] call. +type Mutation struct { + // Required. The operation to perform. + // + // Types that are valid to be assigned to Operation: + // *Mutation_Insert + // *Mutation_Update + // *Mutation_InsertOrUpdate + // *Mutation_Replace + // *Mutation_Delete_ + Operation isMutation_Operation `protobuf_oneof:"operation"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +type isMutation_Operation interface { + isMutation_Operation() +} + +type Mutation_Insert struct { + Insert *Mutation_Write `protobuf:"bytes,1,opt,name=insert,oneof"` +} +type Mutation_Update struct { + Update *Mutation_Write `protobuf:"bytes,2,opt,name=update,oneof"` +} +type Mutation_InsertOrUpdate struct { + InsertOrUpdate *Mutation_Write `protobuf:"bytes,3,opt,name=insert_or_update,json=insertOrUpdate,oneof"` +} +type Mutation_Replace struct { + Replace *Mutation_Write `protobuf:"bytes,4,opt,name=replace,oneof"` +} +type Mutation_Delete_ struct { + Delete *Mutation_Delete `protobuf:"bytes,5,opt,name=delete,oneof"` +} + +func (*Mutation_Insert) isMutation_Operation() {} +func (*Mutation_Update) isMutation_Operation() {} +func (*Mutation_InsertOrUpdate) isMutation_Operation() {} +func (*Mutation_Replace) isMutation_Operation() {} +func (*Mutation_Delete_) isMutation_Operation() {} + +func (m *Mutation) GetOperation() isMutation_Operation { + if m != nil { + return m.Operation + } + return nil +} + +func (m *Mutation) GetInsert() *Mutation_Write { + if x, ok := m.GetOperation().(*Mutation_Insert); ok { + return x.Insert + } + return nil +} + +func (m *Mutation) GetUpdate() *Mutation_Write { + if x, ok := m.GetOperation().(*Mutation_Update); ok { + return x.Update + } + return nil +} + 
+func (m *Mutation) GetInsertOrUpdate() *Mutation_Write { + if x, ok := m.GetOperation().(*Mutation_InsertOrUpdate); ok { + return x.InsertOrUpdate + } + return nil +} + +func (m *Mutation) GetReplace() *Mutation_Write { + if x, ok := m.GetOperation().(*Mutation_Replace); ok { + return x.Replace + } + return nil +} + +func (m *Mutation) GetDelete() *Mutation_Delete { + if x, ok := m.GetOperation().(*Mutation_Delete_); ok { + return x.Delete + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Mutation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Mutation_OneofMarshaler, _Mutation_OneofUnmarshaler, _Mutation_OneofSizer, []interface{}{ + (*Mutation_Insert)(nil), + (*Mutation_Update)(nil), + (*Mutation_InsertOrUpdate)(nil), + (*Mutation_Replace)(nil), + (*Mutation_Delete_)(nil), + } +} + +func _Mutation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Mutation) + // operation + switch x := m.Operation.(type) { + case *Mutation_Insert: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Insert); err != nil { + return err + } + case *Mutation_Update: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Update); err != nil { + return err + } + case *Mutation_InsertOrUpdate: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InsertOrUpdate); err != nil { + return err + } + case *Mutation_Replace: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Replace); err != nil { + return err + } + case *Mutation_Delete_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Delete); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Mutation.Operation has unexpected type %T", x) + } + return nil +} + +func _Mutation_OneofUnmarshaler(msg proto.Message, 
tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Mutation) + switch tag { + case 1: // operation.insert + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Write) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Insert{msg} + return true, err + case 2: // operation.update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Write) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Update{msg} + return true, err + case 3: // operation.insert_or_update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Write) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_InsertOrUpdate{msg} + return true, err + case 4: // operation.replace + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Write) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Replace{msg} + return true, err + case 5: // operation.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mutation_Delete) + err := b.DecodeMessage(msg) + m.Operation = &Mutation_Delete_{msg} + return true, err + default: + return false, nil + } +} + +func _Mutation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Mutation) + // operation + switch x := m.Operation.(type) { + case *Mutation_Insert: + s := proto.Size(x.Insert) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Update: + s := proto.Size(x.Update) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_InsertOrUpdate: + s := proto.Size(x.InsertOrUpdate) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Mutation_Replace: + s := proto.Size(x.Replace) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s 
+ case *Mutation_Delete_: + s := proto.Size(x.Delete) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Arguments to [insert][google.spanner.v1.Mutation.insert], [update][google.spanner.v1.Mutation.update], [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and +// [replace][google.spanner.v1.Mutation.replace] operations. +type Mutation_Write struct { + // Required. The table whose rows will be written. + Table string `protobuf:"bytes,1,opt,name=table" json:"table,omitempty"` + // The names of the columns in [table][google.spanner.v1.Mutation.Write.table] to be written. + // + // The list of columns must contain enough columns to allow + // Cloud Spanner to derive values for all primary key columns in the + // row(s) to be modified. + Columns []string `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"` + // The values to be written. `values` can contain more than one + // list of values. If it does, then multiple rows are written, one + // for each entry in `values`. Each list in `values` must have + // exactly as many entries as there are entries in [columns][google.spanner.v1.Mutation.Write.columns] + // above. Sending multiple lists is equivalent to sending multiple + // `Mutation`s, each containing one `values` entry and repeating + // [table][google.spanner.v1.Mutation.Write.table] and [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in each list are + // encoded as described [here][google.spanner.v1.TypeCode]. 
+ Values []*google_protobuf1.ListValue `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` +} + +func (m *Mutation_Write) Reset() { *m = Mutation_Write{} } +func (m *Mutation_Write) String() string { return proto.CompactTextString(m) } +func (*Mutation_Write) ProtoMessage() {} +func (*Mutation_Write) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +func (m *Mutation_Write) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *Mutation_Write) GetColumns() []string { + if m != nil { + return m.Columns + } + return nil +} + +func (m *Mutation_Write) GetValues() []*google_protobuf1.ListValue { + if m != nil { + return m.Values + } + return nil +} + +// Arguments to [delete][google.spanner.v1.Mutation.delete] operations. +type Mutation_Delete struct { + // Required. The table whose rows will be deleted. + Table string `protobuf:"bytes,1,opt,name=table" json:"table,omitempty"` + // Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. 
+ KeySet *KeySet `protobuf:"bytes,2,opt,name=key_set,json=keySet" json:"key_set,omitempty"` +} + +func (m *Mutation_Delete) Reset() { *m = Mutation_Delete{} } +func (m *Mutation_Delete) String() string { return proto.CompactTextString(m) } +func (*Mutation_Delete) ProtoMessage() {} +func (*Mutation_Delete) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 1} } + +func (m *Mutation_Delete) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *Mutation_Delete) GetKeySet() *KeySet { + if m != nil { + return m.KeySet + } + return nil +} + +func init() { + proto.RegisterType((*Mutation)(nil), "google.spanner.v1.Mutation") + proto.RegisterType((*Mutation_Write)(nil), "google.spanner.v1.Mutation.Write") + proto.RegisterType((*Mutation_Delete)(nil), "google.spanner.v1.Mutation.Delete") +} + +func init() { proto.RegisterFile("google/spanner/v1/mutation.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 402 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x41, 0x6b, 0xdb, 0x30, + 0x14, 0xc7, 0x97, 0x38, 0x71, 0x16, 0x85, 0x8d, 0xcd, 0x6c, 0xcc, 0x33, 0x3b, 0x78, 0x39, 0xe5, + 0x24, 0x13, 0xef, 0x32, 0xc8, 0x76, 0xc9, 0x06, 0x1b, 0x6c, 0xa1, 0xc1, 0xa1, 0x29, 0xf4, 0x12, + 0x14, 0xe7, 0xd5, 0x18, 0x2b, 0x92, 0x91, 0xe4, 0x40, 0xa0, 0x9f, 0xa5, 0x1f, 0xa0, 0x9f, 0xb2, + 0x58, 0x92, 0x4b, 0x68, 0xda, 0x92, 0x9e, 0xec, 0xc7, 0xfb, 0xff, 0xfe, 0xff, 0xf7, 0x24, 0xa1, + 0x30, 0xe3, 0x3c, 0xa3, 0x10, 0xc9, 0x92, 0x30, 0x06, 0x22, 0xda, 0x8d, 0xa3, 0x6d, 0xa5, 0x88, + 0xca, 0x39, 0xc3, 0xa5, 0xe0, 0x8a, 0x7b, 0xef, 0x8d, 0x02, 0x5b, 0x05, 0xde, 0x8d, 0x83, 0x2f, + 0x16, 0x22, 0x65, 0x1e, 0x11, 0xc6, 0xb8, 0xd1, 0x4b, 0x03, 0xdc, 0x77, 0x75, 0xb5, 0xae, 0xae, + 0x22, 0xa9, 0x44, 0x95, 0xaa, 0x07, 0xdd, 0x83, 0xc0, 0x02, 0xf6, 0x96, 0x1d, 0xde, 0x74, 0xd0, + 0xeb, 0x99, 0xcd, 0xf7, 0x26, 0xc8, 0xcd, 0x99, 0x04, 0xa1, 0xfc, 0x56, 0xd8, 0x1a, 0x0d, 0xe2, + 0xaf, 
0xf8, 0x68, 0x14, 0xdc, 0x88, 0xf1, 0x85, 0xc8, 0x15, 0xfc, 0x7d, 0x95, 0x58, 0xa4, 0x86, + 0xab, 0x72, 0x43, 0x14, 0xf8, 0xed, 0x17, 0xc0, 0x06, 0xf1, 0x66, 0xe8, 0x9d, 0xb1, 0x59, 0x71, + 0xb1, 0xb2, 0x36, 0xce, 0xe9, 0x36, 0x6f, 0x0d, 0x7c, 0x26, 0xce, 0x8d, 0xdd, 0x4f, 0xd4, 0x13, + 0x50, 0x52, 0x92, 0x82, 0xdf, 0x39, 0xdd, 0xa5, 0x61, 0xbc, 0x1f, 0xc8, 0xdd, 0x00, 0x05, 0x05, + 0x7e, 0x57, 0xd3, 0xc3, 0xe7, 0xe8, 0xdf, 0x5a, 0x59, 0xef, 0x62, 0x98, 0xa0, 0x40, 0x5d, 0xed, + 0xe8, 0x7d, 0x40, 0x5d, 0x45, 0xd6, 0x14, 0xf4, 0x69, 0xf6, 0x13, 0x53, 0x78, 0x3e, 0xea, 0xa5, + 0x9c, 0x56, 0x5b, 0x26, 0xfd, 0x76, 0xe8, 0x8c, 0xfa, 0x49, 0x53, 0x7a, 0x31, 0x72, 0x77, 0x84, + 0x56, 0x20, 0x7d, 0x27, 0x74, 0x46, 0x83, 0x38, 0x68, 0x62, 0x9b, 0x8b, 0xc5, 0xff, 0x73, 0xa9, + 0x96, 0xb5, 0x24, 0xb1, 0xca, 0x20, 0x41, 0xae, 0x19, 0xe0, 0x89, 0xb4, 0x18, 0xf5, 0x0a, 0xd8, + 0xaf, 0x24, 0x28, 0x7b, 0x2d, 0x9f, 0x1f, 0xd9, 0xe5, 0x1f, 0xec, 0x17, 0xa0, 0x12, 0xb7, 0xd0, + 0xdf, 0xe9, 0x00, 0xf5, 0x79, 0x09, 0x42, 0xaf, 0x37, 0xbd, 0x46, 0x1f, 0x53, 0xbe, 0x3d, 0x86, + 0xa6, 0x6f, 0x9a, 0x13, 0x98, 0xd7, 0xd3, 0xcd, 0x5b, 0x97, 0xdf, 0xad, 0x26, 0xe3, 0x94, 0xb0, + 0x0c, 0x73, 0x91, 0x45, 0x19, 0x30, 0x3d, 0x7b, 0x64, 0x5a, 0xa4, 0xcc, 0xe5, 0xc1, 0x3b, 0x9c, + 0xd8, 0xdf, 0xdb, 0xf6, 0xa7, 0x3f, 0x06, 0xfd, 0x45, 0x79, 0xb5, 0xc1, 0x0b, 0x1b, 0xb2, 0x1c, + 0xaf, 0x5d, 0x8d, 0x7f, 0xbb, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x94, 0x52, 0x99, 0xf3, 0x36, 0x03, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go new file mode 100644 index 000000000..4c8408d12 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go @@ -0,0 +1,285 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/spanner/v1/query_plan.proto + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between the two different kinds of +// nodes that can appear in a query plan. +type PlanNode_Kind int32 + +const ( + // Not specified. + PlanNode_KIND_UNSPECIFIED PlanNode_Kind = 0 + // Denotes a Relational operator node in the expression tree. Relational + // operators represent iterative processing of rows during query execution. + // For example, a `TableScan` operation that reads rows from a table. + PlanNode_RELATIONAL PlanNode_Kind = 1 + // Denotes a Scalar node in the expression tree. Scalar nodes represent + // non-iterable entities in the query plan. For example, constants or + // arithmetic operators appearing inside predicate expressions or references + // to column names. + PlanNode_SCALAR PlanNode_Kind = 2 +) + +var PlanNode_Kind_name = map[int32]string{ + 0: "KIND_UNSPECIFIED", + 1: "RELATIONAL", + 2: "SCALAR", +} +var PlanNode_Kind_value = map[string]int32{ + "KIND_UNSPECIFIED": 0, + "RELATIONAL": 1, + "SCALAR": 2, +} + +func (x PlanNode_Kind) String() string { + return proto.EnumName(PlanNode_Kind_name, int32(x)) +} +func (PlanNode_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +// Node information for nodes appearing in a [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes]. +type PlanNode struct { + // The `PlanNode`'s index in [node list][google.spanner.v1.QueryPlan.plan_nodes]. + Index int32 `protobuf:"varint,1,opt,name=index" json:"index,omitempty"` + // Used to determine the type of node. 
May be needed for visualizing + // different kinds of nodes differently. For example, If the node is a + // [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a condensed representation + // which can be used to directly embed a description of the node in its + // parent. + Kind PlanNode_Kind `protobuf:"varint,2,opt,name=kind,enum=google.spanner.v1.PlanNode_Kind" json:"kind,omitempty"` + // The display name for the node. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // List of child node `index`es and their relationship to this parent. + ChildLinks []*PlanNode_ChildLink `protobuf:"bytes,4,rep,name=child_links,json=childLinks" json:"child_links,omitempty"` + // Condensed representation for [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes. + ShortRepresentation *PlanNode_ShortRepresentation `protobuf:"bytes,5,opt,name=short_representation,json=shortRepresentation" json:"short_representation,omitempty"` + // Attributes relevant to the node contained in a group of key-value pairs. + // For example, a Parameter Reference node could have the following + // information in its metadata: + // + // { + // "parameter_reference": "param1", + // "parameter_type": "array" + // } + Metadata *google_protobuf1.Struct `protobuf:"bytes,6,opt,name=metadata" json:"metadata,omitempty"` + // The execution statistics associated with the node, contained in a group of + // key-value pairs. Only present if the plan was returned as a result of a + // profile query. For example, number of executions, number of rows/time per + // execution etc. 
+ ExecutionStats *google_protobuf1.Struct `protobuf:"bytes,7,opt,name=execution_stats,json=executionStats" json:"execution_stats,omitempty"` +} + +func (m *PlanNode) Reset() { *m = PlanNode{} } +func (m *PlanNode) String() string { return proto.CompactTextString(m) } +func (*PlanNode) ProtoMessage() {} +func (*PlanNode) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *PlanNode) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *PlanNode) GetKind() PlanNode_Kind { + if m != nil { + return m.Kind + } + return PlanNode_KIND_UNSPECIFIED +} + +func (m *PlanNode) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *PlanNode) GetChildLinks() []*PlanNode_ChildLink { + if m != nil { + return m.ChildLinks + } + return nil +} + +func (m *PlanNode) GetShortRepresentation() *PlanNode_ShortRepresentation { + if m != nil { + return m.ShortRepresentation + } + return nil +} + +func (m *PlanNode) GetMetadata() *google_protobuf1.Struct { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PlanNode) GetExecutionStats() *google_protobuf1.Struct { + if m != nil { + return m.ExecutionStats + } + return nil +} + +// Metadata associated with a parent-child relationship appearing in a +// [PlanNode][google.spanner.v1.PlanNode]. +type PlanNode_ChildLink struct { + // The node to which the link points. + ChildIndex int32 `protobuf:"varint,1,opt,name=child_index,json=childIndex" json:"child_index,omitempty"` + // The type of the link. For example, in Hash Joins this could be used to + // distinguish between the build child and the probe child, or in the case + // of the child being an output variable, to represent the tag associated + // with the output variable. 
+ Type string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` + // Only present if the child node is [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds + // to an output variable of the parent node. The field carries the name of + // the output variable. + // For example, a `TableScan` operator that reads rows from a table will + // have child links to the `SCALAR` nodes representing the output variables + // created for each column that is read by the operator. The corresponding + // `variable` fields will be set to the variable names assigned to the + // columns. + Variable string `protobuf:"bytes,3,opt,name=variable" json:"variable,omitempty"` +} + +func (m *PlanNode_ChildLink) Reset() { *m = PlanNode_ChildLink{} } +func (m *PlanNode_ChildLink) String() string { return proto.CompactTextString(m) } +func (*PlanNode_ChildLink) ProtoMessage() {} +func (*PlanNode_ChildLink) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +func (m *PlanNode_ChildLink) GetChildIndex() int32 { + if m != nil { + return m.ChildIndex + } + return 0 +} + +func (m *PlanNode_ChildLink) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PlanNode_ChildLink) GetVariable() string { + if m != nil { + return m.Variable + } + return "" +} + +// Condensed representation of a node and its subtree. Only present for +// `SCALAR` [PlanNode(s)][google.spanner.v1.PlanNode]. +type PlanNode_ShortRepresentation struct { + // A string representation of the expression subtree rooted at this node. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // A mapping of (subquery variable name) -> (subquery node id) for cases + // where the `description` string of this node references a `SCALAR` + // subquery contained in the expression subtree rooted at this node. The + // referenced `SCALAR` subquery may not necessarily be a direct child of + // this node. 
+ Subqueries map[string]int32 `protobuf:"bytes,2,rep,name=subqueries" json:"subqueries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *PlanNode_ShortRepresentation) Reset() { *m = PlanNode_ShortRepresentation{} } +func (m *PlanNode_ShortRepresentation) String() string { return proto.CompactTextString(m) } +func (*PlanNode_ShortRepresentation) ProtoMessage() {} +func (*PlanNode_ShortRepresentation) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 1} } + +func (m *PlanNode_ShortRepresentation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PlanNode_ShortRepresentation) GetSubqueries() map[string]int32 { + if m != nil { + return m.Subqueries + } + return nil +} + +// Contains an ordered list of nodes appearing in the query plan. +type QueryPlan struct { + // The nodes in the query plan. Plan nodes are returned in pre-order starting + // with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id` corresponds to its index in + // `plan_nodes`. 
+ PlanNodes []*PlanNode `protobuf:"bytes,1,rep,name=plan_nodes,json=planNodes" json:"plan_nodes,omitempty"` +} + +func (m *QueryPlan) Reset() { *m = QueryPlan{} } +func (m *QueryPlan) String() string { return proto.CompactTextString(m) } +func (*QueryPlan) ProtoMessage() {} +func (*QueryPlan) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *QueryPlan) GetPlanNodes() []*PlanNode { + if m != nil { + return m.PlanNodes + } + return nil +} + +func init() { + proto.RegisterType((*PlanNode)(nil), "google.spanner.v1.PlanNode") + proto.RegisterType((*PlanNode_ChildLink)(nil), "google.spanner.v1.PlanNode.ChildLink") + proto.RegisterType((*PlanNode_ShortRepresentation)(nil), "google.spanner.v1.PlanNode.ShortRepresentation") + proto.RegisterType((*QueryPlan)(nil), "google.spanner.v1.QueryPlan") + proto.RegisterEnum("google.spanner.v1.PlanNode_Kind", PlanNode_Kind_name, PlanNode_Kind_value) +} + +func init() { proto.RegisterFile("google/spanner/v1/query_plan.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xd1, 0x6e, 0xd3, 0x30, + 0x14, 0x25, 0x5d, 0x5b, 0xd6, 0x5b, 0xd4, 0x15, 0x6f, 0x68, 0x51, 0x41, 0x22, 0x54, 0x42, 0xea, + 0x53, 0xa2, 0x6d, 0x3c, 0x4c, 0x43, 0x08, 0xba, 0xae, 0x9b, 0xaa, 0x55, 0xa5, 0x38, 0xc0, 0x03, + 0x42, 0x8a, 0xdc, 0xc6, 0x74, 0x56, 0x53, 0x3b, 0xc4, 0x4e, 0xb5, 0x3e, 0xf0, 0x0b, 0x7c, 0x08, + 0x3f, 0xc4, 0xef, 0x20, 0x3b, 0x69, 0x18, 0x14, 0x55, 0xe2, 0xed, 0x5e, 0xdf, 0x73, 0x4f, 0x7c, + 0xcf, 0xb9, 0x0e, 0xb4, 0x67, 0x42, 0xcc, 0x22, 0xea, 0xc9, 0x98, 0x70, 0x4e, 0x13, 0x6f, 0x79, + 0xe4, 0x7d, 0x4d, 0x69, 0xb2, 0x0a, 0xe2, 0x88, 0x70, 0x37, 0x4e, 0x84, 0x12, 0xe8, 0x61, 0x86, + 0x71, 0x73, 0x8c, 0xbb, 0x3c, 0x6a, 0x3d, 0xc9, 0xdb, 0x48, 0xcc, 0x3c, 0xc2, 0xb9, 0x50, 0x44, + 0x31, 0xc1, 0x65, 0xd6, 0x50, 0x54, 0x4d, 0x36, 0x49, 0xbf, 0x78, 0x52, 0x25, 0xe9, 0x54, 0x65, + 0xd5, 0xf6, 0xf7, 
0x2a, 0xec, 0x8e, 0x23, 0xc2, 0x47, 0x22, 0xa4, 0xe8, 0x00, 0x2a, 0x8c, 0x87, + 0xf4, 0xd6, 0xb6, 0x1c, 0xab, 0x53, 0xc1, 0x59, 0x82, 0x5e, 0x40, 0x79, 0xce, 0x78, 0x68, 0x97, + 0x1c, 0xab, 0xd3, 0x38, 0x76, 0xdc, 0x8d, 0x0b, 0xb8, 0x6b, 0x02, 0xf7, 0x9a, 0xf1, 0x10, 0x1b, + 0x34, 0x7a, 0x06, 0x0f, 0x42, 0x26, 0xe3, 0x88, 0xac, 0x02, 0x4e, 0x16, 0xd4, 0xde, 0x71, 0xac, + 0x4e, 0x0d, 0xd7, 0xf3, 0xb3, 0x11, 0x59, 0x50, 0x74, 0x09, 0xf5, 0xe9, 0x0d, 0x8b, 0xc2, 0x20, + 0x62, 0x7c, 0x2e, 0xed, 0xb2, 0xb3, 0xd3, 0xa9, 0x1f, 0x3f, 0xdf, 0xc6, 0xdf, 0xd3, 0xf0, 0x21, + 0xe3, 0x73, 0x0c, 0xd3, 0x75, 0x28, 0xd1, 0x04, 0x0e, 0xe4, 0x8d, 0x48, 0x54, 0x90, 0xd0, 0x38, + 0xa1, 0x92, 0xf2, 0x4c, 0x00, 0xbb, 0xe2, 0x58, 0x9d, 0xfa, 0xb1, 0xb7, 0x8d, 0xd0, 0xd7, 0x7d, + 0xf8, 0x8f, 0x36, 0xbc, 0x2f, 0x37, 0x0f, 0xd1, 0x09, 0xec, 0x2e, 0xa8, 0x22, 0x21, 0x51, 0xc4, + 0xae, 0x1a, 0xde, 0xc3, 0x35, 0xef, 0x5a, 0x58, 0xd7, 0x37, 0xc2, 0xe2, 0x02, 0x88, 0xde, 0xc0, + 0x1e, 0xbd, 0xa5, 0xd3, 0x54, 0x33, 0x04, 0x52, 0x11, 0x25, 0xed, 0xfb, 0xdb, 0x7b, 0x1b, 0x05, + 0xde, 0xd7, 0xf0, 0xd6, 0x67, 0xa8, 0x15, 0x33, 0xa3, 0xa7, 0x6b, 0xbd, 0xee, 0x9a, 0x94, 0x09, + 0x31, 0x30, 0x4e, 0x21, 0x28, 0xab, 0x55, 0x4c, 0x8d, 0x53, 0x35, 0x6c, 0x62, 0xd4, 0x82, 0xdd, + 0x25, 0x49, 0x18, 0x99, 0x44, 0x6b, 0x0f, 0x8a, 0xbc, 0xf5, 0xd3, 0x82, 0xfd, 0x7f, 0x28, 0x80, + 0x1c, 0xa8, 0x87, 0x54, 0x4e, 0x13, 0x16, 0x1b, 0x1d, 0xad, 0xdc, 0xba, 0xdf, 0x47, 0x28, 0x00, + 0x90, 0xe9, 0x44, 0x2f, 0x27, 0xa3, 0xd2, 0x2e, 0x19, 0xe7, 0x5e, 0xff, 0xa7, 0xd0, 0xae, 0x5f, + 0x30, 0xf4, 0xb9, 0x4a, 0x56, 0xf8, 0x0e, 0x65, 0xeb, 0x15, 0xec, 0xfd, 0x55, 0x46, 0x4d, 0xd8, + 0x99, 0xd3, 0x55, 0x7e, 0x1b, 0x1d, 0xea, 0x7d, 0x5d, 0x92, 0x28, 0xcd, 0x06, 0xae, 0xe0, 0x2c, + 0x39, 0x2b, 0x9d, 0x5a, 0xed, 0x53, 0x28, 0xeb, 0x5d, 0x44, 0x07, 0xd0, 0xbc, 0x1e, 0x8c, 0x2e, + 0x82, 0x0f, 0x23, 0x7f, 0xdc, 0xef, 0x0d, 0x2e, 0x07, 0xfd, 0x8b, 0xe6, 0x3d, 0xd4, 0x00, 0xc0, + 0xfd, 0x61, 0xf7, 0xfd, 0xe0, 0xed, 0xa8, 0x3b, 0x6c, 
0x5a, 0x08, 0xa0, 0xea, 0xf7, 0xba, 0xc3, + 0x2e, 0x6e, 0x96, 0xda, 0x57, 0x50, 0x7b, 0xa7, 0xdf, 0x9c, 0xbe, 0x39, 0x3a, 0x03, 0xd0, 0x4f, + 0x2f, 0xe0, 0x22, 0xa4, 0xd2, 0xb6, 0xcc, 0x98, 0x8f, 0xb7, 0x8c, 0x89, 0x6b, 0x71, 0x1e, 0xc9, + 0xf3, 0x6f, 0xf0, 0x68, 0x2a, 0x16, 0x9b, 0xe0, 0xf3, 0x46, 0xc1, 0x3f, 0xd6, 0xee, 0x8f, 0xad, + 0x4f, 0xa7, 0x39, 0x68, 0x26, 0x22, 0xc2, 0x67, 0xae, 0x48, 0x66, 0xde, 0x8c, 0x72, 0xb3, 0x1b, + 0x5e, 0x56, 0x22, 0x31, 0x93, 0x77, 0x7e, 0x0b, 0x2f, 0xf3, 0xf0, 0x47, 0xe9, 0xf0, 0x2a, 0x6b, + 0xed, 0x45, 0x22, 0x0d, 0x5d, 0x3f, 0xff, 0xca, 0xc7, 0xa3, 0x49, 0xd5, 0xb4, 0x9f, 0xfc, 0x0a, + 0x00, 0x00, 0xff, 0xff, 0xcc, 0x06, 0x5f, 0x9c, 0x54, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go new file mode 100644 index 000000000..51b279b3f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go @@ -0,0 +1,311 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/spanner/v1/result_set.proto + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Results from [Read][google.spanner.v1.Spanner.Read] or +// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. +type ResultSet struct { + // Metadata about the result set, such as row type information. + Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // Each element in `rows` is a row whose format is defined by + // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. 
The ith element + // in each row matches the ith field in + // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements are + // encoded based on type as described + // [here][google.spanner.v1.TypeCode]. + Rows []*google_protobuf1.ListValue `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + // Query plan and execution statistics for the query that produced this + // result set. These can be requested by setting + // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. + Stats *ResultSetStats `protobuf:"bytes,3,opt,name=stats" json:"stats,omitempty"` +} + +func (m *ResultSet) Reset() { *m = ResultSet{} } +func (m *ResultSet) String() string { return proto.CompactTextString(m) } +func (*ResultSet) ProtoMessage() {} +func (*ResultSet) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *ResultSet) GetMetadata() *ResultSetMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ResultSet) GetRows() []*google_protobuf1.ListValue { + if m != nil { + return m.Rows + } + return nil +} + +func (m *ResultSet) GetStats() *ResultSetStats { + if m != nil { + return m.Stats + } + return nil +} + +// Partial results from a streaming read or SQL query. Streaming reads and +// SQL queries better tolerate large result sets, large rows, and large +// values, but are a little trickier to consume. +type PartialResultSet struct { + // Metadata about the result set, such as row type information. + // Only present in the first response. + Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` + // A streamed result set consists of a stream of values, which might + // be split into many `PartialResultSet` messages to accommodate + // large rows and/or large values. Every N complete values defines a + // row, where N is equal to the number of entries in + // [metadata.row_type.fields][google.spanner.v1.StructType.fields]. 
+ // + // Most values are encoded based on type as described + // [here][google.spanner.v1.TypeCode]. + // + // It is possible that the last value in values is "chunked", + // meaning that the rest of the value is sent in subsequent + // `PartialResultSet`(s). This is denoted by the [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] + // field. Two or more chunked values can be merged to form a + // complete value as follows: + // + // * `bool/number/null`: cannot be chunked + // * `string`: concatenate the strings + // * `list`: concatenate the lists. If the last element in a list is a + // `string`, `list`, or `object`, merge it with the first element in + // the next list by applying these rules recursively. + // * `object`: concatenate the (field name, field value) pairs. If a + // field name is duplicated, then apply these rules recursively + // to merge the field values. + // + // Some examples of merging: + // + // # Strings are concatenated. + // "foo", "bar" => "foobar" + // + // # Lists of non-strings are concatenated. + // [2, 3], [4] => [2, 3, 4] + // + // # Lists are concatenated, but the last and first elements are merged + // # because they are strings. + // ["a", "b"], ["c", "d"] => ["a", "bc", "d"] + // + // # Lists are concatenated, but the last and first elements are merged + // # because they are lists. Recursively, the last and first elements + // # of the inner lists are merged because they are strings. + // ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"] + // + // # Non-overlapping object fields are combined. + // {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"} + // + // # Overlapping object fields are merged. + // {"a": "1"}, {"a": "2"} => {"a": "12"} + // + // # Examples of merging objects containing lists of strings. + // {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]} + // + // For a more complete example, suppose a streaming SQL query is + // yielding a result set whose rows contain a single string + // field. 
The following `PartialResultSet`s might be yielded: + // + // { + // "metadata": { ... } + // "values": ["Hello", "W"] + // "chunked_value": true + // "resume_token": "Af65..." + // } + // { + // "values": ["orl"] + // "chunked_value": true + // "resume_token": "Bqp2..." + // } + // { + // "values": ["d"] + // "resume_token": "Zx1B..." + // } + // + // This sequence of `PartialResultSet`s encodes two rows, one + // containing the field value `"Hello"`, and a second containing the + // field value `"World" = "W" + "orl" + "d"`. + Values []*google_protobuf1.Value `protobuf:"bytes,2,rep,name=values" json:"values,omitempty"` + // If true, then the final value in [values][google.spanner.v1.PartialResultSet.values] is chunked, and must + // be combined with more values from subsequent `PartialResultSet`s + // to obtain a complete field value. + ChunkedValue bool `protobuf:"varint,3,opt,name=chunked_value,json=chunkedValue" json:"chunked_value,omitempty"` + // Streaming calls might be interrupted for a variety of reasons, such + // as TCP connection loss. If this occurs, the stream of results can + // be resumed by re-sending the original request and including + // `resume_token`. Note that executing any other transaction in the + // same session invalidates the token. + ResumeToken []byte `protobuf:"bytes,4,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` + // Query plan and execution statistics for the query that produced this + // streaming result set. These can be requested by setting + // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] and are sent + // only once with the last response in the stream. 
+ Stats *ResultSetStats `protobuf:"bytes,5,opt,name=stats" json:"stats,omitempty"` +} + +func (m *PartialResultSet) Reset() { *m = PartialResultSet{} } +func (m *PartialResultSet) String() string { return proto.CompactTextString(m) } +func (*PartialResultSet) ProtoMessage() {} +func (*PartialResultSet) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *PartialResultSet) GetMetadata() *ResultSetMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PartialResultSet) GetValues() []*google_protobuf1.Value { + if m != nil { + return m.Values + } + return nil +} + +func (m *PartialResultSet) GetChunkedValue() bool { + if m != nil { + return m.ChunkedValue + } + return false +} + +func (m *PartialResultSet) GetResumeToken() []byte { + if m != nil { + return m.ResumeToken + } + return nil +} + +func (m *PartialResultSet) GetStats() *ResultSetStats { + if m != nil { + return m.Stats + } + return nil +} + +// Metadata about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet]. +type ResultSetMetadata struct { + // Indicates the field names and types for the rows in the result + // set. For example, a SQL query like `"SELECT UserId, UserName FROM + // Users"` could return a `row_type` value like: + // + // "fields": [ + // { "name": "UserId", "type": { "code": "INT64" } }, + // { "name": "UserName", "type": { "code": "STRING" } }, + // ] + RowType *StructType `protobuf:"bytes,1,opt,name=row_type,json=rowType" json:"row_type,omitempty"` + // If the read or SQL query began a transaction as a side-effect, the + // information about the new transaction is yielded here. 
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` +} + +func (m *ResultSetMetadata) Reset() { *m = ResultSetMetadata{} } +func (m *ResultSetMetadata) String() string { return proto.CompactTextString(m) } +func (*ResultSetMetadata) ProtoMessage() {} +func (*ResultSetMetadata) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } + +func (m *ResultSetMetadata) GetRowType() *StructType { + if m != nil { + return m.RowType + } + return nil +} + +func (m *ResultSetMetadata) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +// Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet]. +type ResultSetStats struct { + // [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this result. + QueryPlan *QueryPlan `protobuf:"bytes,1,opt,name=query_plan,json=queryPlan" json:"query_plan,omitempty"` + // Aggregated statistics from the execution of the query. Only present when + // the query is profiled. 
For example, a query could return the statistics as + // follows: + // + // { + // "rows_returned": "3", + // "elapsed_time": "1.22 secs", + // "cpu_time": "1.19 secs" + // } + QueryStats *google_protobuf1.Struct `protobuf:"bytes,2,opt,name=query_stats,json=queryStats" json:"query_stats,omitempty"` +} + +func (m *ResultSetStats) Reset() { *m = ResultSetStats{} } +func (m *ResultSetStats) String() string { return proto.CompactTextString(m) } +func (*ResultSetStats) ProtoMessage() {} +func (*ResultSetStats) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{3} } + +func (m *ResultSetStats) GetQueryPlan() *QueryPlan { + if m != nil { + return m.QueryPlan + } + return nil +} + +func (m *ResultSetStats) GetQueryStats() *google_protobuf1.Struct { + if m != nil { + return m.QueryStats + } + return nil +} + +func init() { + proto.RegisterType((*ResultSet)(nil), "google.spanner.v1.ResultSet") + proto.RegisterType((*PartialResultSet)(nil), "google.spanner.v1.PartialResultSet") + proto.RegisterType((*ResultSetMetadata)(nil), "google.spanner.v1.ResultSetMetadata") + proto.RegisterType((*ResultSetStats)(nil), "google.spanner.v1.ResultSetStats") +} + +func init() { proto.RegisterFile("google/spanner/v1/result_set.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 482 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcb, 0x6e, 0x13, 0x31, + 0x14, 0x86, 0x35, 0xe9, 0x85, 0xd4, 0x13, 0x10, 0xb5, 0x04, 0x1d, 0x45, 0x05, 0xa5, 0x29, 0x8b, + 0xac, 0x3c, 0x4a, 0x59, 0x10, 0xa9, 0x9b, 0xaa, 0x2c, 0xd8, 0x80, 0x14, 0x9c, 0xa8, 0x0b, 0x36, + 0xa3, 0xd3, 0xc4, 0x0c, 0xa3, 0x3a, 0xf6, 0xd4, 0xf6, 0x24, 0xca, 0x82, 0x25, 0x62, 0xc9, 0x7b, + 0xf0, 0x00, 0x3c, 0x1f, 0xf2, 0x25, 0x17, 0x98, 0x08, 0x09, 0xa9, 0x3b, 0xc7, 0xfe, 0xfe, 0xf3, + 0x9f, 0xff, 0xcc, 0x09, 0xea, 0xe6, 0x52, 0xe6, 0x9c, 0xa5, 0xba, 0x04, 0x21, 0x98, 0x4a, 0xe7, + 0xfd, 0x54, 0x31, 0x5d, 0x71, 0x93, 0x69, 0x66, 0x48, 0xa9, 0xa4, 0x91, 
0xf8, 0xd8, 0x33, 0x24, + 0x30, 0x64, 0xde, 0x6f, 0x9f, 0x06, 0x19, 0x94, 0x45, 0x0a, 0x42, 0x48, 0x03, 0xa6, 0x90, 0x42, + 0x7b, 0xc1, 0xfa, 0xd5, 0xfd, 0xba, 0xad, 0x3e, 0xa7, 0xda, 0xa8, 0x6a, 0x12, 0xca, 0xb5, 0x77, + 0x58, 0xde, 0x57, 0x4c, 0x2d, 0xb3, 0x92, 0x83, 0x08, 0xcc, 0x79, 0x9d, 0x31, 0x0a, 0x84, 0x86, + 0x89, 0xf5, 0xf9, 0xcb, 0x66, 0x1b, 0x5a, 0x96, 0xcc, 0xbf, 0x76, 0x7f, 0x45, 0xe8, 0x88, 0xba, + 0x28, 0x23, 0x66, 0xf0, 0x15, 0x6a, 0xce, 0x98, 0x81, 0x29, 0x18, 0x48, 0xa2, 0x4e, 0xd4, 0x8b, + 0x2f, 0x5e, 0x91, 0x5a, 0x2c, 0xb2, 0xe6, 0x3f, 0x04, 0x96, 0xae, 0x55, 0x98, 0xa0, 0x7d, 0x25, + 0x17, 0x3a, 0x69, 0x74, 0xf6, 0x7a, 0xf1, 0x45, 0x7b, 0xa5, 0x5e, 0x65, 0x24, 0xef, 0x0b, 0x6d, + 0x6e, 0x80, 0x57, 0x8c, 0x3a, 0x0e, 0xbf, 0x41, 0x07, 0xda, 0x80, 0xd1, 0xc9, 0x9e, 0xb3, 0x3b, + 0xfb, 0x97, 0xdd, 0xc8, 0x82, 0xd4, 0xf3, 0xdd, 0x6f, 0x0d, 0xf4, 0x74, 0x08, 0xca, 0x14, 0xc0, + 0x1f, 0xb6, 0xff, 0xc3, 0xb9, 0x6d, 0x6f, 0x95, 0xe0, 0x79, 0x2d, 0x81, 0xef, 0x3e, 0x50, 0xf8, + 0x1c, 0x3d, 0x9e, 0x7c, 0xa9, 0xc4, 0x1d, 0x9b, 0x66, 0xee, 0xc6, 0xe5, 0x68, 0xd2, 0x56, 0xb8, + 0x74, 0x30, 0x3e, 0x43, 0x2d, 0xbb, 0x2e, 0x33, 0x96, 0x19, 0x79, 0xc7, 0x44, 0xb2, 0xdf, 0x89, + 0x7a, 0x2d, 0x1a, 0xfb, 0xbb, 0xb1, 0xbd, 0xda, 0xcc, 0xe1, 0xe0, 0x3f, 0xe7, 0xf0, 0x23, 0x42, + 0xc7, 0xb5, 0x40, 0x78, 0x80, 0x9a, 0x4a, 0x2e, 0x32, 0xfb, 0xa1, 0xc3, 0x20, 0x5e, 0xec, 0xa8, + 0x38, 0x72, 0x0b, 0x37, 0x5e, 0x96, 0x8c, 0x3e, 0x52, 0x72, 0x61, 0x0f, 0xf8, 0x0a, 0xc5, 0x5b, + 0x3b, 0x94, 0x34, 0x9c, 0xf8, 0xe5, 0x0e, 0xf1, 0x78, 0x43, 0xd1, 0x6d, 0x49, 0xf7, 0x7b, 0x84, + 0x9e, 0xfc, 0xd9, 0x2b, 0xbe, 0x44, 0x68, 0xb3, 0xbc, 0xa1, 0xa1, 0xd3, 0x1d, 0x35, 0x3f, 0x5a, + 0x68, 0xc8, 0x41, 0xd0, 0xa3, 0xfb, 0xd5, 0x11, 0x0f, 0x50, 0xec, 0xc5, 0x7e, 0x40, 0xbe, 0xa3, + 0x93, 0xda, 0x77, 0xf1, 0x61, 0xa8, 0x37, 0x72, 0xb6, 0xd7, 0x5f, 0xd1, 0xb3, 0x89, 0x9c, 0xd5, + 0x7d, 0xae, 0x37, 0xfd, 0x0d, 0xad, 0x7c, 0x18, 0x7d, 0x1a, 0x04, 0x28, 0x97, 0x1c, 0x44, 0x4e, + 0xa4, 0xca, 
0xd3, 0x9c, 0x09, 0x57, 0x3c, 0xf5, 0x4f, 0x50, 0x16, 0x7a, 0xeb, 0x4f, 0x74, 0x19, + 0x8e, 0x3f, 0x1b, 0x27, 0xef, 0xbc, 0xf4, 0x2d, 0x97, 0xd5, 0x94, 0x8c, 0x82, 0xcb, 0x4d, 0xff, + 0xf6, 0xd0, 0xc9, 0x5f, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xe1, 0x15, 0x1f, 0xa6, 0x3e, 0x04, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go new file mode 100644 index 000000000..e37b2f1be --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go @@ -0,0 +1,1215 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/spanner/v1/spanner.proto + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/serviceconfig" +import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf1 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Mode in which the query must be processed. +type ExecuteSqlRequest_QueryMode int32 + +const ( + // The default mode where only the query result, without any information + // about the query plan is returned. + ExecuteSqlRequest_NORMAL ExecuteSqlRequest_QueryMode = 0 + // This mode returns only the query plan, without any result rows or + // execution statistics information. + ExecuteSqlRequest_PLAN ExecuteSqlRequest_QueryMode = 1 + // This mode returns both the query plan and the execution statistics along + // with the result rows. 
+ ExecuteSqlRequest_PROFILE ExecuteSqlRequest_QueryMode = 2 +) + +var ExecuteSqlRequest_QueryMode_name = map[int32]string{ + 0: "NORMAL", + 1: "PLAN", + 2: "PROFILE", +} +var ExecuteSqlRequest_QueryMode_value = map[string]int32{ + "NORMAL": 0, + "PLAN": 1, + "PROFILE": 2, +} + +func (x ExecuteSqlRequest_QueryMode) String() string { + return proto.EnumName(ExecuteSqlRequest_QueryMode_name, int32(x)) +} +func (ExecuteSqlRequest_QueryMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor4, []int{4, 0} +} + +// The request for [CreateSession][google.spanner.v1.Spanner.CreateSession]. +type CreateSessionRequest struct { + // Required. The database in which the new session is created. + Database string `protobuf:"bytes,1,opt,name=database" json:"database,omitempty"` +} + +func (m *CreateSessionRequest) Reset() { *m = CreateSessionRequest{} } +func (m *CreateSessionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSessionRequest) ProtoMessage() {} +func (*CreateSessionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *CreateSessionRequest) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +// A session in the Cloud Spanner API. +type Session struct { + // Required. The name of the session. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *Session) Reset() { *m = Session{} } +func (m *Session) String() string { return proto.CompactTextString(m) } +func (*Session) ProtoMessage() {} +func (*Session) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *Session) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [GetSession][google.spanner.v1.Spanner.GetSession]. +type GetSessionRequest struct { + // Required. The name of the session to retrieve. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetSessionRequest) Reset() { *m = GetSessionRequest{} } +func (m *GetSessionRequest) String() string { return proto.CompactTextString(m) } +func (*GetSessionRequest) ProtoMessage() {} +func (*GetSessionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } + +func (m *GetSessionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. +type DeleteSessionRequest struct { + // Required. The name of the session to delete. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteSessionRequest) Reset() { *m = DeleteSessionRequest{} } +func (m *DeleteSessionRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSessionRequest) ProtoMessage() {} +func (*DeleteSessionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } + +func (m *DeleteSessionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and +// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. +type ExecuteSqlRequest struct { + // Required. The session in which the SQL query should be performed. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // The transaction to use. If none is provided, the default is a + // temporary read-only transaction with strong concurrency. + Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + // Required. The SQL query string. + Sql string `protobuf:"bytes,3,opt,name=sql" json:"sql,omitempty"` + // The SQL query string can contain parameter placeholders. A parameter + // placeholder consists of `'@'` followed by the parameter + // name. 
Parameter names consist of any combination of letters, + // numbers, and underscores. + // + // Parameters can appear anywhere that a literal value is expected. The same + // parameter name can be used more than once, for example: + // `"WHERE id > @msg_id AND id < @msg_id + 100"` + // + // It is an error to execute an SQL query with unbound parameters. + // + // Parameter values are specified using `params`, which is a JSON + // object whose keys are parameter names, and whose values are the + // corresponding parameter values. + Params *google_protobuf1.Struct `protobuf:"bytes,4,opt,name=params" json:"params,omitempty"` + // It is not always possible for Cloud Spanner to infer the right SQL type + // from a JSON value. For example, values of type `BYTES` and values + // of type `STRING` both appear in [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. + // + // In these cases, `param_types` can be used to specify the exact + // SQL type for some or all of the SQL query parameters. See the + // definition of [Type][google.spanner.v1.Type] for more information + // about SQL types. + ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // If this request is resuming a previously interrupted SQL query + // execution, `resume_token` should be copied from the last + // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this + // enables the new SQL query execution to resume where the last one left + // off. The rest of the request parameters must exactly match the + // request that yielded this token. + ResumeToken []byte `protobuf:"bytes,6,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` + // Used to control the amount of debugging information returned in + // [ResultSetStats][google.spanner.v1.ResultSetStats]. 
+ QueryMode ExecuteSqlRequest_QueryMode `protobuf:"varint,7,opt,name=query_mode,json=queryMode,enum=google.spanner.v1.ExecuteSqlRequest_QueryMode" json:"query_mode,omitempty"` +} + +func (m *ExecuteSqlRequest) Reset() { *m = ExecuteSqlRequest{} } +func (m *ExecuteSqlRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteSqlRequest) ProtoMessage() {} +func (*ExecuteSqlRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } + +func (m *ExecuteSqlRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *ExecuteSqlRequest) GetTransaction() *TransactionSelector { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *ExecuteSqlRequest) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + +func (m *ExecuteSqlRequest) GetParams() *google_protobuf1.Struct { + if m != nil { + return m.Params + } + return nil +} + +func (m *ExecuteSqlRequest) GetParamTypes() map[string]*Type { + if m != nil { + return m.ParamTypes + } + return nil +} + +func (m *ExecuteSqlRequest) GetResumeToken() []byte { + if m != nil { + return m.ResumeToken + } + return nil +} + +func (m *ExecuteSqlRequest) GetQueryMode() ExecuteSqlRequest_QueryMode { + if m != nil { + return m.QueryMode + } + return ExecuteSqlRequest_NORMAL +} + +// The request for [Read][google.spanner.v1.Spanner.Read] and +// [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. +type ReadRequest struct { + // Required. The session in which the read should be performed. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // The transaction to use. If none is provided, the default is a + // temporary read-only transaction with strong concurrency. + Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + // Required. The name of the table in the database to be read. 
+ Table string `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` + // If non-empty, the name of an index on [table][google.spanner.v1.ReadRequest.table]. This index is + // used instead of the table primary key when interpreting [key_set][google.spanner.v1.ReadRequest.key_set] + // and sorting result rows. See [key_set][google.spanner.v1.ReadRequest.key_set] for further information. + Index string `protobuf:"bytes,4,opt,name=index" json:"index,omitempty"` + // The columns of [table][google.spanner.v1.ReadRequest.table] to be returned for each row matching + // this request. + Columns []string `protobuf:"bytes,5,rep,name=columns" json:"columns,omitempty"` + // Required. `key_set` identifies the rows to be yielded. `key_set` names the + // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to be yielded, unless [index][google.spanner.v1.ReadRequest.index] + // is present. If [index][google.spanner.v1.ReadRequest.index] is present, then [key_set][google.spanner.v1.ReadRequest.key_set] instead names + // index keys in [index][google.spanner.v1.ReadRequest.index]. + // + // Rows are yielded in table primary key order (if [index][google.spanner.v1.ReadRequest.index] is empty) + // or index key order (if [index][google.spanner.v1.ReadRequest.index] is non-empty). + // + // It is not an error for the `key_set` to name rows that do not + // exist in the database. Read yields nothing for nonexistent rows. + KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet" json:"key_set,omitempty"` + // If greater than zero, only the first `limit` rows are yielded. If `limit` + // is zero, the default is no limit. + Limit int64 `protobuf:"varint,8,opt,name=limit" json:"limit,omitempty"` + // If this request is resuming a previously interrupted read, + // `resume_token` should be copied from the last + // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. 
Doing this + // enables the new read to resume where the last read left off. The + // rest of the request parameters must exactly match the request + // that yielded this token. + ResumeToken []byte `protobuf:"bytes,9,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } + +func (m *ReadRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *ReadRequest) GetTransaction() *TransactionSelector { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *ReadRequest) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *ReadRequest) GetIndex() string { + if m != nil { + return m.Index + } + return "" +} + +func (m *ReadRequest) GetColumns() []string { + if m != nil { + return m.Columns + } + return nil +} + +func (m *ReadRequest) GetKeySet() *KeySet { + if m != nil { + return m.KeySet + } + return nil +} + +func (m *ReadRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ReadRequest) GetResumeToken() []byte { + if m != nil { + return m.ResumeToken + } + return nil +} + +// The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. +type BeginTransactionRequest struct { + // Required. The session in which the transaction runs. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // Required. Options for the new transaction. 
+ Options *TransactionOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} +func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } + +func (m *BeginTransactionRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *BeginTransactionRequest) GetOptions() *TransactionOptions { + if m != nil { + return m.Options + } + return nil +} + +// The request for [Commit][google.spanner.v1.Spanner.Commit]. +type CommitRequest struct { + // Required. The session in which the transaction to be committed is running. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // Required. The transaction in which to commit. + // + // Types that are valid to be assigned to Transaction: + // *CommitRequest_TransactionId + // *CommitRequest_SingleUseTransaction + Transaction isCommitRequest_Transaction `protobuf_oneof:"transaction"` + // The mutations to be executed when this transaction commits. All + // mutations are applied atomically, in the order they appear in + // this list. 
+ Mutations []*Mutation `protobuf:"bytes,4,rep,name=mutations" json:"mutations,omitempty"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } + +type isCommitRequest_Transaction interface { + isCommitRequest_Transaction() +} + +type CommitRequest_TransactionId struct { + TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3,oneof"` +} +type CommitRequest_SingleUseTransaction struct { + SingleUseTransaction *TransactionOptions `protobuf:"bytes,3,opt,name=single_use_transaction,json=singleUseTransaction,oneof"` +} + +func (*CommitRequest_TransactionId) isCommitRequest_Transaction() {} +func (*CommitRequest_SingleUseTransaction) isCommitRequest_Transaction() {} + +func (m *CommitRequest) GetTransaction() isCommitRequest_Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *CommitRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *CommitRequest) GetTransactionId() []byte { + if x, ok := m.GetTransaction().(*CommitRequest_TransactionId); ok { + return x.TransactionId + } + return nil +} + +func (m *CommitRequest) GetSingleUseTransaction() *TransactionOptions { + if x, ok := m.GetTransaction().(*CommitRequest_SingleUseTransaction); ok { + return x.SingleUseTransaction + } + return nil +} + +func (m *CommitRequest) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CommitRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CommitRequest_OneofMarshaler, _CommitRequest_OneofUnmarshaler, _CommitRequest_OneofSizer, []interface{}{ + (*CommitRequest_TransactionId)(nil), + (*CommitRequest_SingleUseTransaction)(nil), + } +} + +func _CommitRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CommitRequest) + // transaction + switch x := m.Transaction.(type) { + case *CommitRequest_TransactionId: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.TransactionId) + case *CommitRequest_SingleUseTransaction: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SingleUseTransaction); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CommitRequest.Transaction has unexpected type %T", x) + } + return nil +} + +func _CommitRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CommitRequest) + switch tag { + case 2: // transaction.transaction_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Transaction = &CommitRequest_TransactionId{x} + return true, err + case 3: // transaction.single_use_transaction + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions) + err := b.DecodeMessage(msg) + m.Transaction = &CommitRequest_SingleUseTransaction{msg} + return true, err + default: + return false, nil + } +} + +func _CommitRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CommitRequest) + // transaction + switch x := m.Transaction.(type) { + case *CommitRequest_TransactionId: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.TransactionId))) + n += len(x.TransactionId) + case 
*CommitRequest_SingleUseTransaction: + s := proto.Size(x.SingleUseTransaction) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The response for [Commit][google.spanner.v1.Spanner.Commit]. +type CommitResponse struct { + // The Cloud Spanner timestamp at which the transaction committed. + CommitTimestamp *google_protobuf3.Timestamp `protobuf:"bytes,1,opt,name=commit_timestamp,json=commitTimestamp" json:"commit_timestamp,omitempty"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} } + +func (m *CommitResponse) GetCommitTimestamp() *google_protobuf3.Timestamp { + if m != nil { + return m.CommitTimestamp + } + return nil +} + +// The request for [Rollback][google.spanner.v1.Spanner.Rollback]. +type RollbackRequest struct { + // Required. The session in which the transaction to roll back is running. + Session string `protobuf:"bytes,1,opt,name=session" json:"session,omitempty"` + // Required. The transaction to roll back. 
+ TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} +func (*RollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} } + +func (m *RollbackRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *RollbackRequest) GetTransactionId() []byte { + if m != nil { + return m.TransactionId + } + return nil +} + +func init() { + proto.RegisterType((*CreateSessionRequest)(nil), "google.spanner.v1.CreateSessionRequest") + proto.RegisterType((*Session)(nil), "google.spanner.v1.Session") + proto.RegisterType((*GetSessionRequest)(nil), "google.spanner.v1.GetSessionRequest") + proto.RegisterType((*DeleteSessionRequest)(nil), "google.spanner.v1.DeleteSessionRequest") + proto.RegisterType((*ExecuteSqlRequest)(nil), "google.spanner.v1.ExecuteSqlRequest") + proto.RegisterType((*ReadRequest)(nil), "google.spanner.v1.ReadRequest") + proto.RegisterType((*BeginTransactionRequest)(nil), "google.spanner.v1.BeginTransactionRequest") + proto.RegisterType((*CommitRequest)(nil), "google.spanner.v1.CommitRequest") + proto.RegisterType((*CommitResponse)(nil), "google.spanner.v1.CommitResponse") + proto.RegisterType((*RollbackRequest)(nil), "google.spanner.v1.RollbackRequest") + proto.RegisterEnum("google.spanner.v1.ExecuteSqlRequest_QueryMode", ExecuteSqlRequest_QueryMode_name, ExecuteSqlRequest_QueryMode_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Spanner service + +type SpannerClient interface { + // Creates a new session. A session can be used to perform + // transactions that read and/or modify data in a Cloud Spanner database. + // Sessions are meant to be reused for many consecutive + // transactions. + // + // Sessions can only execute one transaction at a time. To execute + // multiple concurrent read-write/write-only transactions, create + // multiple sessions. Note that standalone reads and queries use a + // transaction internally, and count toward the one transaction + // limit. + // + // Cloud Spanner limits the number of sessions that can exist at any given + // time; thus, it is a good idea to delete idle and/or unneeded sessions. + // Aside from explicit deletes, Cloud Spanner can delete sessions for which no + // operations are sent for more than an hour. If a session is deleted, + // requests to it return `NOT_FOUND`. + // + // Idle sessions can be kept alive by sending a trivial SQL query + // periodically, e.g., `"SELECT 1"`. + CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error) + // Gets a session. Returns `NOT_FOUND` if the session does not exist. + // This is mainly useful for determining whether a session is still + // alive. + GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error) + // Ends a session, releasing server resources associated with it. + DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Executes an SQL query, returning all rows in a single reply. This + // method cannot be used to return a result set larger than 10 MiB; + // if the query yields more data than that, the query fails with + // a `FAILED_PRECONDITION` error. + // + // Queries inside read-write transactions might return `ABORTED`. 
If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + // + // Larger result sets can be fetched in streaming fashion by calling + // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error) + // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result + // set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there + // is no limit on the size of the returned result set. However, no + // individual row in the result set can exceed 100 MiB, and no + // column value can exceed 10 MiB. + ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error) + // Reads rows from the database using key lookups and scans, as a + // simple key/value style alternative to + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to + // return a result set larger than 10 MiB; if the read matches more + // data than that, the read fails with a `FAILED_PRECONDITION` + // error. + // + // Reads inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + // + // Larger result sets can be yielded in streaming fashion by calling + // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error) + // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a + // stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the + // size of the returned result set. 
However, no individual row in + // the result set can exceed 100 MiB, and no column value can exceed + // 10 MiB. + StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error) + // Begins a new transaction. This step can often be skipped: + // [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a + // side-effect. + BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error) + // Commits a transaction. The request includes the mutations to be + // applied to rows in the database. + // + // `Commit` might return an `ABORTED` error. This can occur at any time; + // commonly, the cause is conflicts with concurrent + // transactions. However, it can also happen for a variety of other + // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt + // the transaction from the beginning, re-using the same session. + Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) + // Rolls back a transaction, releasing any locks it holds. It is a good + // idea to call this for any transaction that includes one or more + // [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + // ultimately decides not to commit. + // + // `Rollback` returns `OK` if it successfully aborts the transaction, the + // transaction was already aborted, or the transaction is not + // found. `Rollback` never returns `ABORTED`. 
+ Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) +} + +type spannerClient struct { + cc *grpc.ClientConn +} + +func NewSpannerClient(cc *grpc.ClientConn) SpannerClient { + return &spannerClient{cc} +} + +func (c *spannerClient) CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error) { + out := new(Session) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/CreateSession", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error) { + out := new(Session) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/GetSession", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/DeleteSession", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error) { + out := new(ResultSet) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/ExecuteSql", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Spanner_serviceDesc.Streams[0], c.cc, "/google.spanner.v1.Spanner/ExecuteStreamingSql", opts...) 
+ if err != nil { + return nil, err + } + x := &spannerExecuteStreamingSqlClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Spanner_ExecuteStreamingSqlClient interface { + Recv() (*PartialResultSet, error) + grpc.ClientStream +} + +type spannerExecuteStreamingSqlClient struct { + grpc.ClientStream +} + +func (x *spannerExecuteStreamingSqlClient) Recv() (*PartialResultSet, error) { + m := new(PartialResultSet) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *spannerClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error) { + out := new(ResultSet) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/Read", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Spanner_serviceDesc.Streams[1], c.cc, "/google.spanner.v1.Spanner/StreamingRead", opts...) 
+ if err != nil { + return nil, err + } + x := &spannerStreamingReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Spanner_StreamingReadClient interface { + Recv() (*PartialResultSet, error) + grpc.ClientStream +} + +type spannerStreamingReadClient struct { + grpc.ClientStream +} + +func (x *spannerStreamingReadClient) Recv() (*PartialResultSet, error) { + m := new(PartialResultSet) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *spannerClient) BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error) { + out := new(Transaction) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/BeginTransaction", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { + out := new(CommitResponse) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/Commit", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spannerClient) Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.spanner.v1.Spanner/Rollback", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Spanner service + +type SpannerServer interface { + // Creates a new session. A session can be used to perform + // transactions that read and/or modify data in a Cloud Spanner database. + // Sessions are meant to be reused for many consecutive + // transactions. + // + // Sessions can only execute one transaction at a time. 
To execute + // multiple concurrent read-write/write-only transactions, create + // multiple sessions. Note that standalone reads and queries use a + // transaction internally, and count toward the one transaction + // limit. + // + // Cloud Spanner limits the number of sessions that can exist at any given + // time; thus, it is a good idea to delete idle and/or unneeded sessions. + // Aside from explicit deletes, Cloud Spanner can delete sessions for which no + // operations are sent for more than an hour. If a session is deleted, + // requests to it return `NOT_FOUND`. + // + // Idle sessions can be kept alive by sending a trivial SQL query + // periodically, e.g., `"SELECT 1"`. + CreateSession(context.Context, *CreateSessionRequest) (*Session, error) + // Gets a session. Returns `NOT_FOUND` if the session does not exist. + // This is mainly useful for determining whether a session is still + // alive. + GetSession(context.Context, *GetSessionRequest) (*Session, error) + // Ends a session, releasing server resources associated with it. + DeleteSession(context.Context, *DeleteSessionRequest) (*google_protobuf4.Empty, error) + // Executes an SQL query, returning all rows in a single reply. This + // method cannot be used to return a result set larger than 10 MiB; + // if the query yields more data than that, the query fails with + // a `FAILED_PRECONDITION` error. + // + // Queries inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + // + // Larger result sets can be fetched in streaming fashion by calling + // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. + ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error) + // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result + // set as a stream. 
Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there + // is no limit on the size of the returned result set. However, no + // individual row in the result set can exceed 100 MiB, and no + // column value can exceed 10 MiB. + ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error + // Reads rows from the database using key lookups and scans, as a + // simple key/value style alternative to + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to + // return a result set larger than 10 MiB; if the read matches more + // data than that, the read fails with a `FAILED_PRECONDITION` + // error. + // + // Reads inside read-write transactions might return `ABORTED`. If + // this occurs, the application should restart the transaction from + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. + // + // Larger result sets can be yielded in streaming fashion by calling + // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + Read(context.Context, *ReadRequest) (*ResultSet, error) + // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a + // stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the + // size of the returned result set. However, no individual row in + // the result set can exceed 100 MiB, and no column value can exceed + // 10 MiB. + StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error + // Begins a new transaction. This step can often be skipped: + // [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a + // side-effect. + BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error) + // Commits a transaction. The request includes the mutations to be + // applied to rows in the database. + // + // `Commit` might return an `ABORTED` error. 
This can occur at any time; + // commonly, the cause is conflicts with concurrent + // transactions. However, it can also happen for a variety of other + // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt + // the transaction from the beginning, re-using the same session. + Commit(context.Context, *CommitRequest) (*CommitResponse, error) + // Rolls back a transaction, releasing any locks it holds. It is a good + // idea to call this for any transaction that includes one or more + // [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + // ultimately decides not to commit. + // + // `Rollback` returns `OK` if it successfully aborts the transaction, the + // transaction was already aborted, or the transaction is not + // found. `Rollback` never returns `ABORTED`. + Rollback(context.Context, *RollbackRequest) (*google_protobuf4.Empty, error) +} + +func RegisterSpannerServer(s *grpc.Server, srv SpannerServer) { + s.RegisterService(&_Spanner_serviceDesc, srv) +} + +func _Spanner_CreateSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).CreateSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/CreateSession", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).CreateSession(ctx, req.(*CreateSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_GetSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SpannerServer).GetSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/GetSession", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).GetSession(ctx, req.(*GetSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_DeleteSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSessionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).DeleteSession(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/DeleteSession", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).DeleteSession(ctx, req.(*DeleteSessionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_ExecuteSql_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecuteSqlRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).ExecuteSql(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/ExecuteSql", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).ExecuteSql(ctx, req.(*ExecuteSqlRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_ExecuteStreamingSql_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ExecuteSqlRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SpannerServer).ExecuteStreamingSql(m, &spannerExecuteStreamingSqlServer{stream}) +} + +type Spanner_ExecuteStreamingSqlServer interface { + 
Send(*PartialResultSet) error + grpc.ServerStream +} + +type spannerExecuteStreamingSqlServer struct { + grpc.ServerStream +} + +func (x *spannerExecuteStreamingSqlServer) Send(m *PartialResultSet) error { + return x.ServerStream.SendMsg(m) +} + +func _Spanner_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).Read(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/Read", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).Read(ctx, req.(*ReadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_StreamingRead_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SpannerServer).StreamingRead(m, &spannerStreamingReadServer{stream}) +} + +type Spanner_StreamingReadServer interface { + Send(*PartialResultSet) error + grpc.ServerStream +} + +type spannerStreamingReadServer struct { + grpc.ServerStream +} + +func (x *spannerStreamingReadServer) Send(m *PartialResultSet) error { + return x.ServerStream.SendMsg(m) +} + +func _Spanner_BeginTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BeginTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).BeginTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/BeginTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).BeginTransaction(ctx, 
req.(*BeginTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).Commit(ctx, req.(*CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spanner_Rollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpannerServer).Rollback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.spanner.v1.Spanner/Rollback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpannerServer).Rollback(ctx, req.(*RollbackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Spanner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.spanner.v1.Spanner", + HandlerType: (*SpannerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSession", + Handler: _Spanner_CreateSession_Handler, + }, + { + MethodName: "GetSession", + Handler: _Spanner_GetSession_Handler, + }, + { + MethodName: "DeleteSession", + Handler: _Spanner_DeleteSession_Handler, + }, + { + MethodName: "ExecuteSql", + Handler: _Spanner_ExecuteSql_Handler, + }, + { + MethodName: "Read", + Handler: _Spanner_Read_Handler, + }, + { + MethodName: "BeginTransaction", + Handler: _Spanner_BeginTransaction_Handler, + }, + { + MethodName: "Commit", + 
Handler: _Spanner_Commit_Handler, + }, + { + MethodName: "Rollback", + Handler: _Spanner_Rollback_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ExecuteStreamingSql", + Handler: _Spanner_ExecuteStreamingSql_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamingRead", + Handler: _Spanner_StreamingRead_Handler, + ServerStreams: true, + }, + }, + Metadata: "google/spanner/v1/spanner.proto", +} + +func init() { proto.RegisterFile("google/spanner/v1/spanner.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 1202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x97, 0xcd, 0x6f, 0x1b, 0xc5, + 0x1b, 0xc7, 0xbb, 0x4e, 0x6a, 0xc7, 0x8f, 0x93, 0xd6, 0x9d, 0x5f, 0xda, 0xf8, 0xe7, 0x96, 0xd6, + 0xdd, 0x52, 0x6a, 0x59, 0xc2, 0x4b, 0x0d, 0x87, 0x62, 0x40, 0xb4, 0x6e, 0xdd, 0x36, 0x6a, 0x5e, + 0x9c, 0xb5, 0xdb, 0x4a, 0x95, 0x90, 0x35, 0xb6, 0x1f, 0xcc, 0xe2, 0x7d, 0xcb, 0xce, 0x6c, 0x54, + 0xab, 0xea, 0x85, 0x2b, 0x17, 0x5e, 0x0e, 0x70, 0xe0, 0x06, 0x27, 0xc4, 0x9d, 0x1b, 0xff, 0x04, + 0xff, 0x02, 0x17, 0xfe, 0x06, 0x2e, 0x68, 0x66, 0x77, 0x9d, 0x8d, 0x77, 0x9b, 0xa4, 0x04, 0x71, + 0xca, 0xbc, 0x7c, 0x9f, 0x79, 0x3e, 0xf3, 0x9d, 0xd9, 0x79, 0x1c, 0xb8, 0x32, 0x76, 0x9c, 0xb1, + 0x89, 0x1a, 0x73, 0xa9, 0x6d, 0xa3, 0xa7, 0xed, 0xdd, 0x8c, 0x9a, 0x75, 0xd7, 0x73, 0xb8, 0x43, + 0xce, 0x05, 0x82, 0x7a, 0x34, 0xba, 0x77, 0xb3, 0x7c, 0x29, 0x8c, 0xa1, 0xae, 0xa1, 0x51, 0xdb, + 0x76, 0x38, 0xe5, 0x86, 0x63, 0xb3, 0x20, 0xa0, 0x7c, 0x3e, 0x3e, 0xeb, 0xf3, 0xcf, 0xc2, 0xe1, + 0x8b, 0xe1, 0xb0, 0xec, 0x0d, 0xfc, 0x4f, 0x35, 0xb4, 0x5c, 0x3e, 0x0d, 0x27, 0x2f, 0xcd, 0x4f, + 0x32, 0xee, 0xf9, 0x43, 0x1e, 0xce, 0x5e, 0x99, 0x9f, 0xe5, 0x86, 0x85, 0x8c, 0x53, 0xcb, 0x9d, + 0x0b, 0x8f, 0x6d, 0x62, 0x82, 0xd3, 0x08, 0xa8, 0x92, 0x9c, 0xb5, 0xfc, 0x80, 0x39, 0x54, 0xa8, + 0x49, 0x85, 0x87, 0xcc, 0x37, 0x79, 0x9f, 0x61, 0x04, 0x71, 0x2d, 0xa9, 0xe1, 0x1e, 0xb5, 0x19, + 0x1d, 0xc6, 0x16, 0x4a, 
0x01, 0xe1, 0x53, 0x17, 0x83, 0x59, 0xb5, 0x01, 0xab, 0x77, 0x3d, 0xa4, + 0x1c, 0xbb, 0xc8, 0x98, 0xe1, 0xd8, 0x3a, 0xee, 0xfa, 0xc8, 0x38, 0x29, 0xc3, 0xd2, 0x88, 0x72, + 0x3a, 0xa0, 0x0c, 0x4b, 0x4a, 0x45, 0xa9, 0xe6, 0xf5, 0x59, 0x5f, 0x7d, 0x03, 0x72, 0xa1, 0x9a, + 0x10, 0x58, 0xb4, 0xa9, 0x15, 0x49, 0x64, 0x5b, 0xbd, 0x01, 0xe7, 0x1e, 0x20, 0x9f, 0x5b, 0x2f, + 0x4d, 0x58, 0x83, 0xd5, 0x7b, 0x68, 0x62, 0x22, 0x77, 0x9a, 0xf6, 0xcb, 0x45, 0x38, 0xd7, 0x7e, + 0x8e, 0x43, 0x9f, 0x63, 0x77, 0xd7, 0x8c, 0x94, 0x25, 0xc8, 0xb1, 0x20, 0x36, 0x14, 0x47, 0x5d, + 0xf2, 0x10, 0x0a, 0x31, 0x2b, 0x4a, 0x99, 0x8a, 0x52, 0x2d, 0x34, 0xde, 0xaa, 0x27, 0x2e, 0x4e, + 0xbd, 0xb7, 0xaf, 0xea, 0xa2, 0x89, 0x43, 0xee, 0x78, 0x7a, 0x3c, 0x94, 0x14, 0x61, 0x81, 0xed, + 0x9a, 0xa5, 0x05, 0xb9, 0xbe, 0x68, 0x12, 0x0d, 0xb2, 0x2e, 0xf5, 0xa8, 0xc5, 0x4a, 0x8b, 0x72, + 0xd9, 0xb5, 0x68, 0xd9, 0xe8, 0x32, 0xd4, 0xbb, 0xf2, 0xaa, 0xe8, 0xa1, 0x8c, 0x3c, 0x86, 0x82, + 0x6c, 0xf5, 0x85, 0xf1, 0xac, 0x74, 0xba, 0xb2, 0x50, 0x2d, 0x34, 0xde, 0x4b, 0x81, 0x49, 0xec, + 0xb0, 0xde, 0x11, 0x71, 0x3d, 0x11, 0xd6, 0xb6, 0xb9, 0x37, 0xd5, 0xc1, 0x9d, 0x0d, 0x90, 0xab, + 0xb0, 0x2c, 0xae, 0x84, 0x85, 0x7d, 0xee, 0x4c, 0xd0, 0x2e, 0x65, 0x2b, 0x4a, 0x75, 0x59, 0x2f, + 0x04, 0x63, 0x3d, 0x31, 0x44, 0x36, 0x01, 0x76, 0x7d, 0xf4, 0xa6, 0x7d, 0xcb, 0x19, 0x61, 0x29, + 0x57, 0x51, 0xaa, 0x67, 0x1a, 0xf5, 0x63, 0x25, 0xde, 0x11, 0x61, 0x9b, 0xce, 0x08, 0xf5, 0xfc, + 0x6e, 0xd4, 0x2c, 0x3f, 0x81, 0xb3, 0x73, 0x40, 0xc2, 0x9e, 0x09, 0x4e, 0x43, 0xfb, 0x45, 0x93, + 0xbc, 0x0d, 0xa7, 0xf7, 0xa8, 0xe9, 0x63, 0x68, 0xfa, 0x5a, 0x9a, 0xe9, 0x53, 0x17, 0xf5, 0x40, + 0xd5, 0xcc, 0xdc, 0x52, 0xd4, 0x3a, 0xe4, 0x67, 0xf9, 0x08, 0x40, 0x76, 0x6b, 0x5b, 0xdf, 0xbc, + 0xb3, 0x51, 0x3c, 0x45, 0x96, 0x60, 0xb1, 0xb3, 0x71, 0x67, 0xab, 0xa8, 0x90, 0x02, 0xe4, 0x3a, + 0xfa, 0xf6, 0xfd, 0xf5, 0x8d, 0x76, 0x31, 0xa3, 0xfe, 0x94, 0x81, 0x82, 0x8e, 0x74, 0xf4, 0x5f, + 0xde, 0x83, 0x55, 0x38, 0xcd, 0xe9, 0xc0, 0xc4, 0xf0, 0x26, 
0x04, 0x1d, 0x31, 0x6a, 0xd8, 0x23, + 0x7c, 0x2e, 0xaf, 0x42, 0x5e, 0x0f, 0x3a, 0x82, 0x67, 0xe8, 0x98, 0xbe, 0x65, 0x07, 0x87, 0x9d, + 0xd7, 0xa3, 0x2e, 0x69, 0x40, 0x6e, 0x82, 0x53, 0xf1, 0x0d, 0xcb, 0xe3, 0x2a, 0x34, 0xfe, 0x9f, + 0xc2, 0xf2, 0x08, 0xa7, 0x5d, 0xe4, 0x7a, 0x76, 0x22, 0xff, 0x8a, 0x1c, 0xa6, 0x61, 0x19, 0xbc, + 0xb4, 0x54, 0x51, 0xaa, 0x0b, 0x7a, 0xd0, 0x49, 0x9c, 0x7e, 0x3e, 0x71, 0xfa, 0x2a, 0x87, 0xb5, + 0x16, 0x8e, 0x0d, 0x3b, 0xb6, 0xb7, 0xa3, 0x1d, 0xfb, 0x18, 0x72, 0x8e, 0x2b, 0x1f, 0xcf, 0xd0, + 0xad, 0xeb, 0x87, 0xbb, 0xb5, 0x1d, 0x88, 0xf5, 0x28, 0x4a, 0xfd, 0x4b, 0x81, 0x95, 0xbb, 0x8e, + 0x65, 0x19, 0xfc, 0xe8, 0x64, 0x37, 0xe0, 0x4c, 0xcc, 0xe3, 0xbe, 0x31, 0x92, 0x39, 0x97, 0x1f, + 0x9e, 0xd2, 0x57, 0x62, 0xe3, 0xeb, 0x23, 0xf2, 0x09, 0x5c, 0x60, 0x86, 0x3d, 0x36, 0xb1, 0xef, + 0x33, 0xec, 0xc7, 0x8f, 0x74, 0xe1, 0x35, 0x20, 0x1f, 0x9e, 0xd2, 0x57, 0x83, 0x65, 0x1e, 0x33, + 0x8c, 0x4d, 0x93, 0xf7, 0x21, 0x1f, 0xbd, 0xbf, 0xe2, 0xab, 0x16, 0xdf, 0xe7, 0xc5, 0x94, 0x15, + 0x37, 0x43, 0x8d, 0xbe, 0xaf, 0x6e, 0xad, 0x1c, 0xb8, 0x61, 0xea, 0x53, 0x38, 0x13, 0x6d, 0x9e, + 0xb9, 0x8e, 0xcd, 0x90, 0xb4, 0xa1, 0x38, 0x94, 0x23, 0xfd, 0x59, 0x8d, 0x90, 0x36, 0x14, 0x1a, + 0xe5, 0xc4, 0xc3, 0xd1, 0x8b, 0x14, 0xfa, 0xd9, 0x20, 0x66, 0x36, 0xa0, 0xea, 0x70, 0x56, 0x77, + 0x4c, 0x73, 0x40, 0x87, 0x93, 0xa3, 0x7d, 0xbd, 0x9e, 0xee, 0xeb, 0x9c, 0xab, 0x8d, 0x3f, 0x97, + 0x21, 0xd7, 0x0d, 0xb6, 0x47, 0xbe, 0x17, 0xc7, 0x16, 0x2f, 0x05, 0xe4, 0x46, 0x8a, 0x03, 0x69, + 0xc5, 0xa2, 0x5c, 0x4e, 0x11, 0x86, 0x12, 0xb5, 0xf5, 0xc5, 0xef, 0x7f, 0x7c, 0x9b, 0xf9, 0x50, + 0x6d, 0x8a, 0xc2, 0xf3, 0x22, 0xaa, 0x21, 0x1f, 0xb9, 0x9e, 0xf3, 0x39, 0x0e, 0x39, 0xd3, 0x6a, + 0x9a, 0x61, 0x33, 0x4e, 0xed, 0x21, 0x8a, 0x76, 0x34, 0xcf, 0xb4, 0xda, 0x4b, 0x2d, 0xdc, 0x0c, + 0x23, 0x5f, 0x29, 0x00, 0xfb, 0x25, 0x85, 0xbc, 0x99, 0x92, 0x2e, 0x51, 0x71, 0x0e, 0x85, 0xba, + 0x2d, 0xa1, 0x9a, 0xe4, 0x96, 0x84, 0x12, 0x05, 0xe6, 0x18, 0x40, 0x33, 0x1e, 0xad, 0xf6, 0x92, + 
0x7c, 0xa3, 0xc0, 0xca, 0x81, 0xe2, 0x95, 0xea, 0x56, 0x5a, 0x79, 0x2b, 0x5f, 0x48, 0x9c, 0x7a, + 0x5b, 0xfc, 0xec, 0x88, 0xa0, 0x6a, 0xff, 0x1c, 0xea, 0x47, 0x05, 0x60, 0xff, 0x25, 0x4f, 0xf5, + 0x29, 0xf1, 0xd0, 0x97, 0x2f, 0xa5, 0xa8, 0x74, 0xf9, 0x4b, 0xa3, 0x8b, 0x5c, 0xdd, 0x91, 0x50, + 0x8f, 0xd4, 0xfb, 0x12, 0x2a, 0x4c, 0xf6, 0x9a, 0x5c, 0x4d, 0x9c, 0x25, 0x6d, 0x2a, 0x35, 0xf2, + 0x9b, 0x02, 0xff, 0x8b, 0x30, 0xb8, 0x87, 0xd4, 0x32, 0xec, 0xf1, 0xf1, 0x71, 0xaf, 0xa5, 0xa8, + 0x3a, 0xd4, 0xe3, 0x06, 0x35, 0xf7, 0xa9, 0x9f, 0x49, 0xea, 0x9e, 0xba, 0xfd, 0x6f, 0x50, 0xc7, + 0x18, 0x9b, 0x4a, 0xed, 0x1d, 0x85, 0x7c, 0xad, 0xc0, 0xa2, 0xa8, 0x3e, 0xe4, 0x72, 0xaa, 0x75, + 0xb3, 0xb2, 0x74, 0x84, 0xb5, 0x8f, 0x24, 0x64, 0x5b, 0xbd, 0x7d, 0x12, 0x48, 0x0f, 0xe9, 0x48, + 0x98, 0xfa, 0x8b, 0x02, 0x2b, 0x33, 0xd2, 0x63, 0xc1, 0x1d, 0xcb, 0xc8, 0x9e, 0x64, 0xdc, 0x52, + 0xd7, 0x4f, 0xc2, 0xc8, 0xe2, 0x5c, 0x81, 0x85, 0xbf, 0x2a, 0x50, 0x9c, 0x2f, 0x4d, 0xa4, 0x96, + 0x42, 0xf4, 0x8a, 0xfa, 0x55, 0xbe, 0x7c, 0xf8, 0x7b, 0xaf, 0x3e, 0x95, 0xe0, 0x3b, 0xea, 0xc6, + 0x49, 0xc0, 0x07, 0x73, 0xc9, 0x85, 0xd1, 0x3f, 0x28, 0x90, 0x0d, 0x1e, 0x78, 0x52, 0x49, 0x7b, + 0x1f, 0xe3, 0x85, 0xaf, 0x7c, 0xf5, 0x10, 0x45, 0x50, 0x1d, 0xd4, 0x4d, 0x09, 0xfa, 0x40, 0x6d, + 0x9d, 0x04, 0x34, 0xa8, 0x15, 0x02, 0xef, 0x3b, 0x05, 0x96, 0xa2, 0x32, 0x41, 0xd4, 0xb4, 0x2b, + 0x70, 0xb0, 0x86, 0xbc, 0xf2, 0x35, 0xda, 0x96, 0x5c, 0xeb, 0xea, 0xbd, 0x13, 0xdd, 0xce, 0x30, + 0x59, 0x53, 0xa9, 0xb5, 0x5e, 0xc0, 0xf9, 0xa1, 0x63, 0x25, 0x89, 0x5a, 0xcb, 0x61, 0x05, 0xea, + 0x08, 0x80, 0x8e, 0xf2, 0xec, 0x56, 0x28, 0x19, 0x3b, 0x26, 0xb5, 0xc7, 0x75, 0xc7, 0x1b, 0x6b, + 0x63, 0xb4, 0x25, 0x9e, 0x16, 0x4c, 0x51, 0xd7, 0x60, 0xb1, 0xff, 0x67, 0x3e, 0x08, 0x9b, 0x3f, + 0x67, 0xd6, 0x1e, 0x04, 0xa1, 0x77, 0x4d, 0xc7, 0x1f, 0xd5, 0xc3, 0x75, 0xeb, 0x4f, 0x6e, 0x0e, + 0xb2, 0x32, 0xfc, 0xdd, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x21, 0x25, 0x65, 0x5b, 0x0e, + 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go new file mode 100644 index 000000000..7198219bd --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go @@ -0,0 +1,830 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/spanner/v1/transaction.proto + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// # Transactions +// +// +// Each session can have at most one active transaction at a time. After the +// active transaction is completed, the session can immediately be +// re-used for the next transaction. It is not necessary to create a +// new session for each transaction. +// +// # Transaction Modes +// +// Cloud Spanner supports two transaction modes: +// +// 1. Locking read-write. This type of transaction is the only way +// to write data into Cloud Spanner. These transactions rely on +// pessimistic locking and, if necessary, two-phase commit. +// Locking read-write transactions may abort, requiring the +// application to retry. +// +// 2. Snapshot read-only. This transaction type provides guaranteed +// consistency across several reads, but does not allow +// writes. Snapshot read-only transactions can be configured to +// read at timestamps in the past. Snapshot read-only +// transactions do not need to be committed. +// +// For transactions that only read, snapshot read-only transactions +// provide simpler semantics and are almost always faster. 
In +// particular, read-only transactions do not take locks, so they do +// not conflict with read-write transactions. As a consequence of not +// taking locks, they also do not abort, so retry loops are not needed. +// +// Transactions may only read/write data in a single database. They +// may, however, read/write data in different tables within that +// database. +// +// ## Locking Read-Write Transactions +// +// Locking transactions may be used to atomically read-modify-write +// data anywhere in a database. This type of transaction is externally +// consistent. +// +// Clients should attempt to minimize the amount of time a transaction +// is active. Faster transactions commit with higher probability +// and cause less contention. Cloud Spanner attempts to keep read locks +// active as long as the transaction continues to do reads, and the +// transaction has not been terminated by +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of +// inactivity at the client may cause Cloud Spanner to release a +// transaction's locks and abort it. +// +// Reads performed within a transaction acquire locks on the data +// being read. Writes can only be done at commit time, after all reads +// have been completed. +// Conceptually, a read-write transaction consists of zero or more +// reads or SQL queries followed by +// [Commit][google.spanner.v1.Spanner.Commit]. At any time before +// [Commit][google.spanner.v1.Spanner.Commit], the client can send a +// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the +// transaction. +// +// ### Semantics +// +// Cloud Spanner can commit the transaction if all read locks it acquired +// are still valid at commit time, and it is able to acquire write +// locks for all writes. Cloud Spanner can abort the transaction for any +// reason. 
If a commit attempt returns `ABORTED`, Cloud Spanner guarantees +// that the transaction has not modified any user data in Cloud Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees about +// how long the transaction's locks were held for. It is an error to +// use Cloud Spanner locks for any sort of mutual exclusion other than +// between Cloud Spanner transactions themselves. +// +// ### Retrying Aborted Transactions +// +// When a transaction aborts, the application can choose to retry the +// whole transaction again. To maximize the chances of successfully +// committing the retry, the client should execute the retry in the +// same session as the original attempt. The original session's lock +// priority increases with each consecutive abort, meaning that each +// attempt has a slightly better chance of success than the previous. +// +// Under some circumstances (e.g., many transactions attempting to +// modify the same row(s)), a transaction can abort many times in a +// short period before successfully committing. Thus, it is not a good +// idea to cap the number of retries a transaction can attempt; +// instead, it is better to limit the total amount of wall time spent +// retrying. +// +// ### Idle Transactions +// +// A transaction is considered idle if it has no outstanding reads or +// SQL queries and has not started a read or SQL query within the last 10 +// seconds. Idle transactions can be aborted by Cloud Spanner so that they +// don't hold on to locks indefinitely. In that case, the commit will +// fail with error `ABORTED`. +// +// If this behavior is undesirable, periodically executing a simple +// SQL query in the transaction (e.g., `SELECT 1`) prevents the +// transaction from becoming idle. +// +// ## Snapshot Read-Only Transactions +// +// Snapshot read-only transactions provides a simpler method than +// locking read-write transactions for doing several consistent +// reads. 
However, this type of transaction does not support writes. +// +// Snapshot transactions do not take locks. Instead, they work by +// choosing a Cloud Spanner timestamp, then executing all reads at that +// timestamp. Since they do not acquire locks, they do not block +// concurrent read-write transactions. +// +// Unlike locking read-write transactions, snapshot read-only +// transactions never abort. They can fail if the chosen read +// timestamp is garbage collected; however, the default garbage +// collection policy is generous enough that most applications do not +// need to worry about this in practice. +// +// Snapshot read-only transactions do not need to call +// [Commit][google.spanner.v1.Spanner.Commit] or +// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not +// permitted to do so). +// +// To execute a snapshot transaction, the client specifies a timestamp +// bound, which tells Cloud Spanner how to choose a read timestamp. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically distributed, +// stale read-only transactions can execute more quickly than strong +// or read-write transaction, because they are able to execute far +// from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. +// +// ### Strong +// +// Strong reads are guaranteed to see the effects of all transactions +// that have committed before the start of the read. Furthermore, all +// rows yielded by a single read are consistent with each other -- if +// any part of the read observes a transaction, all parts of the read +// see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are +// concurrent writes. 
If consistency across reads is required, the +// reads should be executed within a transaction or at an exact read +// timestamp. +// +// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. +// +// ### Exact Staleness +// +// These timestamp bounds execute reads at a user-specified +// timestamp. Reads at a timestamp are guaranteed to see a consistent +// prefix of the global transaction history: they observe +// modifications done by all transactions with a commit timestamp <= +// the read timestamp, and observe none of the modifications done by +// transactions with a larger commit timestamp. They will block until +// all conflicting transactions that may be assigned commit timestamps +// <= the read timestamp have finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. +// +// These modes do not require a "negotiation phase" to pick a +// timestamp. As a result, they execute slightly faster than the +// equivalent boundedly stale concurrency modes. On the other hand, +// boundedly stale reads usually return fresher results. +// +// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and +// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. +// +// ### Bounded Staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, +// subject to a user-provided staleness bound. Cloud Spanner chooses the +// newest timestamp within the staleness bound that allows execution +// of the reads at the closest available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of +// the read observes a transaction, all parts of the read see the +// transaction. 
Boundedly stale reads are not repeatable: two stale +// reads, even if they use the same staleness bound, can execute at +// different timestamps and thus return inconsistent results. +// +// Boundedly stale reads execute in two phases: the first phase +// negotiates a timestamp among all replicas needed to serve the +// read. In the second phase, reads are executed at the negotiated +// timestamp. +// +// As a result of the two phase execution, bounded staleness reads are +// usually a little slower than comparable exact staleness +// reads. However, they are typically able to return fresher +// results, and are more likely to execute at the closest replica. +// +// Because the timestamp negotiation requires up-front knowledge of +// which rows will be read, it can only be used with single-use +// read-only transactions. +// +// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and +// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. +// +// ### Old Read Timestamps and Garbage Collection +// +// Cloud Spanner continuously garbage collects deleted and overwritten data +// in the background to reclaim storage space. This process is known +// as "version GC". By default, version GC reclaims versions after they +// are one hour old. Because of this, Cloud Spanner cannot perform reads +// at read timestamps more than one hour in the past. This +// restriction also applies to in-progress reads and/or SQL queries whose +// timestamp become too old while executing. Reads and SQL queries with +// too-old read timestamps fail with the error `FAILED_PRECONDITION`. +type TransactionOptions struct { + // Required. The type of transaction. 
+ // + // Types that are valid to be assigned to Mode: + // *TransactionOptions_ReadWrite_ + // *TransactionOptions_ReadOnly_ + Mode isTransactionOptions_Mode `protobuf_oneof:"mode"` +} + +func (m *TransactionOptions) Reset() { *m = TransactionOptions{} } +func (m *TransactionOptions) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions) ProtoMessage() {} +func (*TransactionOptions) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +type isTransactionOptions_Mode interface { + isTransactionOptions_Mode() +} + +type TransactionOptions_ReadWrite_ struct { + ReadWrite *TransactionOptions_ReadWrite `protobuf:"bytes,1,opt,name=read_write,json=readWrite,oneof"` +} +type TransactionOptions_ReadOnly_ struct { + ReadOnly *TransactionOptions_ReadOnly `protobuf:"bytes,2,opt,name=read_only,json=readOnly,oneof"` +} + +func (*TransactionOptions_ReadWrite_) isTransactionOptions_Mode() {} +func (*TransactionOptions_ReadOnly_) isTransactionOptions_Mode() {} + +func (m *TransactionOptions) GetMode() isTransactionOptions_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *TransactionOptions) GetReadWrite() *TransactionOptions_ReadWrite { + if x, ok := m.GetMode().(*TransactionOptions_ReadWrite_); ok { + return x.ReadWrite + } + return nil +} + +func (m *TransactionOptions) GetReadOnly() *TransactionOptions_ReadOnly { + if x, ok := m.GetMode().(*TransactionOptions_ReadOnly_); ok { + return x.ReadOnly + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TransactionOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionOptions_OneofMarshaler, _TransactionOptions_OneofUnmarshaler, _TransactionOptions_OneofSizer, []interface{}{ + (*TransactionOptions_ReadWrite_)(nil), + (*TransactionOptions_ReadOnly_)(nil), + } +} + +func _TransactionOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadWrite_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadWrite); err != nil { + return err + } + case *TransactionOptions_ReadOnly_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadOnly); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionOptions.Mode has unexpected type %T", x) + } + return nil +} + +func _TransactionOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionOptions) + switch tag { + case 1: // mode.read_write + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadWrite) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadWrite_{msg} + return true, err + case 2: // mode.read_only + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions_ReadOnly) + err := b.DecodeMessage(msg) + m.Mode = &TransactionOptions_ReadOnly_{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionOptions) + // mode + switch x := m.Mode.(type) { + case *TransactionOptions_ReadWrite_: + s := proto.Size(x.ReadWrite) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n 
+= s + case *TransactionOptions_ReadOnly_: + s := proto.Size(x.ReadOnly) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Options for read-write transactions. +type TransactionOptions_ReadWrite struct { +} + +func (m *TransactionOptions_ReadWrite) Reset() { *m = TransactionOptions_ReadWrite{} } +func (m *TransactionOptions_ReadWrite) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadWrite) ProtoMessage() {} +func (*TransactionOptions_ReadWrite) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 0} } + +// Options for read-only transactions. +type TransactionOptions_ReadOnly struct { + // How to choose the timestamp for the read-only transaction. + // + // Types that are valid to be assigned to TimestampBound: + // *TransactionOptions_ReadOnly_Strong + // *TransactionOptions_ReadOnly_MinReadTimestamp + // *TransactionOptions_ReadOnly_MaxStaleness + // *TransactionOptions_ReadOnly_ReadTimestamp + // *TransactionOptions_ReadOnly_ExactStaleness + TimestampBound isTransactionOptions_ReadOnly_TimestampBound `protobuf_oneof:"timestamp_bound"` + // If true, the Cloud Spanner-selected read timestamp is included in + // the [Transaction][google.spanner.v1.Transaction] message that describes the transaction. 
+ ReturnReadTimestamp bool `protobuf:"varint,6,opt,name=return_read_timestamp,json=returnReadTimestamp" json:"return_read_timestamp,omitempty"` +} + +func (m *TransactionOptions_ReadOnly) Reset() { *m = TransactionOptions_ReadOnly{} } +func (m *TransactionOptions_ReadOnly) String() string { return proto.CompactTextString(m) } +func (*TransactionOptions_ReadOnly) ProtoMessage() {} +func (*TransactionOptions_ReadOnly) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 1} } + +type isTransactionOptions_ReadOnly_TimestampBound interface { + isTransactionOptions_ReadOnly_TimestampBound() +} + +type TransactionOptions_ReadOnly_Strong struct { + Strong bool `protobuf:"varint,1,opt,name=strong,oneof"` +} +type TransactionOptions_ReadOnly_MinReadTimestamp struct { + MinReadTimestamp *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=min_read_timestamp,json=minReadTimestamp,oneof"` +} +type TransactionOptions_ReadOnly_MaxStaleness struct { + MaxStaleness *google_protobuf2.Duration `protobuf:"bytes,3,opt,name=max_staleness,json=maxStaleness,oneof"` +} +type TransactionOptions_ReadOnly_ReadTimestamp struct { + ReadTimestamp *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=read_timestamp,json=readTimestamp,oneof"` +} +type TransactionOptions_ReadOnly_ExactStaleness struct { + ExactStaleness *google_protobuf2.Duration `protobuf:"bytes,5,opt,name=exact_staleness,json=exactStaleness,oneof"` +} + +func (*TransactionOptions_ReadOnly_Strong) isTransactionOptions_ReadOnly_TimestampBound() {} +func (*TransactionOptions_ReadOnly_MinReadTimestamp) isTransactionOptions_ReadOnly_TimestampBound() {} +func (*TransactionOptions_ReadOnly_MaxStaleness) isTransactionOptions_ReadOnly_TimestampBound() {} +func (*TransactionOptions_ReadOnly_ReadTimestamp) isTransactionOptions_ReadOnly_TimestampBound() {} +func (*TransactionOptions_ReadOnly_ExactStaleness) isTransactionOptions_ReadOnly_TimestampBound() {} + +func (m *TransactionOptions_ReadOnly) GetTimestampBound() 
isTransactionOptions_ReadOnly_TimestampBound { + if m != nil { + return m.TimestampBound + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetStrong() bool { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_Strong); ok { + return x.Strong + } + return false +} + +func (m *TransactionOptions_ReadOnly) GetMinReadTimestamp() *google_protobuf3.Timestamp { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_MinReadTimestamp); ok { + return x.MinReadTimestamp + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetMaxStaleness() *google_protobuf2.Duration { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_MaxStaleness); ok { + return x.MaxStaleness + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetReadTimestamp() *google_protobuf3.Timestamp { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_ReadTimestamp); ok { + return x.ReadTimestamp + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetExactStaleness() *google_protobuf2.Duration { + if x, ok := m.GetTimestampBound().(*TransactionOptions_ReadOnly_ExactStaleness); ok { + return x.ExactStaleness + } + return nil +} + +func (m *TransactionOptions_ReadOnly) GetReturnReadTimestamp() bool { + if m != nil { + return m.ReturnReadTimestamp + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TransactionOptions_ReadOnly) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionOptions_ReadOnly_OneofMarshaler, _TransactionOptions_ReadOnly_OneofUnmarshaler, _TransactionOptions_ReadOnly_OneofSizer, []interface{}{ + (*TransactionOptions_ReadOnly_Strong)(nil), + (*TransactionOptions_ReadOnly_MinReadTimestamp)(nil), + (*TransactionOptions_ReadOnly_MaxStaleness)(nil), + (*TransactionOptions_ReadOnly_ReadTimestamp)(nil), + (*TransactionOptions_ReadOnly_ExactStaleness)(nil), + } +} + +func _TransactionOptions_ReadOnly_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionOptions_ReadOnly) + // timestamp_bound + switch x := m.TimestampBound.(type) { + case *TransactionOptions_ReadOnly_Strong: + t := uint64(0) + if x.Strong { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *TransactionOptions_ReadOnly_MinReadTimestamp: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MinReadTimestamp); err != nil { + return err + } + case *TransactionOptions_ReadOnly_MaxStaleness: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MaxStaleness); err != nil { + return err + } + case *TransactionOptions_ReadOnly_ReadTimestamp: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadTimestamp); err != nil { + return err + } + case *TransactionOptions_ReadOnly_ExactStaleness: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExactStaleness); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionOptions_ReadOnly.TimestampBound has unexpected type %T", x) + } + return nil +} + +func _TransactionOptions_ReadOnly_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionOptions_ReadOnly) + switch tag { + case 1: 
// timestamp_bound.strong + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TimestampBound = &TransactionOptions_ReadOnly_Strong{x != 0} + return true, err + case 2: // timestamp_bound.min_read_timestamp + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Timestamp) + err := b.DecodeMessage(msg) + m.TimestampBound = &TransactionOptions_ReadOnly_MinReadTimestamp{msg} + return true, err + case 3: // timestamp_bound.max_staleness + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Duration) + err := b.DecodeMessage(msg) + m.TimestampBound = &TransactionOptions_ReadOnly_MaxStaleness{msg} + return true, err + case 4: // timestamp_bound.read_timestamp + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Timestamp) + err := b.DecodeMessage(msg) + m.TimestampBound = &TransactionOptions_ReadOnly_ReadTimestamp{msg} + return true, err + case 5: // timestamp_bound.exact_staleness + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Duration) + err := b.DecodeMessage(msg) + m.TimestampBound = &TransactionOptions_ReadOnly_ExactStaleness{msg} + return true, err + default: + return false, nil + } +} + +func _TransactionOptions_ReadOnly_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionOptions_ReadOnly) + // timestamp_bound + switch x := m.TimestampBound.(type) { + case *TransactionOptions_ReadOnly_Strong: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *TransactionOptions_ReadOnly_MinReadTimestamp: + s := proto.Size(x.MinReadTimestamp) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_MaxStaleness: + s := proto.Size(x.MaxStaleness) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_ReadTimestamp: + s := proto.Size(x.ReadTimestamp) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionOptions_ReadOnly_ExactStaleness: + s := proto.Size(x.ExactStaleness) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A transaction. +type Transaction struct { + // `id` may be used to identify the transaction in subsequent + // [Read][google.spanner.v1.Spanner.Read], + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], + // [Commit][google.spanner.v1.Spanner.Commit], or + // [Rollback][google.spanner.v1.Spanner.Rollback] calls. + // + // Single-use read-only transactions do not have IDs, because + // single-use transactions do not support multiple requests. + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // For snapshot read-only transactions, the read timestamp chosen + // for the transaction. Not returned by default: see + // [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp]. 
+ ReadTimestamp *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=read_timestamp,json=readTimestamp" json:"read_timestamp,omitempty"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} +func (*Transaction) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } + +func (m *Transaction) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +func (m *Transaction) GetReadTimestamp() *google_protobuf3.Timestamp { + if m != nil { + return m.ReadTimestamp + } + return nil +} + +// This message is used to select the transaction in which a +// [Read][google.spanner.v1.Spanner.Read] or +// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs. +// +// See [TransactionOptions][google.spanner.v1.TransactionOptions] for more information about transactions. +type TransactionSelector struct { + // If no fields are set, the default is a single use transaction + // with strong concurrency. 
+ // + // Types that are valid to be assigned to Selector: + // *TransactionSelector_SingleUse + // *TransactionSelector_Id + // *TransactionSelector_Begin + Selector isTransactionSelector_Selector `protobuf_oneof:"selector"` +} + +func (m *TransactionSelector) Reset() { *m = TransactionSelector{} } +func (m *TransactionSelector) String() string { return proto.CompactTextString(m) } +func (*TransactionSelector) ProtoMessage() {} +func (*TransactionSelector) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{2} } + +type isTransactionSelector_Selector interface { + isTransactionSelector_Selector() +} + +type TransactionSelector_SingleUse struct { + SingleUse *TransactionOptions `protobuf:"bytes,1,opt,name=single_use,json=singleUse,oneof"` +} +type TransactionSelector_Id struct { + Id []byte `protobuf:"bytes,2,opt,name=id,proto3,oneof"` +} +type TransactionSelector_Begin struct { + Begin *TransactionOptions `protobuf:"bytes,3,opt,name=begin,oneof"` +} + +func (*TransactionSelector_SingleUse) isTransactionSelector_Selector() {} +func (*TransactionSelector_Id) isTransactionSelector_Selector() {} +func (*TransactionSelector_Begin) isTransactionSelector_Selector() {} + +func (m *TransactionSelector) GetSelector() isTransactionSelector_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *TransactionSelector) GetSingleUse() *TransactionOptions { + if x, ok := m.GetSelector().(*TransactionSelector_SingleUse); ok { + return x.SingleUse + } + return nil +} + +func (m *TransactionSelector) GetId() []byte { + if x, ok := m.GetSelector().(*TransactionSelector_Id); ok { + return x.Id + } + return nil +} + +func (m *TransactionSelector) GetBegin() *TransactionOptions { + if x, ok := m.GetSelector().(*TransactionSelector_Begin); ok { + return x.Begin + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TransactionSelector) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransactionSelector_OneofMarshaler, _TransactionSelector_OneofUnmarshaler, _TransactionSelector_OneofSizer, []interface{}{ + (*TransactionSelector_SingleUse)(nil), + (*TransactionSelector_Id)(nil), + (*TransactionSelector_Begin)(nil), + } +} + +func _TransactionSelector_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransactionSelector) + // selector + switch x := m.Selector.(type) { + case *TransactionSelector_SingleUse: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SingleUse); err != nil { + return err + } + case *TransactionSelector_Id: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Id) + case *TransactionSelector_Begin: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Begin); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransactionSelector.Selector has unexpected type %T", x) + } + return nil +} + +func _TransactionSelector_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransactionSelector) + switch tag { + case 1: // selector.single_use + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions) + err := b.DecodeMessage(msg) + m.Selector = &TransactionSelector_SingleUse{msg} + return true, err + case 2: // selector.id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Selector = &TransactionSelector_Id{x} + return true, err + case 3: // selector.begin + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransactionOptions) + err := b.DecodeMessage(msg) + m.Selector = &TransactionSelector_Begin{msg} + return true, err + 
default: + return false, nil + } +} + +func _TransactionSelector_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransactionSelector) + // selector + switch x := m.Selector.(type) { + case *TransactionSelector_SingleUse: + s := proto.Size(x.SingleUse) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransactionSelector_Id: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Id))) + n += len(x.Id) + case *TransactionSelector_Begin: + s := proto.Size(x.Begin) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*TransactionOptions)(nil), "google.spanner.v1.TransactionOptions") + proto.RegisterType((*TransactionOptions_ReadWrite)(nil), "google.spanner.v1.TransactionOptions.ReadWrite") + proto.RegisterType((*TransactionOptions_ReadOnly)(nil), "google.spanner.v1.TransactionOptions.ReadOnly") + proto.RegisterType((*Transaction)(nil), "google.spanner.v1.Transaction") + proto.RegisterType((*TransactionSelector)(nil), "google.spanner.v1.TransactionSelector") +} + +func init() { proto.RegisterFile("google/spanner/v1/transaction.proto", fileDescriptor5) } + +var fileDescriptor5 = []byte{ + // 522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xd1, 0x6e, 0xd3, 0x30, + 0x14, 0x86, 0xd3, 0x6e, 0xab, 0xba, 0xd3, 0xae, 0xeb, 0x3c, 0x4d, 0x94, 0x08, 0x01, 0x2a, 0x42, + 0xe2, 0xca, 0x51, 0xc7, 0x0d, 0x12, 0x42, 0x82, 0x6e, 0x82, 0x08, 0x09, 0xad, 0x4a, 0x07, 0x48, + 0xdc, 0x04, 0xb7, 0x31, 0x91, 0xa5, 0xc4, 0x8e, 0x6c, 0x67, 0x74, 0x57, 0xdc, 0xf0, 0x34, 0x3c, + 0x02, 0x6f, 0xc1, 0x1b, 0xa1, 0x38, 0x4e, 0x9b, 0x35, 0x17, 0xeb, 0x5d, 0xdc, 0xf3, 0xff, 0xff, + 0xf9, 0x7c, 0x8e, 0x55, 0x78, 0x16, 0x0b, 0x11, 0x27, 0xd4, 0x53, 0x19, 0xe1, 0x9c, 0x4a, 0xef, 
+ 0x66, 0xe2, 0x69, 0x49, 0xb8, 0x22, 0x4b, 0xcd, 0x04, 0xc7, 0x99, 0x14, 0x5a, 0xa0, 0x93, 0x52, + 0x84, 0xad, 0x08, 0xdf, 0x4c, 0xdc, 0x47, 0xd6, 0x47, 0x32, 0xe6, 0x11, 0xce, 0x85, 0x26, 0x85, + 0x5e, 0x95, 0x06, 0xf7, 0xb1, 0xad, 0x9a, 0xd3, 0x22, 0xff, 0xe1, 0x45, 0xb9, 0x24, 0x9b, 0x40, + 0xf7, 0xc9, 0x76, 0x5d, 0xb3, 0x94, 0x2a, 0x4d, 0xd2, 0xac, 0x14, 0x8c, 0xff, 0xed, 0x03, 0xba, + 0xde, 0x70, 0x5c, 0x65, 0x26, 0x1d, 0xcd, 0x00, 0x24, 0x25, 0x51, 0xf8, 0x53, 0x32, 0x4d, 0x47, + 0xad, 0xa7, 0xad, 0x17, 0xbd, 0x73, 0x0f, 0x37, 0xe8, 0x70, 0xd3, 0x8a, 0x03, 0x4a, 0xa2, 0xaf, + 0x85, 0xcd, 0x77, 0x82, 0x43, 0x59, 0x1d, 0xd0, 0x27, 0x30, 0x87, 0x50, 0xf0, 0xe4, 0x76, 0xd4, + 0x36, 0x81, 0x78, 0xf7, 0xc0, 0x2b, 0x9e, 0xdc, 0xfa, 0x4e, 0xd0, 0x95, 0xf6, 0xdb, 0xed, 0xc1, + 0xe1, 0xba, 0x91, 0xfb, 0x7b, 0x0f, 0xba, 0x95, 0x0a, 0x8d, 0xa0, 0xa3, 0xb4, 0x14, 0x3c, 0x36, + 0xd8, 0x5d, 0xdf, 0x09, 0xec, 0x19, 0x7d, 0x04, 0x94, 0x32, 0x1e, 0x1a, 0x8c, 0xf5, 0x1c, 0x2c, + 0x8b, 0x5b, 0xb1, 0x54, 0x93, 0xc2, 0xd7, 0x95, 0xc2, 0x77, 0x82, 0x61, 0xca, 0x78, 0xd1, 0x60, + 0xfd, 0x1b, 0x7a, 0x0b, 0x47, 0x29, 0x59, 0x85, 0x4a, 0x93, 0x84, 0x72, 0xaa, 0xd4, 0x68, 0xcf, + 0xc4, 0x3c, 0x6c, 0xc4, 0x5c, 0xda, 0x85, 0xf8, 0x4e, 0xd0, 0x4f, 0xc9, 0x6a, 0x5e, 0x19, 0xd0, + 0x05, 0x0c, 0xb6, 0x48, 0xf6, 0x77, 0x20, 0x39, 0x92, 0x77, 0x30, 0x2e, 0xe1, 0x98, 0xae, 0xc8, + 0x52, 0xd7, 0x40, 0x0e, 0xee, 0x07, 0x19, 0x18, 0xcf, 0x06, 0xe5, 0x1c, 0xce, 0x24, 0xd5, 0xb9, + 0x6c, 0xcc, 0xa6, 0x53, 0x4c, 0x30, 0x38, 0x2d, 0x8b, 0x77, 0x06, 0x30, 0x3d, 0x81, 0xe3, 0xb5, + 0x2e, 0x5c, 0x88, 0x9c, 0x47, 0xd3, 0x0e, 0xec, 0xa7, 0x22, 0xa2, 0xe3, 0xef, 0xd0, 0xab, 0xad, + 0x11, 0x0d, 0xa0, 0xcd, 0x22, 0xb3, 0x8c, 0x7e, 0xd0, 0x66, 0x11, 0x7a, 0xd7, 0xb8, 0xf8, 0xbd, + 0x2b, 0xd8, 0xba, 0xf6, 0xf8, 0x6f, 0x0b, 0x4e, 0x6b, 0x2d, 0xe6, 0x34, 0xa1, 0x4b, 0x2d, 0x24, + 0x7a, 0x0f, 0xa0, 0x18, 0x8f, 0x13, 0x1a, 0xe6, 0xaa, 0x7a, 0xb6, 0xcf, 0x77, 0x7a, 0x65, 0xc5, + 0x63, 0x2d, 0xad, 0x9f, 0x15, 0x45, 
0x43, 0x83, 0x5c, 0x60, 0xf5, 0x7d, 0xc7, 0x40, 0xbf, 0x81, + 0x83, 0x05, 0x8d, 0x19, 0xb7, 0x7b, 0xde, 0x39, 0xb4, 0x74, 0x4d, 0x01, 0xba, 0xca, 0x42, 0x4e, + 0x7f, 0xc1, 0xd9, 0x52, 0xa4, 0xcd, 0x80, 0xe9, 0xb0, 0x96, 0x30, 0x2b, 0x66, 0x30, 0x6b, 0x7d, + 0x7b, 0x65, 0x65, 0xb1, 0x48, 0x08, 0x8f, 0xb1, 0x90, 0xb1, 0x17, 0x53, 0x6e, 0x26, 0xe4, 0x95, + 0x25, 0x92, 0x31, 0x55, 0xfb, 0x57, 0x79, 0x6d, 0x3f, 0xff, 0xb4, 0x1f, 0x7c, 0x28, 0xad, 0x17, + 0x89, 0xc8, 0x23, 0x3c, 0xb7, 0x7d, 0xbe, 0x4c, 0x16, 0x1d, 0x63, 0x7f, 0xf9, 0x3f, 0x00, 0x00, + 0xff, 0xff, 0xeb, 0x34, 0x76, 0xbe, 0x93, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go new file mode 100644 index 000000000..dd71100e5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go @@ -0,0 +1,216 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/spanner/v1/type.proto + +package spanner + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `TypeCode` is used as part of [Type][google.spanner.v1.Type] to +// indicate the type of a Cloud Spanner value. +// +// Each legal value of a type can be encoded to or decoded from a JSON +// value, using the encodings described below. All Cloud Spanner values can +// be `null`, regardless of type; `null`s are always encoded as a JSON +// `null`. +type TypeCode int32 + +const ( + // Not specified. + TypeCode_TYPE_CODE_UNSPECIFIED TypeCode = 0 + // Encoded as JSON `true` or `false`. + TypeCode_BOOL TypeCode = 1 + // Encoded as `string`, in decimal format. 
+ TypeCode_INT64 TypeCode = 2 + // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or + // `"-Infinity"`. + TypeCode_FLOAT64 TypeCode = 3 + // Encoded as `string` in RFC 3339 timestamp format. The time zone + // must be present, and must be `"Z"`. + TypeCode_TIMESTAMP TypeCode = 4 + // Encoded as `string` in RFC 3339 date format. + TypeCode_DATE TypeCode = 5 + // Encoded as `string`. + TypeCode_STRING TypeCode = 6 + // Encoded as a base64-encoded `string`, as described in RFC 4648, + // section 4. + TypeCode_BYTES TypeCode = 7 + // Encoded as `list`, where the list elements are represented + // according to [array_element_type][google.spanner.v1.Type.array_element_type]. + TypeCode_ARRAY TypeCode = 8 + // Encoded as `list`, where list element `i` is represented according + // to [struct_type.fields[i]][google.spanner.v1.StructType.fields]. + TypeCode_STRUCT TypeCode = 9 +) + +var TypeCode_name = map[int32]string{ + 0: "TYPE_CODE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "FLOAT64", + 4: "TIMESTAMP", + 5: "DATE", + 6: "STRING", + 7: "BYTES", + 8: "ARRAY", + 9: "STRUCT", +} +var TypeCode_value = map[string]int32{ + "TYPE_CODE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "FLOAT64": 3, + "TIMESTAMP": 4, + "DATE": 5, + "STRING": 6, + "BYTES": 7, + "ARRAY": 8, + "STRUCT": 9, +} + +func (x TypeCode) String() string { + return proto.EnumName(TypeCode_name, int32(x)) +} +func (TypeCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +// `Type` indicates the type of a Cloud Spanner value, as might be stored in a +// table cell or returned from an SQL query. +type Type struct { + // Required. The [TypeCode][google.spanner.v1.TypeCode] for this type. + Code TypeCode `protobuf:"varint,1,opt,name=code,enum=google.spanner.v1.TypeCode" json:"code,omitempty"` + // If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` + // is the type of the array elements. 
+ ArrayElementType *Type `protobuf:"bytes,2,opt,name=array_element_type,json=arrayElementType" json:"array_element_type,omitempty"` + // If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` + // provides type information for the struct's fields. + StructType *StructType `protobuf:"bytes,3,opt,name=struct_type,json=structType" json:"struct_type,omitempty"` +} + +func (m *Type) Reset() { *m = Type{} } +func (m *Type) String() string { return proto.CompactTextString(m) } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +func (m *Type) GetCode() TypeCode { + if m != nil { + return m.Code + } + return TypeCode_TYPE_CODE_UNSPECIFIED +} + +func (m *Type) GetArrayElementType() *Type { + if m != nil { + return m.ArrayElementType + } + return nil +} + +func (m *Type) GetStructType() *StructType { + if m != nil { + return m.StructType + } + return nil +} + +// `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. +type StructType struct { + // The list of fields that make up this struct. Order is + // significant, because values of this struct type are represented as + // lists, where the order of field values matches the order of + // fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields + // matches the order of columns in a read request, or the order of + // fields in the `SELECT` clause of a query. 
+ Fields []*StructType_Field `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"` +} + +func (m *StructType) Reset() { *m = StructType{} } +func (m *StructType) String() string { return proto.CompactTextString(m) } +func (*StructType) ProtoMessage() {} +func (*StructType) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } + +func (m *StructType) GetFields() []*StructType_Field { + if m != nil { + return m.Fields + } + return nil +} + +// Message representing a single field of a struct. +type StructType_Field struct { + // The name of the field. For reads, this is the column name. For + // SQL queries, it is the column alias (e.g., `"Word"` in the + // query `"SELECT 'hello' AS Word"`), or the column name (e.g., + // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some + // columns might have an empty name (e.g., !"SELECT + // UPPER(ColName)"`). Note that a query result can contain + // multiple fields with the same name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The type of the field. 
+ Type *Type `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` +} + +func (m *StructType_Field) Reset() { *m = StructType_Field{} } +func (m *StructType_Field) String() string { return proto.CompactTextString(m) } +func (*StructType_Field) ProtoMessage() {} +func (*StructType_Field) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1, 0} } + +func (m *StructType_Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *StructType_Field) GetType() *Type { + if m != nil { + return m.Type + } + return nil +} + +func init() { + proto.RegisterType((*Type)(nil), "google.spanner.v1.Type") + proto.RegisterType((*StructType)(nil), "google.spanner.v1.StructType") + proto.RegisterType((*StructType_Field)(nil), "google.spanner.v1.StructType.Field") + proto.RegisterEnum("google.spanner.v1.TypeCode", TypeCode_name, TypeCode_value) +} + +func init() { proto.RegisterFile("google/spanner/v1/type.proto", fileDescriptor6) } + +var fileDescriptor6 = []byte{ + // 430 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xd1, 0x8a, 0xd3, 0x40, + 0x14, 0x86, 0x9d, 0x36, 0xed, 0x36, 0xa7, 0x28, 0xe3, 0xc0, 0xb2, 0x75, 0x55, 0x28, 0xeb, 0x4d, + 0x51, 0x48, 0x68, 0x15, 0x11, 0x16, 0x84, 0x34, 0x9d, 0xae, 0x81, 0xdd, 0x36, 0x24, 0xb3, 0x42, + 0xbd, 0x29, 0x63, 0x3b, 0x86, 0x40, 0x3a, 0x13, 0x92, 0xec, 0x62, 0x5f, 0xc2, 0x1b, 0xdf, 0xc2, + 0x87, 0xf0, 0xd9, 0x64, 0x26, 0x59, 0x15, 0xaa, 0xe2, 0xdd, 0x9f, 0xfc, 0xff, 0x77, 0xce, 0x99, + 0xc3, 0x81, 0x27, 0x89, 0x52, 0x49, 0x26, 0xdc, 0x32, 0xe7, 0x52, 0x8a, 0xc2, 0xbd, 0x1d, 0xbb, + 0xd5, 0x3e, 0x17, 0x4e, 0x5e, 0xa8, 0x4a, 0x91, 0x87, 0xb5, 0xeb, 0x34, 0xae, 0x73, 0x3b, 0x3e, + 0xbd, 0x03, 0x78, 0x9e, 0xba, 0x5c, 0x4a, 0x55, 0xf1, 0x2a, 0x55, 0xb2, 0xac, 0x81, 0xb3, 0xef, + 0x08, 0x2c, 0xb6, 0xcf, 0x05, 0x71, 0xc1, 0xda, 0xa8, 0xad, 0x18, 0xa0, 0x21, 0x1a, 0x3d, 0x98, + 0x3c, 0x76, 0x0e, 0x0a, 0x39, 0x3a, 0xe6, 0xab, 0xad, 0x88, 0x4c, 
0x90, 0x50, 0x20, 0xbc, 0x28, + 0xf8, 0x7e, 0x2d, 0x32, 0xb1, 0x13, 0xb2, 0x5a, 0xeb, 0x31, 0x06, 0xad, 0x21, 0x1a, 0xf5, 0x27, + 0x27, 0x7f, 0xc1, 0x23, 0x6c, 0x10, 0x5a, 0x13, 0xa6, 0xef, 0x5b, 0xe8, 0x97, 0x55, 0x71, 0xb3, + 0x69, 0xf8, 0xb6, 0xe1, 0x9f, 0xfe, 0x81, 0x8f, 0x4d, 0xca, 0x54, 0x81, 0xf2, 0xa7, 0x3e, 0xfb, + 0x8a, 0x00, 0x7e, 0x59, 0xe4, 0x1c, 0xba, 0x9f, 0x52, 0x91, 0x6d, 0xcb, 0x01, 0x1a, 0xb6, 0x47, + 0xfd, 0xc9, 0xb3, 0x7f, 0x56, 0x72, 0xe6, 0x3a, 0x1b, 0x35, 0xc8, 0xe9, 0x3b, 0xe8, 0x98, 0x1f, + 0x84, 0x80, 0x25, 0xf9, 0xae, 0x5e, 0x86, 0x1d, 0x19, 0x4d, 0x5e, 0x80, 0xf5, 0x3f, 0x2f, 0x34, + 0xa1, 0xe7, 0x5f, 0x10, 0xf4, 0xee, 0xf6, 0x45, 0x1e, 0xc1, 0x31, 0x5b, 0x85, 0x74, 0xed, 0x2f, + 0x67, 0x74, 0x7d, 0xbd, 0x88, 0x43, 0xea, 0x07, 0xf3, 0x80, 0xce, 0xf0, 0x3d, 0xd2, 0x03, 0x6b, + 0xba, 0x5c, 0x5e, 0x62, 0x44, 0x6c, 0xe8, 0x04, 0x0b, 0xf6, 0xfa, 0x15, 0x6e, 0x91, 0x3e, 0x1c, + 0xcd, 0x2f, 0x97, 0x9e, 0xfe, 0x68, 0x93, 0xfb, 0x60, 0xb3, 0xe0, 0x8a, 0xc6, 0xcc, 0xbb, 0x0a, + 0xb1, 0xa5, 0x81, 0x99, 0xc7, 0x28, 0xee, 0x10, 0x80, 0x6e, 0xcc, 0xa2, 0x60, 0x71, 0x81, 0xbb, + 0x1a, 0x9e, 0xae, 0x18, 0x8d, 0xf1, 0x91, 0x96, 0x5e, 0x14, 0x79, 0x2b, 0xdc, 0x6b, 0x12, 0xd7, + 0x3e, 0xc3, 0xf6, 0xf4, 0x33, 0x1c, 0x6f, 0xd4, 0xee, 0x70, 0xe8, 0xa9, 0xad, 0xc7, 0x0c, 0xf5, + 0x2d, 0x84, 0xe8, 0xc3, 0x9b, 0xc6, 0x4f, 0x54, 0xc6, 0x65, 0xe2, 0xa8, 0x22, 0x71, 0x13, 0x21, + 0xcd, 0xa5, 0xb8, 0xb5, 0xc5, 0xf3, 0xb4, 0xfc, 0xed, 0xf6, 0xce, 0x1b, 0xf9, 0xad, 0x75, 0x72, + 0x51, 0xa3, 0x7e, 0xa6, 0x6e, 0xb6, 0x4e, 0xdc, 0x34, 0x78, 0x3f, 0xfe, 0xd8, 0x35, 0xf8, 0xcb, + 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xaa, 0xcb, 0xef, 0xb9, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/storagetransfer/v1/transfer.pb.go b/vendor/google.golang.org/genproto/googleapis/storagetransfer/v1/transfer.pb.go new file mode 100644 index 000000000..14b3cda47 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/storagetransfer/v1/transfer.pb.go @@ -0,0 +1,655 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/storagetransfer/v1/transfer.proto + +/* +Package storagetransfer is a generated protocol buffer package. + +It is generated from these files: + google/storagetransfer/v1/transfer.proto + google/storagetransfer/v1/transfer_types.proto + +It has these top-level messages: + GetGoogleServiceAccountRequest + CreateTransferJobRequest + UpdateTransferJobRequest + GetTransferJobRequest + ListTransferJobsRequest + ListTransferJobsResponse + PauseTransferOperationRequest + ResumeTransferOperationRequest + GoogleServiceAccount + AwsAccessKey + ObjectConditions + GcsData + AwsS3Data + HttpData + TransferOptions + TransferSpec + Schedule + TransferJob + ErrorLogEntry + ErrorSummary + TransferCounters + TransferOperation +*/ +package storagetransfer + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request passed to GetGoogleServiceAccount. +type GetGoogleServiceAccountRequest struct { + // The ID of the Google Cloud Platform Console project that the Google service + // account is associated with. + // Required. 
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` +} + +func (m *GetGoogleServiceAccountRequest) Reset() { *m = GetGoogleServiceAccountRequest{} } +func (m *GetGoogleServiceAccountRequest) String() string { return proto.CompactTextString(m) } +func (*GetGoogleServiceAccountRequest) ProtoMessage() {} +func (*GetGoogleServiceAccountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *GetGoogleServiceAccountRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Request passed to CreateTransferJob. +type CreateTransferJobRequest struct { + // The job to create. + // Required. + TransferJob *TransferJob `protobuf:"bytes,1,opt,name=transfer_job,json=transferJob" json:"transfer_job,omitempty"` +} + +func (m *CreateTransferJobRequest) Reset() { *m = CreateTransferJobRequest{} } +func (m *CreateTransferJobRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTransferJobRequest) ProtoMessage() {} +func (*CreateTransferJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *CreateTransferJobRequest) GetTransferJob() *TransferJob { + if m != nil { + return m.TransferJob + } + return nil +} + +// Request passed to UpdateTransferJob. +type UpdateTransferJobRequest struct { + // The name of job to update. + // Required. + JobName string `protobuf:"bytes,1,opt,name=job_name,json=jobName" json:"job_name,omitempty"` + // The ID of the Google Cloud Platform Console project that owns the job. + // Required. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The job to update. + // Required. + TransferJob *TransferJob `protobuf:"bytes,3,opt,name=transfer_job,json=transferJob" json:"transfer_job,omitempty"` + // The field mask of the fields in `transferJob` that are to be updated in + // this request. 
Fields in `transferJob` that can be updated are: + // `description`, `transferSpec`, and `status`. To update the `transferSpec` + // of the job, a complete transfer specification has to be provided. An + // incomplete specification which misses any required fields will be rejected + // with the error `INVALID_ARGUMENT`. + UpdateTransferJobFieldMask *google_protobuf2.FieldMask `protobuf:"bytes,4,opt,name=update_transfer_job_field_mask,json=updateTransferJobFieldMask" json:"update_transfer_job_field_mask,omitempty"` +} + +func (m *UpdateTransferJobRequest) Reset() { *m = UpdateTransferJobRequest{} } +func (m *UpdateTransferJobRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateTransferJobRequest) ProtoMessage() {} +func (*UpdateTransferJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *UpdateTransferJobRequest) GetJobName() string { + if m != nil { + return m.JobName + } + return "" +} + +func (m *UpdateTransferJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateTransferJobRequest) GetTransferJob() *TransferJob { + if m != nil { + return m.TransferJob + } + return nil +} + +func (m *UpdateTransferJobRequest) GetUpdateTransferJobFieldMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateTransferJobFieldMask + } + return nil +} + +// Request passed to GetTransferJob. +type GetTransferJobRequest struct { + // The job to get. + // Required. + JobName string `protobuf:"bytes,1,opt,name=job_name,json=jobName" json:"job_name,omitempty"` + // The ID of the Google Cloud Platform Console project that owns the job. + // Required. 
+ ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` +} + +func (m *GetTransferJobRequest) Reset() { *m = GetTransferJobRequest{} } +func (m *GetTransferJobRequest) String() string { return proto.CompactTextString(m) } +func (*GetTransferJobRequest) ProtoMessage() {} +func (*GetTransferJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GetTransferJobRequest) GetJobName() string { + if m != nil { + return m.JobName + } + return "" +} + +func (m *GetTransferJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// `project_id`, `job_names`, and `job_statuses` are query parameters that can +// be specified when listing transfer jobs. +type ListTransferJobsRequest struct { + // A list of query parameters specified as JSON text in the form of + // {"project_id":"my_project_id", + // "job_names":["jobid1","jobid2",...], + // "job_statuses":["status1","status2",...]}. + // Since `job_names` and `job_statuses` support multiple values, their values + // must be specified with array notation. `project_id` is required. `job_names` + // and `job_statuses` are optional. The valid values for `job_statuses` are + // case-insensitive: `ENABLED`, `DISABLED`, and `DELETED`. + Filter string `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` + // The list page size. The max allowed value is 256. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The list page token. 
+ PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListTransferJobsRequest) Reset() { *m = ListTransferJobsRequest{} } +func (m *ListTransferJobsRequest) String() string { return proto.CompactTextString(m) } +func (*ListTransferJobsRequest) ProtoMessage() {} +func (*ListTransferJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ListTransferJobsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListTransferJobsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTransferJobsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response from ListTransferJobs. +type ListTransferJobsResponse struct { + // A list of transfer jobs. + TransferJobs []*TransferJob `protobuf:"bytes,1,rep,name=transfer_jobs,json=transferJobs" json:"transfer_jobs,omitempty"` + // The list next page token. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListTransferJobsResponse) Reset() { *m = ListTransferJobsResponse{} } +func (m *ListTransferJobsResponse) String() string { return proto.CompactTextString(m) } +func (*ListTransferJobsResponse) ProtoMessage() {} +func (*ListTransferJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListTransferJobsResponse) GetTransferJobs() []*TransferJob { + if m != nil { + return m.TransferJobs + } + return nil +} + +func (m *ListTransferJobsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request passed to PauseTransferOperation. +type PauseTransferOperationRequest struct { + // The name of the transfer operation. + // Required. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *PauseTransferOperationRequest) Reset() { *m = PauseTransferOperationRequest{} } +func (m *PauseTransferOperationRequest) String() string { return proto.CompactTextString(m) } +func (*PauseTransferOperationRequest) ProtoMessage() {} +func (*PauseTransferOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PauseTransferOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request passed to ResumeTransferOperation. +type ResumeTransferOperationRequest struct { + // The name of the transfer operation. + // Required. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ResumeTransferOperationRequest) Reset() { *m = ResumeTransferOperationRequest{} } +func (m *ResumeTransferOperationRequest) String() string { return proto.CompactTextString(m) } +func (*ResumeTransferOperationRequest) ProtoMessage() {} +func (*ResumeTransferOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ResumeTransferOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*GetGoogleServiceAccountRequest)(nil), "google.storagetransfer.v1.GetGoogleServiceAccountRequest") + proto.RegisterType((*CreateTransferJobRequest)(nil), "google.storagetransfer.v1.CreateTransferJobRequest") + proto.RegisterType((*UpdateTransferJobRequest)(nil), "google.storagetransfer.v1.UpdateTransferJobRequest") + proto.RegisterType((*GetTransferJobRequest)(nil), "google.storagetransfer.v1.GetTransferJobRequest") + proto.RegisterType((*ListTransferJobsRequest)(nil), "google.storagetransfer.v1.ListTransferJobsRequest") + proto.RegisterType((*ListTransferJobsResponse)(nil), "google.storagetransfer.v1.ListTransferJobsResponse") + proto.RegisterType((*PauseTransferOperationRequest)(nil), 
"google.storagetransfer.v1.PauseTransferOperationRequest") + proto.RegisterType((*ResumeTransferOperationRequest)(nil), "google.storagetransfer.v1.ResumeTransferOperationRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for StorageTransferService service + +type StorageTransferServiceClient interface { + // Returns the Google service account that is used by Storage Transfer + // Service to access buckets in the project where transfers + // run or in other projects. Each Google service account is associated + // with one Google Cloud Platform Console project. Users + // should add this service account to the Google Cloud Storage bucket + // ACLs to grant access to Storage Transfer Service. This service + // account is created and owned by Storage Transfer Service and can + // only be used by Storage Transfer Service. + GetGoogleServiceAccount(ctx context.Context, in *GetGoogleServiceAccountRequest, opts ...grpc.CallOption) (*GoogleServiceAccount, error) + // Creates a transfer job that runs periodically. + CreateTransferJob(ctx context.Context, in *CreateTransferJobRequest, opts ...grpc.CallOption) (*TransferJob, error) + // Updates a transfer job. Updating a job's transfer spec does not affect + // transfer operations that are running already. Updating the scheduling + // of a job is not allowed. + UpdateTransferJob(ctx context.Context, in *UpdateTransferJobRequest, opts ...grpc.CallOption) (*TransferJob, error) + // Gets a transfer job. + GetTransferJob(ctx context.Context, in *GetTransferJobRequest, opts ...grpc.CallOption) (*TransferJob, error) + // Lists transfer jobs. 
+ ListTransferJobs(ctx context.Context, in *ListTransferJobsRequest, opts ...grpc.CallOption) (*ListTransferJobsResponse, error) + // Pauses a transfer operation. + PauseTransferOperation(ctx context.Context, in *PauseTransferOperationRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Resumes a transfer operation that is paused. + ResumeTransferOperation(ctx context.Context, in *ResumeTransferOperationRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) +} + +type storageTransferServiceClient struct { + cc *grpc.ClientConn +} + +func NewStorageTransferServiceClient(cc *grpc.ClientConn) StorageTransferServiceClient { + return &storageTransferServiceClient{cc} +} + +func (c *storageTransferServiceClient) GetGoogleServiceAccount(ctx context.Context, in *GetGoogleServiceAccountRequest, opts ...grpc.CallOption) (*GoogleServiceAccount, error) { + out := new(GoogleServiceAccount) + err := grpc.Invoke(ctx, "/google.storagetransfer.v1.StorageTransferService/GetGoogleServiceAccount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageTransferServiceClient) CreateTransferJob(ctx context.Context, in *CreateTransferJobRequest, opts ...grpc.CallOption) (*TransferJob, error) { + out := new(TransferJob) + err := grpc.Invoke(ctx, "/google.storagetransfer.v1.StorageTransferService/CreateTransferJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageTransferServiceClient) UpdateTransferJob(ctx context.Context, in *UpdateTransferJobRequest, opts ...grpc.CallOption) (*TransferJob, error) { + out := new(TransferJob) + err := grpc.Invoke(ctx, "/google.storagetransfer.v1.StorageTransferService/UpdateTransferJob", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageTransferServiceClient) GetTransferJob(ctx context.Context, in *GetTransferJobRequest, opts ...grpc.CallOption) (*TransferJob, error) { + out := new(TransferJob) + err := grpc.Invoke(ctx, "/google.storagetransfer.v1.StorageTransferService/GetTransferJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageTransferServiceClient) ListTransferJobs(ctx context.Context, in *ListTransferJobsRequest, opts ...grpc.CallOption) (*ListTransferJobsResponse, error) { + out := new(ListTransferJobsResponse) + err := grpc.Invoke(ctx, "/google.storagetransfer.v1.StorageTransferService/ListTransferJobs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageTransferServiceClient) PauseTransferOperation(ctx context.Context, in *PauseTransferOperationRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.storagetransfer.v1.StorageTransferService/PauseTransferOperation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageTransferServiceClient) ResumeTransferOperation(ctx context.Context, in *ResumeTransferOperationRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/google.storagetransfer.v1.StorageTransferService/ResumeTransferOperation", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for StorageTransferService service + +type StorageTransferServiceServer interface { + // Returns the Google service account that is used by Storage Transfer + // Service to access buckets in the project where transfers + // run or in other projects. Each Google service account is associated + // with one Google Cloud Platform Console project. 
Users + // should add this service account to the Google Cloud Storage bucket + // ACLs to grant access to Storage Transfer Service. This service + // account is created and owned by Storage Transfer Service and can + // only be used by Storage Transfer Service. + GetGoogleServiceAccount(context.Context, *GetGoogleServiceAccountRequest) (*GoogleServiceAccount, error) + // Creates a transfer job that runs periodically. + CreateTransferJob(context.Context, *CreateTransferJobRequest) (*TransferJob, error) + // Updates a transfer job. Updating a job's transfer spec does not affect + // transfer operations that are running already. Updating the scheduling + // of a job is not allowed. + UpdateTransferJob(context.Context, *UpdateTransferJobRequest) (*TransferJob, error) + // Gets a transfer job. + GetTransferJob(context.Context, *GetTransferJobRequest) (*TransferJob, error) + // Lists transfer jobs. + ListTransferJobs(context.Context, *ListTransferJobsRequest) (*ListTransferJobsResponse, error) + // Pauses a transfer operation. + PauseTransferOperation(context.Context, *PauseTransferOperationRequest) (*google_protobuf1.Empty, error) + // Resumes a transfer operation that is paused. 
+ ResumeTransferOperation(context.Context, *ResumeTransferOperationRequest) (*google_protobuf1.Empty, error) +} + +func RegisterStorageTransferServiceServer(s *grpc.Server, srv StorageTransferServiceServer) { + s.RegisterService(&_StorageTransferService_serviceDesc, srv) +} + +func _StorageTransferService_GetGoogleServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGoogleServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageTransferServiceServer).GetGoogleServiceAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storagetransfer.v1.StorageTransferService/GetGoogleServiceAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageTransferServiceServer).GetGoogleServiceAccount(ctx, req.(*GetGoogleServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageTransferService_CreateTransferJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTransferJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageTransferServiceServer).CreateTransferJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storagetransfer.v1.StorageTransferService/CreateTransferJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageTransferServiceServer).CreateTransferJob(ctx, req.(*CreateTransferJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageTransferService_UpdateTransferJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(UpdateTransferJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageTransferServiceServer).UpdateTransferJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storagetransfer.v1.StorageTransferService/UpdateTransferJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageTransferServiceServer).UpdateTransferJob(ctx, req.(*UpdateTransferJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageTransferService_GetTransferJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTransferJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageTransferServiceServer).GetTransferJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storagetransfer.v1.StorageTransferService/GetTransferJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageTransferServiceServer).GetTransferJob(ctx, req.(*GetTransferJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageTransferService_ListTransferJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTransferJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageTransferServiceServer).ListTransferJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storagetransfer.v1.StorageTransferService/ListTransferJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageTransferServiceServer).ListTransferJobs(ctx, req.(*ListTransferJobsRequest)) 
+ } + return interceptor(ctx, in, info, handler) +} + +func _StorageTransferService_PauseTransferOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseTransferOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageTransferServiceServer).PauseTransferOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storagetransfer.v1.StorageTransferService/PauseTransferOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageTransferServiceServer).PauseTransferOperation(ctx, req.(*PauseTransferOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageTransferService_ResumeTransferOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResumeTransferOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageTransferServiceServer).ResumeTransferOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storagetransfer.v1.StorageTransferService/ResumeTransferOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageTransferServiceServer).ResumeTransferOperation(ctx, req.(*ResumeTransferOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _StorageTransferService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.storagetransfer.v1.StorageTransferService", + HandlerType: (*StorageTransferServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetGoogleServiceAccount", + Handler: _StorageTransferService_GetGoogleServiceAccount_Handler, + }, + { + MethodName: "CreateTransferJob", + 
Handler: _StorageTransferService_CreateTransferJob_Handler, + }, + { + MethodName: "UpdateTransferJob", + Handler: _StorageTransferService_UpdateTransferJob_Handler, + }, + { + MethodName: "GetTransferJob", + Handler: _StorageTransferService_GetTransferJob_Handler, + }, + { + MethodName: "ListTransferJobs", + Handler: _StorageTransferService_ListTransferJobs_Handler, + }, + { + MethodName: "PauseTransferOperation", + Handler: _StorageTransferService_PauseTransferOperation_Handler, + }, + { + MethodName: "ResumeTransferOperation", + Handler: _StorageTransferService_ResumeTransferOperation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/storagetransfer/v1/transfer.proto", +} + +func init() { proto.RegisterFile("google/storagetransfer/v1/transfer.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 755 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xdd, 0x4e, 0x13, 0x5b, + 0x14, 0xc7, 0xb3, 0xf9, 0x3a, 0xb0, 0x80, 0x73, 0x60, 0x27, 0xa7, 0x0c, 0xe5, 0xd0, 0x34, 0x43, + 0xd2, 0x83, 0xd5, 0xcc, 0x48, 0xeb, 0x85, 0x62, 0x8c, 0x11, 0xa3, 0x88, 0x9f, 0x58, 0xf0, 0xc6, + 0x0b, 0x27, 0xbb, 0xed, 0xea, 0x64, 0x4a, 0x3b, 0x7b, 0x9c, 0xbd, 0x4b, 0x04, 0xc2, 0x85, 0xbe, + 0x80, 0x89, 0xc6, 0xc4, 0xc4, 0x98, 0xf8, 0x12, 0x3e, 0x89, 0xaf, 0xe0, 0x43, 0xe8, 0x95, 0x66, + 0x76, 0x67, 0xca, 0xd0, 0x8f, 0x01, 0xd4, 0xbb, 0xd9, 0x7b, 0xaf, 0x8f, 0xdf, 0x5a, 0x8b, 0xff, + 0xa2, 0xb0, 0x6c, 0x73, 0x6e, 0x37, 0xd0, 0x14, 0x92, 0xfb, 0xcc, 0x46, 0xe9, 0x33, 0x57, 0xd4, + 0xd0, 0x37, 0x77, 0x57, 0xcc, 0xe8, 0xdb, 0xf0, 0x7c, 0x2e, 0x39, 0x9d, 0x6f, 0x5b, 0x1a, 0x5d, + 0x96, 0xc6, 0xee, 0x4a, 0xfa, 0xbf, 0x30, 0x08, 0xf3, 0x1c, 0x93, 0xb9, 0x2e, 0x97, 0x4c, 0x3a, + 0xdc, 0x15, 0x6d, 0xc7, 0xf4, 0x42, 0xf8, 0xaa, 0x4e, 0xe5, 0x56, 0xcd, 0xc4, 0xa6, 0x27, 0xf7, + 0xc2, 0xc7, 0x6c, 0xf7, 0x63, 0xcd, 0xc1, 0x46, 0xd5, 0x6a, 0x32, 0xb1, 0x13, 0x5a, 0x18, 0x27, + 0x13, 0x5a, 0x72, 0xcf, 0xc3, 
0x30, 0x9d, 0x7e, 0x1d, 0x32, 0xeb, 0x28, 0xd7, 0x95, 0xd3, 0x16, + 0xfa, 0xbb, 0x4e, 0x05, 0x6f, 0x54, 0x2a, 0xbc, 0xe5, 0xca, 0x12, 0x3e, 0x6f, 0xa1, 0x90, 0x74, + 0x11, 0xc0, 0xf3, 0x79, 0x1d, 0x2b, 0xd2, 0x72, 0xaa, 0x1a, 0xc9, 0x92, 0xe5, 0x89, 0xd2, 0x44, + 0x78, 0xb3, 0x51, 0xd5, 0x11, 0xb4, 0x9b, 0x3e, 0x32, 0x89, 0xdb, 0x61, 0xf8, 0xbb, 0xbc, 0x1c, + 0xb9, 0x6e, 0xc0, 0x54, 0x27, 0x69, 0x9d, 0x97, 0x95, 0xf3, 0x64, 0x21, 0x67, 0x0c, 0xec, 0x8d, + 0x11, 0x0f, 0x32, 0x29, 0x8f, 0x0e, 0xfa, 0x0f, 0x02, 0xda, 0x13, 0xaf, 0xda, 0x3f, 0xcf, 0x3c, + 0x8c, 0xd7, 0x79, 0xd9, 0x72, 0x59, 0x13, 0x43, 0xc0, 0xbf, 0xea, 0xbc, 0xfc, 0x90, 0x35, 0xb1, + 0x8b, 0x7e, 0xa8, 0x8b, 0xbe, 0x87, 0x70, 0xf8, 0x97, 0x09, 0xe9, 0x33, 0xc8, 0xb4, 0x14, 0xa0, + 0x15, 0x8f, 0x68, 0x1d, 0x4d, 0x48, 0x1b, 0x51, 0xc1, 0xa3, 0x11, 0x19, 0xd1, 0x10, 0x8d, 0xdb, + 0x81, 0xc9, 0x03, 0x26, 0x76, 0x4a, 0xe9, 0x56, 0x77, 0x89, 0x9d, 0x37, 0xfd, 0x31, 0xfc, 0xbb, + 0x8e, 0xf2, 0x4f, 0x56, 0xaf, 0x37, 0x61, 0xee, 0xbe, 0x23, 0xe2, 0x31, 0x45, 0x14, 0x34, 0x05, + 0x63, 0x35, 0xa7, 0x21, 0xd1, 0x0f, 0x43, 0x86, 0x27, 0xba, 0x00, 0x13, 0x1e, 0xb3, 0xd1, 0x12, + 0xce, 0x3e, 0xaa, 0x82, 0x46, 0x4b, 0xe3, 0xc1, 0xc5, 0x96, 0xb3, 0xdf, 0x4e, 0x17, 0x3c, 0x4a, + 0xbe, 0x83, 0xae, 0x36, 0x1a, 0xa6, 0x63, 0x36, 0x6e, 0x07, 0x17, 0xfa, 0x6b, 0x02, 0x5a, 0x6f, + 0x3e, 0xe1, 0x71, 0x57, 0x20, 0xbd, 0x07, 0xd3, 0xf1, 0xbe, 0x09, 0x8d, 0x64, 0x87, 0xcf, 0x30, + 0x8a, 0xa9, 0xd8, 0x28, 0x04, 0xcd, 0xc1, 0x3f, 0x2e, 0xbe, 0x90, 0x56, 0x8c, 0xa6, 0x5d, 0xfc, + 0x74, 0x70, 0xbd, 0xd9, 0x21, 0x2a, 0xc2, 0xe2, 0x26, 0x6b, 0x89, 0x4e, 0xc3, 0x1f, 0x79, 0xe8, + 0x2b, 0x35, 0x46, 0x6d, 0xa0, 0x30, 0x12, 0xeb, 0xab, 0xfa, 0xd6, 0x2f, 0x41, 0xa6, 0x84, 0xa2, + 0xd5, 0x3c, 0x93, 0x57, 0xe1, 0xfb, 0x38, 0xa4, 0xb6, 0xda, 0x35, 0x44, 0x7e, 0xa1, 0xde, 0xe8, + 0x67, 0x02, 0x73, 0x03, 0x44, 0x48, 0xaf, 0x24, 0xd4, 0x9f, 0x2c, 0xdc, 0xb4, 0x99, 0xe4, 0xda, + 0xc7, 0x4f, 0x37, 0x5e, 0x7d, 0xf9, 0xfa, 0x76, 0x68, 0x99, 0xe6, 
0x82, 0x6d, 0x61, 0xf7, 0xb1, + 0x10, 0xe6, 0xc1, 0xd1, 0x9f, 0xd3, 0x21, 0x7d, 0x4f, 0x60, 0xb6, 0x47, 0xfb, 0xb4, 0x98, 0x90, + 0x76, 0xd0, 0xa6, 0x48, 0x9f, 0x72, 0xcc, 0x7a, 0x4e, 0x21, 0x66, 0xf5, 0x99, 0xf8, 0x42, 0x0b, + 0x46, 0xbe, 0x7a, 0x4c, 0xc7, 0xf4, 0x03, 0x81, 0xd9, 0x9e, 0x75, 0x91, 0x88, 0x36, 0x68, 0xb9, + 0x9c, 0x1a, 0xed, 0x9c, 0x42, 0x5b, 0x2a, 0x64, 0x02, 0xb4, 0x83, 0x48, 0x91, 0xd7, 0xe2, 0x90, + 0x66, 0x3e, 0x7f, 0xb8, 0x4a, 0xf2, 0xf4, 0x0d, 0x81, 0xbf, 0x8f, 0x6b, 0x99, 0x5e, 0x4c, 0x9e, + 0xf3, 0xef, 0xb7, 0x8c, 0x9e, 0xc0, 0x45, 0xdf, 0x11, 0x98, 0xe9, 0x56, 0x27, 0x2d, 0x24, 0x24, + 0x19, 0xb0, 0x3a, 0xd2, 0xc5, 0x33, 0xf9, 0xb4, 0xe5, 0xaf, 0x6b, 0x8a, 0x92, 0xd2, 0x9e, 0xc1, + 0xd2, 0x8f, 0x04, 0x52, 0xfd, 0x45, 0x4a, 0x2f, 0x27, 0x64, 0x4a, 0xd4, 0x75, 0x3a, 0xd5, 0xb3, + 0x84, 0x6f, 0x05, 0xff, 0x66, 0xf5, 0x15, 0x85, 0x71, 0x5e, 0x57, 0x12, 0x38, 0x38, 0xd6, 0xa8, + 0x4e, 0x8c, 0xf6, 0x18, 0xbd, 0x20, 0x7e, 0x30, 0xcc, 0x4f, 0x04, 0xe6, 0x06, 0xec, 0x83, 0x44, + 0xf5, 0x26, 0xef, 0x90, 0x81, 0x84, 0x05, 0x45, 0x78, 0x41, 0xff, 0xff, 0x44, 0x42, 0x5f, 0x25, + 0x58, 0x25, 0xf9, 0xb5, 0x97, 0x04, 0x96, 0x2a, 0xbc, 0x99, 0x00, 0xa3, 0x92, 0xac, 0x4d, 0x47, + 0x34, 0x9b, 0xc1, 0xf1, 0xe9, 0x9d, 0xd0, 0xde, 0xe6, 0x0d, 0xe6, 0xda, 0x06, 0xf7, 0x6d, 0xd3, + 0x46, 0x57, 0x99, 0x86, 0xeb, 0x81, 0x79, 0x8e, 0xe8, 0xf3, 0x53, 0xe3, 0x6a, 0xd7, 0xd5, 0x37, + 0x42, 0xca, 0x63, 0xca, 0xaf, 0xf8, 0x33, 0x00, 0x00, 0xff, 0xff, 0x18, 0x96, 0x62, 0x38, 0x43, + 0x09, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/storagetransfer/v1/transfer_types.pb.go b/vendor/google.golang.org/genproto/googleapis/storagetransfer/v1/transfer_types.pb.go new file mode 100644 index 000000000..4350c2c1a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/storagetransfer/v1/transfer_types.pb.go @@ -0,0 +1,1218 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/storagetransfer/v1/transfer_types.proto + +package storagetransfer + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/code" +import google_type "google.golang.org/genproto/googleapis/type/date" +import google_type1 "google.golang.org/genproto/googleapis/type/timeofday" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The status of the transfer job. +type TransferJob_Status int32 + +const ( + // Zero is an illegal value. + TransferJob_STATUS_UNSPECIFIED TransferJob_Status = 0 + // New transfers will be performed based on the schedule. + TransferJob_ENABLED TransferJob_Status = 1 + // New transfers will not be scheduled. + TransferJob_DISABLED TransferJob_Status = 2 + // This is a soft delete state. After a transfer job is set to this + // state, the job and all the transfer executions are subject to + // garbage collection. + TransferJob_DELETED TransferJob_Status = 3 +) + +var TransferJob_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "ENABLED", + 2: "DISABLED", + 3: "DELETED", +} +var TransferJob_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "ENABLED": 1, + "DISABLED": 2, + "DELETED": 3, +} + +func (x TransferJob_Status) String() string { + return proto.EnumName(TransferJob_Status_name, int32(x)) +} +func (TransferJob_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 0} } + +// The status of a TransferOperation. +type TransferOperation_Status int32 + +const ( + // Zero is an illegal value. + TransferOperation_STATUS_UNSPECIFIED TransferOperation_Status = 0 + // In progress. 
+ TransferOperation_IN_PROGRESS TransferOperation_Status = 1 + // Paused. + TransferOperation_PAUSED TransferOperation_Status = 2 + // Completed successfully. + TransferOperation_SUCCESS TransferOperation_Status = 3 + // Terminated due to an unrecoverable failure. + TransferOperation_FAILED TransferOperation_Status = 4 + // Aborted by the user. + TransferOperation_ABORTED TransferOperation_Status = 5 +) + +var TransferOperation_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "IN_PROGRESS", + 2: "PAUSED", + 3: "SUCCESS", + 4: "FAILED", + 5: "ABORTED", +} +var TransferOperation_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "IN_PROGRESS": 1, + "PAUSED": 2, + "SUCCESS": 3, + "FAILED": 4, + "ABORTED": 5, +} + +func (x TransferOperation_Status) String() string { + return proto.EnumName(TransferOperation_Status_name, int32(x)) +} +func (TransferOperation_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{13, 0} } + +// Google service account +type GoogleServiceAccount struct { + // Required. + AccountEmail string `protobuf:"bytes,1,opt,name=account_email,json=accountEmail" json:"account_email,omitempty"` +} + +func (m *GoogleServiceAccount) Reset() { *m = GoogleServiceAccount{} } +func (m *GoogleServiceAccount) String() string { return proto.CompactTextString(m) } +func (*GoogleServiceAccount) ProtoMessage() {} +func (*GoogleServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *GoogleServiceAccount) GetAccountEmail() string { + if m != nil { + return m.AccountEmail + } + return "" +} + +// AWS access key (see +// [AWS Security Credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)). +type AwsAccessKey struct { + // AWS access key ID. + // Required. + AccessKeyId string `protobuf:"bytes,1,opt,name=access_key_id,json=accessKeyId" json:"access_key_id,omitempty"` + // AWS secret access key. This field is not returned in RPC responses. + // Required. 
+ SecretAccessKey string `protobuf:"bytes,2,opt,name=secret_access_key,json=secretAccessKey" json:"secret_access_key,omitempty"` +} + +func (m *AwsAccessKey) Reset() { *m = AwsAccessKey{} } +func (m *AwsAccessKey) String() string { return proto.CompactTextString(m) } +func (*AwsAccessKey) ProtoMessage() {} +func (*AwsAccessKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *AwsAccessKey) GetAccessKeyId() string { + if m != nil { + return m.AccessKeyId + } + return "" +} + +func (m *AwsAccessKey) GetSecretAccessKey() string { + if m != nil { + return m.SecretAccessKey + } + return "" +} + +// Conditions that determine which objects will be transferred. +type ObjectConditions struct { + // If unspecified, `minTimeElapsedSinceLastModification` takes a zero value + // and `maxTimeElapsedSinceLastModification` takes the maximum possible + // value of Duration. Objects that satisfy the object conditions + // must either have a `lastModificationTime` greater or equal to + // `NOW` - `maxTimeElapsedSinceLastModification` and less than + // `NOW` - `minTimeElapsedSinceLastModification`, or not have a + // `lastModificationTime`. + MinTimeElapsedSinceLastModification *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=min_time_elapsed_since_last_modification,json=minTimeElapsedSinceLastModification" json:"min_time_elapsed_since_last_modification,omitempty"` + // `maxTimeElapsedSinceLastModification` is the complement to + // `minTimeElapsedSinceLastModification`. + MaxTimeElapsedSinceLastModification *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=max_time_elapsed_since_last_modification,json=maxTimeElapsedSinceLastModification" json:"max_time_elapsed_since_last_modification,omitempty"` + // If `includePrefixes` is specified, objects that satisfy the object + // conditions must have names that start with one of the `includePrefixes` + // and that do not start with any of the `excludePrefixes`. 
If `includePrefixes` + // is not specified, all objects except those that have names starting with + // one of the `excludePrefixes` must satisfy the object conditions. + // + // Requirements: + // + // * Each include-prefix and exclude-prefix can contain any sequence of + // Unicode characters, of max length 1024 bytes when UTF8-encoded, and + // must not contain Carriage Return or Line Feed characters. Wildcard + // matching and regular expression matching are not supported. + // + // * Each include-prefix and exclude-prefix must omit the leading slash. + // For example, to include the `requests.gz` object in a transfer from + // `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the include + // prefix as `logs/y=2015/requests.gz`. + // + // * None of the include-prefix or the exclude-prefix values can be empty, + // if specified. + // + // * Each include-prefix must include a distinct portion of the object + // namespace, i.e., no include-prefix may be a prefix of another + // include-prefix. + // + // * Each exclude-prefix must exclude a distinct portion of the object + // namespace, i.e., no exclude-prefix may be a prefix of another + // exclude-prefix. + // + // * If `includePrefixes` is specified, then each exclude-prefix must start + // with the value of a path explicitly included by `includePrefixes`. + // + // The max size of `includePrefixes` is 20. + IncludePrefixes []string `protobuf:"bytes,3,rep,name=include_prefixes,json=includePrefixes" json:"include_prefixes,omitempty"` + // `excludePrefixes` must follow the requirements described for + // `includePrefixes`. + // + // The max size of `excludePrefixes` is 20. 
+ ExcludePrefixes []string `protobuf:"bytes,4,rep,name=exclude_prefixes,json=excludePrefixes" json:"exclude_prefixes,omitempty"` +} + +func (m *ObjectConditions) Reset() { *m = ObjectConditions{} } +func (m *ObjectConditions) String() string { return proto.CompactTextString(m) } +func (*ObjectConditions) ProtoMessage() {} +func (*ObjectConditions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *ObjectConditions) GetMinTimeElapsedSinceLastModification() *google_protobuf3.Duration { + if m != nil { + return m.MinTimeElapsedSinceLastModification + } + return nil +} + +func (m *ObjectConditions) GetMaxTimeElapsedSinceLastModification() *google_protobuf3.Duration { + if m != nil { + return m.MaxTimeElapsedSinceLastModification + } + return nil +} + +func (m *ObjectConditions) GetIncludePrefixes() []string { + if m != nil { + return m.IncludePrefixes + } + return nil +} + +func (m *ObjectConditions) GetExcludePrefixes() []string { + if m != nil { + return m.ExcludePrefixes + } + return nil +} + +// In a GcsData, an object's name is the Google Cloud Storage object's name and +// its `lastModificationTime` refers to the object's updated time, which changes +// when the content or the metadata of the object is updated. +type GcsData struct { + // Google Cloud Storage bucket name (see + // [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). + // Required. + BucketName string `protobuf:"bytes,1,opt,name=bucket_name,json=bucketName" json:"bucket_name,omitempty"` +} + +func (m *GcsData) Reset() { *m = GcsData{} } +func (m *GcsData) String() string { return proto.CompactTextString(m) } +func (*GcsData) ProtoMessage() {} +func (*GcsData) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *GcsData) GetBucketName() string { + if m != nil { + return m.BucketName + } + return "" +} + +// An AwsS3Data can be a data source, but not a data sink. 
+// In an AwsS3Data, an object's name is the S3 object's key name. +type AwsS3Data struct { + // S3 Bucket name (see + // [Creating a bucket](http://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). + // Required. + BucketName string `protobuf:"bytes,1,opt,name=bucket_name,json=bucketName" json:"bucket_name,omitempty"` + // AWS access key used to sign the API requests to the AWS S3 bucket. + // Permissions on the bucket must be granted to the access ID of the + // AWS access key. + // Required. + AwsAccessKey *AwsAccessKey `protobuf:"bytes,2,opt,name=aws_access_key,json=awsAccessKey" json:"aws_access_key,omitempty"` +} + +func (m *AwsS3Data) Reset() { *m = AwsS3Data{} } +func (m *AwsS3Data) String() string { return proto.CompactTextString(m) } +func (*AwsS3Data) ProtoMessage() {} +func (*AwsS3Data) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *AwsS3Data) GetBucketName() string { + if m != nil { + return m.BucketName + } + return "" +} + +func (m *AwsS3Data) GetAwsAccessKey() *AwsAccessKey { + if m != nil { + return m.AwsAccessKey + } + return nil +} + +// An HttpData specifies a list of objects on the web to be transferred over +// HTTP. The information of the objects to be transferred is contained in a +// file referenced by a URL. The first line in the file must be +// "TsvHttpData-1.0", which specifies the format of the file. Subsequent lines +// specify the information of the list of objects, one object per list entry. +// Each entry has the following tab-delimited fields: +// +// * HTTP URL - The location of the object. +// +// * Length - The size of the object in bytes. +// +// * MD5 - The base64-encoded MD5 hash of the object. 
+// +// For an example of a valid TSV file, see +// [Transferring data from URLs](https://cloud.google.com/storage/transfer/#urls) +// +// When transferring data based on a URL list, keep the following in mind: +// +// * When an object located at `http(s)://hostname:port/` is transferred +// to a data sink, the name of the object at the data sink is +// `/`. +// +// * If the specified size of an object does not match the actual size of the +// object fetched, the object will not be transferred. +// +// * If the specified MD5 does not match the MD5 computed from the transferred +// bytes, the object transfer will fail. For more information, see +// [Generating MD5 hashes](https://cloud.google.com/storage/transfer/#md5) +// +// * Ensure that each URL you specify is publicly accessible. For +// example, in Google Cloud Storage you can +// [share an object publicly] +// (https://cloud.google.com/storage/docs/cloud-console#_sharingdata) and get +// a link to it. +// +// * Storage Transfer Service obeys `robots.txt` rules and requires the source +// HTTP server to support `Range` requests and to return a `Content-Length` +// header in each response. +// +// * [ObjectConditions](#ObjectConditions) have no effect when filtering objects +// to transfer. +type HttpData struct { + // The URL that points to the file that stores the object list entries. + // This file must allow public access. Currently, only URLs with HTTP and + // HTTPS schemes are supported. + // Required. 
+ ListUrl string `protobuf:"bytes,1,opt,name=list_url,json=listUrl" json:"list_url,omitempty"` +} + +func (m *HttpData) Reset() { *m = HttpData{} } +func (m *HttpData) String() string { return proto.CompactTextString(m) } +func (*HttpData) ProtoMessage() {} +func (*HttpData) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *HttpData) GetListUrl() string { + if m != nil { + return m.ListUrl + } + return "" +} + +// TransferOptions uses three boolean parameters to define the actions +// to be performed on objects in a transfer. +type TransferOptions struct { + // Whether overwriting objects that already exist in the sink is allowed. + OverwriteObjectsAlreadyExistingInSink bool `protobuf:"varint,1,opt,name=overwrite_objects_already_existing_in_sink,json=overwriteObjectsAlreadyExistingInSink" json:"overwrite_objects_already_existing_in_sink,omitempty"` + // Whether objects that exist only in the sink should be deleted. + DeleteObjectsUniqueInSink bool `protobuf:"varint,2,opt,name=delete_objects_unique_in_sink,json=deleteObjectsUniqueInSink" json:"delete_objects_unique_in_sink,omitempty"` + // Whether objects should be deleted from the source after they are + // transferred to the sink. 
+ DeleteObjectsFromSourceAfterTransfer bool `protobuf:"varint,3,opt,name=delete_objects_from_source_after_transfer,json=deleteObjectsFromSourceAfterTransfer" json:"delete_objects_from_source_after_transfer,omitempty"` +} + +func (m *TransferOptions) Reset() { *m = TransferOptions{} } +func (m *TransferOptions) String() string { return proto.CompactTextString(m) } +func (*TransferOptions) ProtoMessage() {} +func (*TransferOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *TransferOptions) GetOverwriteObjectsAlreadyExistingInSink() bool { + if m != nil { + return m.OverwriteObjectsAlreadyExistingInSink + } + return false +} + +func (m *TransferOptions) GetDeleteObjectsUniqueInSink() bool { + if m != nil { + return m.DeleteObjectsUniqueInSink + } + return false +} + +func (m *TransferOptions) GetDeleteObjectsFromSourceAfterTransfer() bool { + if m != nil { + return m.DeleteObjectsFromSourceAfterTransfer + } + return false +} + +// Configuration for running a transfer. +type TransferSpec struct { + // The read source of the data. + // + // Types that are valid to be assigned to DataSource: + // *TransferSpec_GcsDataSource + // *TransferSpec_AwsS3DataSource + // *TransferSpec_HttpDataSource + DataSource isTransferSpec_DataSource `protobuf_oneof:"data_source"` + // The write sink for the data. + // + // Types that are valid to be assigned to DataSink: + // *TransferSpec_GcsDataSink + DataSink isTransferSpec_DataSink `protobuf_oneof:"data_sink"` + // Only objects that satisfy these object conditions are included in the set + // of data source and data sink objects. Object conditions based on + // objects' `lastModificationTime` do not exclude objects in a data sink. 
+ ObjectConditions *ObjectConditions `protobuf:"bytes,5,opt,name=object_conditions,json=objectConditions" json:"object_conditions,omitempty"` + // If the option `deleteObjectsUniqueInSink` is `true`, object conditions + // based on objects' `lastModificationTime` are ignored and do not exclude + // objects in a data source or a data sink. + TransferOptions *TransferOptions `protobuf:"bytes,6,opt,name=transfer_options,json=transferOptions" json:"transfer_options,omitempty"` +} + +func (m *TransferSpec) Reset() { *m = TransferSpec{} } +func (m *TransferSpec) String() string { return proto.CompactTextString(m) } +func (*TransferSpec) ProtoMessage() {} +func (*TransferSpec) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +type isTransferSpec_DataSource interface { + isTransferSpec_DataSource() +} +type isTransferSpec_DataSink interface { + isTransferSpec_DataSink() +} + +type TransferSpec_GcsDataSource struct { + GcsDataSource *GcsData `protobuf:"bytes,1,opt,name=gcs_data_source,json=gcsDataSource,oneof"` +} +type TransferSpec_AwsS3DataSource struct { + AwsS3DataSource *AwsS3Data `protobuf:"bytes,2,opt,name=aws_s3_data_source,json=awsS3DataSource,oneof"` +} +type TransferSpec_HttpDataSource struct { + HttpDataSource *HttpData `protobuf:"bytes,3,opt,name=http_data_source,json=httpDataSource,oneof"` +} +type TransferSpec_GcsDataSink struct { + GcsDataSink *GcsData `protobuf:"bytes,4,opt,name=gcs_data_sink,json=gcsDataSink,oneof"` +} + +func (*TransferSpec_GcsDataSource) isTransferSpec_DataSource() {} +func (*TransferSpec_AwsS3DataSource) isTransferSpec_DataSource() {} +func (*TransferSpec_HttpDataSource) isTransferSpec_DataSource() {} +func (*TransferSpec_GcsDataSink) isTransferSpec_DataSink() {} + +func (m *TransferSpec) GetDataSource() isTransferSpec_DataSource { + if m != nil { + return m.DataSource + } + return nil +} +func (m *TransferSpec) GetDataSink() isTransferSpec_DataSink { + if m != nil { + return m.DataSink + } + return nil +} + +func (m 
*TransferSpec) GetGcsDataSource() *GcsData { + if x, ok := m.GetDataSource().(*TransferSpec_GcsDataSource); ok { + return x.GcsDataSource + } + return nil +} + +func (m *TransferSpec) GetAwsS3DataSource() *AwsS3Data { + if x, ok := m.GetDataSource().(*TransferSpec_AwsS3DataSource); ok { + return x.AwsS3DataSource + } + return nil +} + +func (m *TransferSpec) GetHttpDataSource() *HttpData { + if x, ok := m.GetDataSource().(*TransferSpec_HttpDataSource); ok { + return x.HttpDataSource + } + return nil +} + +func (m *TransferSpec) GetGcsDataSink() *GcsData { + if x, ok := m.GetDataSink().(*TransferSpec_GcsDataSink); ok { + return x.GcsDataSink + } + return nil +} + +func (m *TransferSpec) GetObjectConditions() *ObjectConditions { + if m != nil { + return m.ObjectConditions + } + return nil +} + +func (m *TransferSpec) GetTransferOptions() *TransferOptions { + if m != nil { + return m.TransferOptions + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TransferSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TransferSpec_OneofMarshaler, _TransferSpec_OneofUnmarshaler, _TransferSpec_OneofSizer, []interface{}{ + (*TransferSpec_GcsDataSource)(nil), + (*TransferSpec_AwsS3DataSource)(nil), + (*TransferSpec_HttpDataSource)(nil), + (*TransferSpec_GcsDataSink)(nil), + } +} + +func _TransferSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TransferSpec) + // data_source + switch x := m.DataSource.(type) { + case *TransferSpec_GcsDataSource: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GcsDataSource); err != nil { + return err + } + case *TransferSpec_AwsS3DataSource: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AwsS3DataSource); err != nil { + return err + } + case *TransferSpec_HttpDataSource: + 
b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HttpDataSource); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransferSpec.DataSource has unexpected type %T", x) + } + // data_sink + switch x := m.DataSink.(type) { + case *TransferSpec_GcsDataSink: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GcsDataSink); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TransferSpec.DataSink has unexpected type %T", x) + } + return nil +} + +func _TransferSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TransferSpec) + switch tag { + case 1: // data_source.gcs_data_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GcsData) + err := b.DecodeMessage(msg) + m.DataSource = &TransferSpec_GcsDataSource{msg} + return true, err + case 2: // data_source.aws_s3_data_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AwsS3Data) + err := b.DecodeMessage(msg) + m.DataSource = &TransferSpec_AwsS3DataSource{msg} + return true, err + case 3: // data_source.http_data_source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HttpData) + err := b.DecodeMessage(msg) + m.DataSource = &TransferSpec_HttpDataSource{msg} + return true, err + case 4: // data_sink.gcs_data_sink + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GcsData) + err := b.DecodeMessage(msg) + m.DataSink = &TransferSpec_GcsDataSink{msg} + return true, err + default: + return false, nil + } +} + +func _TransferSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TransferSpec) + // data_source + switch x := m.DataSource.(type) { + case *TransferSpec_GcsDataSource: + s := proto.Size(x.GcsDataSource) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*TransferSpec_AwsS3DataSource: + s := proto.Size(x.AwsS3DataSource) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TransferSpec_HttpDataSource: + s := proto.Size(x.HttpDataSource) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // data_sink + switch x := m.DataSink.(type) { + case *TransferSpec_GcsDataSink: + s := proto.Size(x.GcsDataSink) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Transfers can be scheduled to recur or to run just once. +type Schedule struct { + // The first day the recurring transfer is scheduled to run. If + // `scheduleStartDate` is in the past, the transfer will run for the first + // time on the following day. + // Required. + ScheduleStartDate *google_type.Date `protobuf:"bytes,1,opt,name=schedule_start_date,json=scheduleStartDate" json:"schedule_start_date,omitempty"` + // The last day the recurring transfer will be run. If `scheduleEndDate` + // is the same as `scheduleStartDate`, the transfer will be executed only + // once. + ScheduleEndDate *google_type.Date `protobuf:"bytes,2,opt,name=schedule_end_date,json=scheduleEndDate" json:"schedule_end_date,omitempty"` + // The time in UTC at which the transfer will be scheduled to start in a day. + // Transfers may start later than this time. If not specified, recurring and + // one-time transfers that are scheduled to run today will run immediately; + // recurring transfers that are scheduled to run on a future date will start + // at approximately midnight UTC on that date. Note that when configuring a + // transfer with the Cloud Platform Console, the transfer's start time in a + // day is specified in your local timezone. 
+ StartTimeOfDay *google_type1.TimeOfDay `protobuf:"bytes,3,opt,name=start_time_of_day,json=startTimeOfDay" json:"start_time_of_day,omitempty"` +} + +func (m *Schedule) Reset() { *m = Schedule{} } +func (m *Schedule) String() string { return proto.CompactTextString(m) } +func (*Schedule) ProtoMessage() {} +func (*Schedule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *Schedule) GetScheduleStartDate() *google_type.Date { + if m != nil { + return m.ScheduleStartDate + } + return nil +} + +func (m *Schedule) GetScheduleEndDate() *google_type.Date { + if m != nil { + return m.ScheduleEndDate + } + return nil +} + +func (m *Schedule) GetStartTimeOfDay() *google_type1.TimeOfDay { + if m != nil { + return m.StartTimeOfDay + } + return nil +} + +// This resource represents the configuration of a transfer job that runs +// periodically. +type TransferJob struct { + // A globally unique name assigned by Storage Transfer Service when the + // job is created. This field should be left empty in requests to create a new + // transfer job; otherwise, the requests result in an `INVALID_ARGUMENT` + // error. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A description provided by the user for the job. Its max length is 1024 + // bytes when Unicode-encoded. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // The ID of the Google Cloud Platform Console project that owns the job. + // Required. + ProjectId string `protobuf:"bytes,3,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Transfer specification. + // Required. + TransferSpec *TransferSpec `protobuf:"bytes,4,opt,name=transfer_spec,json=transferSpec" json:"transfer_spec,omitempty"` + // Schedule specification. + // Required. + Schedule *Schedule `protobuf:"bytes,5,opt,name=schedule" json:"schedule,omitempty"` + // Status of the job. This value MUST be specified for + // `CreateTransferJobRequests`. 
+ // + // NOTE: The effect of the new job status takes place during a subsequent job + // run. For example, if you change the job status from `ENABLED` to + // `DISABLED`, and an operation spawned by the transfer is running, the status + // change would not affect the current operation. + Status TransferJob_Status `protobuf:"varint,6,opt,name=status,enum=google.storagetransfer.v1.TransferJob_Status" json:"status,omitempty"` + // This field cannot be changed by user requests. + CreationTime *google_protobuf4.Timestamp `protobuf:"bytes,7,opt,name=creation_time,json=creationTime" json:"creation_time,omitempty"` + // This field cannot be changed by user requests. + LastModificationTime *google_protobuf4.Timestamp `protobuf:"bytes,8,opt,name=last_modification_time,json=lastModificationTime" json:"last_modification_time,omitempty"` + // This field cannot be changed by user requests. + DeletionTime *google_protobuf4.Timestamp `protobuf:"bytes,9,opt,name=deletion_time,json=deletionTime" json:"deletion_time,omitempty"` +} + +func (m *TransferJob) Reset() { *m = TransferJob{} } +func (m *TransferJob) String() string { return proto.CompactTextString(m) } +func (*TransferJob) ProtoMessage() {} +func (*TransferJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *TransferJob) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TransferJob) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *TransferJob) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *TransferJob) GetTransferSpec() *TransferSpec { + if m != nil { + return m.TransferSpec + } + return nil +} + +func (m *TransferJob) GetSchedule() *Schedule { + if m != nil { + return m.Schedule + } + return nil +} + +func (m *TransferJob) GetStatus() TransferJob_Status { + if m != nil { + return m.Status + } + return TransferJob_STATUS_UNSPECIFIED +} + +func (m *TransferJob) 
GetCreationTime() *google_protobuf4.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *TransferJob) GetLastModificationTime() *google_protobuf4.Timestamp { + if m != nil { + return m.LastModificationTime + } + return nil +} + +func (m *TransferJob) GetDeletionTime() *google_protobuf4.Timestamp { + if m != nil { + return m.DeletionTime + } + return nil +} + +// An entry describing an error that has occurred. +type ErrorLogEntry struct { + // A URL that refers to the target (a data source, a data sink, + // or an object) with which the error is associated. + // Required. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + // A list of messages that carry the error details. + ErrorDetails []string `protobuf:"bytes,3,rep,name=error_details,json=errorDetails" json:"error_details,omitempty"` +} + +func (m *ErrorLogEntry) Reset() { *m = ErrorLogEntry{} } +func (m *ErrorLogEntry) String() string { return proto.CompactTextString(m) } +func (*ErrorLogEntry) ProtoMessage() {} +func (*ErrorLogEntry) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *ErrorLogEntry) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ErrorLogEntry) GetErrorDetails() []string { + if m != nil { + return m.ErrorDetails + } + return nil +} + +// A summary of errors by error code, plus a count and sample error log +// entries. +type ErrorSummary struct { + // Required. + ErrorCode google_rpc.Code `protobuf:"varint,1,opt,name=error_code,json=errorCode,enum=google.rpc.Code" json:"error_code,omitempty"` + // Count of this type of error. + // Required. + ErrorCount int64 `protobuf:"varint,2,opt,name=error_count,json=errorCount" json:"error_count,omitempty"` + // Error samples. 
+ ErrorLogEntries []*ErrorLogEntry `protobuf:"bytes,3,rep,name=error_log_entries,json=errorLogEntries" json:"error_log_entries,omitempty"` +} + +func (m *ErrorSummary) Reset() { *m = ErrorSummary{} } +func (m *ErrorSummary) String() string { return proto.CompactTextString(m) } +func (*ErrorSummary) ProtoMessage() {} +func (*ErrorSummary) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *ErrorSummary) GetErrorCode() google_rpc.Code { + if m != nil { + return m.ErrorCode + } + return google_rpc.Code_OK +} + +func (m *ErrorSummary) GetErrorCount() int64 { + if m != nil { + return m.ErrorCount + } + return 0 +} + +func (m *ErrorSummary) GetErrorLogEntries() []*ErrorLogEntry { + if m != nil { + return m.ErrorLogEntries + } + return nil +} + +// A collection of counters that report the progress of a transfer operation. +type TransferCounters struct { + // Objects found in the data source that are scheduled to be transferred, + // which will be copied, excluded based on conditions, or skipped due to + // failures. + ObjectsFoundFromSource int64 `protobuf:"varint,1,opt,name=objects_found_from_source,json=objectsFoundFromSource" json:"objects_found_from_source,omitempty"` + // Bytes found in the data source that are scheduled to be transferred, + // which will be copied, excluded based on conditions, or skipped due to + // failures. + BytesFoundFromSource int64 `protobuf:"varint,2,opt,name=bytes_found_from_source,json=bytesFoundFromSource" json:"bytes_found_from_source,omitempty"` + // Objects found only in the data sink that are scheduled to be deleted. + ObjectsFoundOnlyFromSink int64 `protobuf:"varint,3,opt,name=objects_found_only_from_sink,json=objectsFoundOnlyFromSink" json:"objects_found_only_from_sink,omitempty"` + // Bytes found only in the data sink that are scheduled to be deleted. 
+ BytesFoundOnlyFromSink int64 `protobuf:"varint,4,opt,name=bytes_found_only_from_sink,json=bytesFoundOnlyFromSink" json:"bytes_found_only_from_sink,omitempty"` + // Objects in the data source that are not transferred because they already + // exist in the data sink. + ObjectsFromSourceSkippedBySync int64 `protobuf:"varint,5,opt,name=objects_from_source_skipped_by_sync,json=objectsFromSourceSkippedBySync" json:"objects_from_source_skipped_by_sync,omitempty"` + // Bytes in the data source that are not transferred because they already + // exist in the data sink. + BytesFromSourceSkippedBySync int64 `protobuf:"varint,6,opt,name=bytes_from_source_skipped_by_sync,json=bytesFromSourceSkippedBySync" json:"bytes_from_source_skipped_by_sync,omitempty"` + // Objects that are copied to the data sink. + ObjectsCopiedToSink int64 `protobuf:"varint,7,opt,name=objects_copied_to_sink,json=objectsCopiedToSink" json:"objects_copied_to_sink,omitempty"` + // Bytes that are copied to the data sink. + BytesCopiedToSink int64 `protobuf:"varint,8,opt,name=bytes_copied_to_sink,json=bytesCopiedToSink" json:"bytes_copied_to_sink,omitempty"` + // Objects that are deleted from the data source. + ObjectsDeletedFromSource int64 `protobuf:"varint,9,opt,name=objects_deleted_from_source,json=objectsDeletedFromSource" json:"objects_deleted_from_source,omitempty"` + // Bytes that are deleted from the data source. + BytesDeletedFromSource int64 `protobuf:"varint,10,opt,name=bytes_deleted_from_source,json=bytesDeletedFromSource" json:"bytes_deleted_from_source,omitempty"` + // Objects that are deleted from the data sink. + ObjectsDeletedFromSink int64 `protobuf:"varint,11,opt,name=objects_deleted_from_sink,json=objectsDeletedFromSink" json:"objects_deleted_from_sink,omitempty"` + // Bytes that are deleted from the data sink. 
+ BytesDeletedFromSink int64 `protobuf:"varint,12,opt,name=bytes_deleted_from_sink,json=bytesDeletedFromSink" json:"bytes_deleted_from_sink,omitempty"` + // Objects in the data source that failed during the transfer. + ObjectsFromSourceFailed int64 `protobuf:"varint,13,opt,name=objects_from_source_failed,json=objectsFromSourceFailed" json:"objects_from_source_failed,omitempty"` + // Bytes in the data source that failed during the transfer. + BytesFromSourceFailed int64 `protobuf:"varint,14,opt,name=bytes_from_source_failed,json=bytesFromSourceFailed" json:"bytes_from_source_failed,omitempty"` + // Objects that failed to be deleted from the data sink. + ObjectsFailedToDeleteFromSink int64 `protobuf:"varint,15,opt,name=objects_failed_to_delete_from_sink,json=objectsFailedToDeleteFromSink" json:"objects_failed_to_delete_from_sink,omitempty"` + // Bytes that failed to be deleted from the data sink. + BytesFailedToDeleteFromSink int64 `protobuf:"varint,16,opt,name=bytes_failed_to_delete_from_sink,json=bytesFailedToDeleteFromSink" json:"bytes_failed_to_delete_from_sink,omitempty"` +} + +func (m *TransferCounters) Reset() { *m = TransferCounters{} } +func (m *TransferCounters) String() string { return proto.CompactTextString(m) } +func (*TransferCounters) ProtoMessage() {} +func (*TransferCounters) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *TransferCounters) GetObjectsFoundFromSource() int64 { + if m != nil { + return m.ObjectsFoundFromSource + } + return 0 +} + +func (m *TransferCounters) GetBytesFoundFromSource() int64 { + if m != nil { + return m.BytesFoundFromSource + } + return 0 +} + +func (m *TransferCounters) GetObjectsFoundOnlyFromSink() int64 { + if m != nil { + return m.ObjectsFoundOnlyFromSink + } + return 0 +} + +func (m *TransferCounters) GetBytesFoundOnlyFromSink() int64 { + if m != nil { + return m.BytesFoundOnlyFromSink + } + return 0 +} + +func (m *TransferCounters) GetObjectsFromSourceSkippedBySync() int64 { + if m != 
nil { + return m.ObjectsFromSourceSkippedBySync + } + return 0 +} + +func (m *TransferCounters) GetBytesFromSourceSkippedBySync() int64 { + if m != nil { + return m.BytesFromSourceSkippedBySync + } + return 0 +} + +func (m *TransferCounters) GetObjectsCopiedToSink() int64 { + if m != nil { + return m.ObjectsCopiedToSink + } + return 0 +} + +func (m *TransferCounters) GetBytesCopiedToSink() int64 { + if m != nil { + return m.BytesCopiedToSink + } + return 0 +} + +func (m *TransferCounters) GetObjectsDeletedFromSource() int64 { + if m != nil { + return m.ObjectsDeletedFromSource + } + return 0 +} + +func (m *TransferCounters) GetBytesDeletedFromSource() int64 { + if m != nil { + return m.BytesDeletedFromSource + } + return 0 +} + +func (m *TransferCounters) GetObjectsDeletedFromSink() int64 { + if m != nil { + return m.ObjectsDeletedFromSink + } + return 0 +} + +func (m *TransferCounters) GetBytesDeletedFromSink() int64 { + if m != nil { + return m.BytesDeletedFromSink + } + return 0 +} + +func (m *TransferCounters) GetObjectsFromSourceFailed() int64 { + if m != nil { + return m.ObjectsFromSourceFailed + } + return 0 +} + +func (m *TransferCounters) GetBytesFromSourceFailed() int64 { + if m != nil { + return m.BytesFromSourceFailed + } + return 0 +} + +func (m *TransferCounters) GetObjectsFailedToDeleteFromSink() int64 { + if m != nil { + return m.ObjectsFailedToDeleteFromSink + } + return 0 +} + +func (m *TransferCounters) GetBytesFailedToDeleteFromSink() int64 { + if m != nil { + return m.BytesFailedToDeleteFromSink + } + return 0 +} + +// A description of the execution of a transfer. +type TransferOperation struct { + // A globally unique ID assigned by the system. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The ID of the Google Cloud Platform Console project that owns the operation. + // Required. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Transfer specification. 
+ // Required. + TransferSpec *TransferSpec `protobuf:"bytes,3,opt,name=transfer_spec,json=transferSpec" json:"transfer_spec,omitempty"` + // Start time of this transfer execution. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // End time of this transfer execution. + EndTime *google_protobuf4.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Status of the transfer operation. + Status TransferOperation_Status `protobuf:"varint,6,opt,name=status,enum=google.storagetransfer.v1.TransferOperation_Status" json:"status,omitempty"` + // Information about the progress of the transfer operation. + Counters *TransferCounters `protobuf:"bytes,7,opt,name=counters" json:"counters,omitempty"` + // Summarizes errors encountered with sample error log entries. + ErrorBreakdowns []*ErrorSummary `protobuf:"bytes,8,rep,name=error_breakdowns,json=errorBreakdowns" json:"error_breakdowns,omitempty"` + // The name of the transfer job that triggers this transfer operation. 
+ TransferJobName string `protobuf:"bytes,9,opt,name=transfer_job_name,json=transferJobName" json:"transfer_job_name,omitempty"` +} + +func (m *TransferOperation) Reset() { *m = TransferOperation{} } +func (m *TransferOperation) String() string { return proto.CompactTextString(m) } +func (*TransferOperation) ProtoMessage() {} +func (*TransferOperation) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *TransferOperation) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TransferOperation) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *TransferOperation) GetTransferSpec() *TransferSpec { + if m != nil { + return m.TransferSpec + } + return nil +} + +func (m *TransferOperation) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *TransferOperation) GetEndTime() *google_protobuf4.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *TransferOperation) GetStatus() TransferOperation_Status { + if m != nil { + return m.Status + } + return TransferOperation_STATUS_UNSPECIFIED +} + +func (m *TransferOperation) GetCounters() *TransferCounters { + if m != nil { + return m.Counters + } + return nil +} + +func (m *TransferOperation) GetErrorBreakdowns() []*ErrorSummary { + if m != nil { + return m.ErrorBreakdowns + } + return nil +} + +func (m *TransferOperation) GetTransferJobName() string { + if m != nil { + return m.TransferJobName + } + return "" +} + +func init() { + proto.RegisterType((*GoogleServiceAccount)(nil), "google.storagetransfer.v1.GoogleServiceAccount") + proto.RegisterType((*AwsAccessKey)(nil), "google.storagetransfer.v1.AwsAccessKey") + proto.RegisterType((*ObjectConditions)(nil), "google.storagetransfer.v1.ObjectConditions") + proto.RegisterType((*GcsData)(nil), "google.storagetransfer.v1.GcsData") + proto.RegisterType((*AwsS3Data)(nil), 
"google.storagetransfer.v1.AwsS3Data") + proto.RegisterType((*HttpData)(nil), "google.storagetransfer.v1.HttpData") + proto.RegisterType((*TransferOptions)(nil), "google.storagetransfer.v1.TransferOptions") + proto.RegisterType((*TransferSpec)(nil), "google.storagetransfer.v1.TransferSpec") + proto.RegisterType((*Schedule)(nil), "google.storagetransfer.v1.Schedule") + proto.RegisterType((*TransferJob)(nil), "google.storagetransfer.v1.TransferJob") + proto.RegisterType((*ErrorLogEntry)(nil), "google.storagetransfer.v1.ErrorLogEntry") + proto.RegisterType((*ErrorSummary)(nil), "google.storagetransfer.v1.ErrorSummary") + proto.RegisterType((*TransferCounters)(nil), "google.storagetransfer.v1.TransferCounters") + proto.RegisterType((*TransferOperation)(nil), "google.storagetransfer.v1.TransferOperation") + proto.RegisterEnum("google.storagetransfer.v1.TransferJob_Status", TransferJob_Status_name, TransferJob_Status_value) + proto.RegisterEnum("google.storagetransfer.v1.TransferOperation_Status", TransferOperation_Status_name, TransferOperation_Status_value) +} + +func init() { proto.RegisterFile("google/storagetransfer/v1/transfer_types.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1729 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xdd, 0x72, 0xdb, 0xc6, + 0x15, 0x36, 0x45, 0x59, 0x22, 0x0f, 0x49, 0x91, 0xda, 0x38, 0x0a, 0x25, 0xdb, 0x89, 0x0b, 0x25, + 0x63, 0xc7, 0x99, 0x92, 0x13, 0x69, 0x32, 0x1d, 0xd7, 0x93, 0xba, 0x94, 0x48, 0x49, 0x8c, 0x15, + 0x4b, 0x03, 0x50, 0xd3, 0x9f, 0x8b, 0x62, 0x96, 0xc0, 0x92, 0x46, 0x04, 0x62, 0x51, 0xec, 0xd2, + 0x12, 0xa6, 0x57, 0xb9, 0xea, 0x33, 0xf4, 0x45, 0xfa, 0x14, 0x9d, 0xe9, 0x5d, 0x9f, 0xa3, 0x97, + 0xbd, 0xec, 0xec, 0x0f, 0x40, 0x10, 0xa2, 0x49, 0xcd, 0xe4, 0x0e, 0x38, 0xe7, 0x7c, 0xdf, 0xd9, + 0xf3, 0xc3, 0x73, 0x16, 0x84, 0xd6, 0x98, 0xd2, 0xb1, 0x4f, 0xda, 0x8c, 0xd3, 0x08, 0x8f, 0x09, + 0x8f, 0x70, 0xc0, 0x46, 0x24, 0x6a, 0x7f, 0xf8, 
0xb6, 0x9d, 0x3c, 0xdb, 0x3c, 0x0e, 0x09, 0x6b, + 0x85, 0x11, 0xe5, 0x14, 0xed, 0x2a, 0xfb, 0x56, 0xce, 0xbe, 0xf5, 0xe1, 0xdb, 0xbd, 0x27, 0x9a, + 0x0a, 0x87, 0x5e, 0x1b, 0x07, 0x01, 0xe5, 0x98, 0x7b, 0x34, 0xd0, 0xc0, 0xbd, 0xcf, 0xb5, 0x56, + 0xbe, 0x0d, 0xa7, 0xa3, 0xb6, 0x3b, 0x8d, 0xa4, 0x81, 0xd6, 0x7f, 0x91, 0xd7, 0x73, 0x6f, 0x42, + 0x18, 0xc7, 0x93, 0x50, 0x1b, 0x7c, 0xaa, 0x0d, 0xa2, 0xd0, 0x69, 0x3b, 0xd4, 0x25, 0x5a, 0xbc, + 0xa3, 0xc5, 0xe2, 0x90, 0x6d, 0x17, 0xf3, 0x44, 0xfe, 0x38, 0x2b, 0x17, 0x5c, 0x74, 0xe4, 0xe2, + 0x58, 0x29, 0x8d, 0xd7, 0xf0, 0xe8, 0x54, 0xaa, 0x2d, 0x12, 0x7d, 0xf0, 0x1c, 0xd2, 0x71, 0x1c, + 0x3a, 0x0d, 0x38, 0xda, 0x87, 0x1a, 0x56, 0x8f, 0x36, 0x99, 0x60, 0xcf, 0x6f, 0x16, 0x9e, 0x15, + 0x5e, 0x94, 0xcd, 0xaa, 0x16, 0xf6, 0x84, 0xcc, 0xf8, 0x0b, 0x54, 0x3b, 0x37, 0xac, 0xe3, 0x38, + 0x84, 0xb1, 0xb7, 0x24, 0x46, 0x86, 0x04, 0x11, 0xc6, 0xec, 0x6b, 0x12, 0xdb, 0x9e, 0xab, 0x41, + 0x15, 0x9c, 0x58, 0xf4, 0x5d, 0xf4, 0x12, 0xb6, 0x19, 0x71, 0x22, 0xc2, 0xed, 0x99, 0x69, 0x73, + 0x4d, 0xda, 0xd5, 0x95, 0x22, 0xe5, 0x33, 0xfe, 0xb5, 0x06, 0x8d, 0x8b, 0xe1, 0x4f, 0xc4, 0xe1, + 0xc7, 0x34, 0x70, 0x3d, 0x99, 0x44, 0x14, 0xc2, 0x8b, 0x89, 0x17, 0xd8, 0x22, 0x10, 0x9b, 0xf8, + 0x38, 0x64, 0xc4, 0xb5, 0x99, 0x17, 0x38, 0xc4, 0xf6, 0x31, 0xe3, 0xf6, 0x84, 0xba, 0xde, 0xc8, + 0x73, 0x64, 0x42, 0xa5, 0xff, 0xca, 0xc1, 0xae, 0x2e, 0x6d, 0x2b, 0xc9, 0x68, 0xab, 0xab, 0x33, + 0x6e, 0xee, 0x4f, 0xbc, 0x60, 0xe0, 0x4d, 0x48, 0x4f, 0x11, 0x59, 0x82, 0xe7, 0x1c, 0x33, 0xfe, + 0x63, 0x86, 0x45, 0x7a, 0xc4, 0xb7, 0xf7, 0xf3, 0xb8, 0xb6, 0xda, 0x23, 0xbe, 0x5d, 0xe9, 0xf1, + 0x6b, 0x68, 0x78, 0x81, 0xe3, 0x4f, 0x5d, 0x62, 0x87, 0x11, 0x19, 0x79, 0xb7, 0x84, 0x35, 0x8b, + 0xcf, 0x8a, 0x22, 0x47, 0x5a, 0x7e, 0xa9, 0xc5, 0xc2, 0x94, 0xdc, 0xe6, 0x4c, 0xd7, 0x95, 0xa9, + 0x96, 0x27, 0xa6, 0xc6, 0x4b, 0xd8, 0x3c, 0x75, 0x58, 0x17, 0x73, 0x8c, 0xbe, 0x80, 0xca, 0x70, + 0xea, 0x5c, 0x13, 0x6e, 0x07, 0x78, 0x42, 0x74, 0x9d, 0x40, 0x89, 0xde, 0xe1, 0x09, 
0x31, 0xfe, + 0x06, 0xe5, 0xce, 0x0d, 0xb3, 0x0e, 0xef, 0x65, 0x8d, 0x7e, 0x84, 0x2d, 0x7c, 0xc3, 0xf2, 0x15, + 0xad, 0x1c, 0x3c, 0x6f, 0x7d, 0xf4, 0x47, 0xd2, 0xca, 0x76, 0x8e, 0x59, 0xc5, 0x99, 0x37, 0xe3, + 0x2b, 0x28, 0x9d, 0x71, 0x1e, 0x4a, 0xdf, 0xbb, 0x50, 0xf2, 0x3d, 0xc6, 0xed, 0x69, 0x94, 0xf4, + 0xe0, 0xa6, 0x78, 0xbf, 0x8a, 0x7c, 0xe3, 0xef, 0x6b, 0x50, 0x1f, 0x68, 0xc6, 0x8b, 0x50, 0x75, + 0xc7, 0x9f, 0xe0, 0x25, 0xfd, 0x40, 0xa2, 0x9b, 0xc8, 0xe3, 0xc4, 0xa6, 0xb2, 0x77, 0x98, 0x8d, + 0xfd, 0x88, 0x60, 0x37, 0xb6, 0xc9, 0xad, 0xc7, 0xb8, 0x17, 0x8c, 0x6d, 0x2f, 0x10, 0x05, 0xbc, + 0x96, 0x84, 0x25, 0xf3, 0xab, 0x14, 0xa1, 0x9a, 0x8d, 0x75, 0x94, 0x7d, 0x4f, 0x9b, 0xf7, 0x03, + 0xcb, 0x0b, 0xae, 0xd1, 0xef, 0xe1, 0xa9, 0x4b, 0x7c, 0x92, 0xe1, 0x9d, 0x06, 0xde, 0x5f, 0xa7, + 0x24, 0x65, 0x5b, 0x93, 0x6c, 0xbb, 0xca, 0x48, 0x53, 0x5d, 0x49, 0x13, 0xcd, 0xf0, 0x07, 0xf8, + 0x3a, 0xc7, 0x30, 0x8a, 0xe8, 0xc4, 0x66, 0x74, 0x1a, 0x39, 0xc4, 0xc6, 0x23, 0x2e, 0x46, 0x8c, + 0x0e, 0xa8, 0x59, 0x94, 0x6c, 0x5f, 0xce, 0xb1, 0x9d, 0x44, 0x74, 0x62, 0x49, 0xeb, 0x8e, 0x30, + 0x4e, 0x82, 0x37, 0xfe, 0xb1, 0x0e, 0xd5, 0xe4, 0xc5, 0x0a, 0x89, 0x83, 0xce, 0xa1, 0x3e, 0x76, + 0x98, 0xed, 0x62, 0x8e, 0x35, 0xbd, 0xfe, 0x2d, 0x18, 0x4b, 0x2a, 0xa2, 0x9b, 0xe3, 0xec, 0x81, + 0x59, 0x1b, 0xab, 0x47, 0xe5, 0x0b, 0x59, 0x80, 0x44, 0x79, 0xd9, 0xe1, 0x1c, 0xa1, 0x2a, 0xf1, + 0x97, 0xcb, 0x4b, 0xac, 0x3a, 0xe8, 0xec, 0x81, 0x59, 0xc7, 0xc9, 0x8b, 0x26, 0xbd, 0x80, 0xc6, + 0x7b, 0xce, 0xc3, 0x39, 0xca, 0xa2, 0xa4, 0xdc, 0x5f, 0x42, 0x99, 0xf4, 0xc5, 0xd9, 0x03, 0x73, + 0xeb, 0xbd, 0x7e, 0xd6, 0x84, 0x67, 0x50, 0x9b, 0xc5, 0x2c, 0xea, 0xb1, 0x7e, 0xef, 0x88, 0x0b, + 0x66, 0x25, 0x89, 0x58, 0xd4, 0xe9, 0x8f, 0xb0, 0xad, 0x0a, 0x64, 0x3b, 0xe9, 0xdc, 0x69, 0x3e, + 0x94, 0x6c, 0xdf, 0x2c, 0x61, 0xcb, 0x8f, 0x2a, 0xb3, 0x41, 0xf3, 0xc3, 0xeb, 0x0a, 0x1a, 0xe9, + 0x32, 0xa1, 0xaa, 0x65, 0x9b, 0x1b, 0x92, 0xf8, 0xe5, 0x12, 0xe2, 0x5c, 0x93, 0x9b, 0x75, 0x3e, + 0x2f, 0x38, 0xaa, 0x41, 
0x25, 0x93, 0xc6, 0xa3, 0x0a, 0x94, 0xd3, 0x2c, 0x18, 0xff, 0x2e, 0x40, + 0xc9, 0x72, 0xde, 0x13, 0x77, 0xea, 0x13, 0xd4, 0x81, 0x4f, 0x98, 0x7e, 0xb6, 0x19, 0xc7, 0x11, + 0x17, 0xe9, 0x4a, 0x7a, 0x63, 0x3b, 0x39, 0x82, 0xd8, 0x14, 0xad, 0x2e, 0xe6, 0xc4, 0xdc, 0x4e, + 0xac, 0x2d, 0x61, 0x2c, 0x44, 0xe8, 0x7b, 0x48, 0x85, 0x36, 0x09, 0x5c, 0x45, 0xb0, 0xf6, 0x31, + 0x82, 0x7a, 0x62, 0xdb, 0x0b, 0x5c, 0x09, 0xef, 0xc0, 0xb6, 0x72, 0x2c, 0xc7, 0x29, 0x1d, 0xd9, + 0x2e, 0x8e, 0x75, 0xdd, 0x77, 0xe6, 0xe0, 0x62, 0x48, 0x5e, 0x8c, 0xba, 0x38, 0x36, 0xb7, 0x24, + 0x20, 0x7d, 0x37, 0xfe, 0xb3, 0x0e, 0x95, 0x24, 0x25, 0x3f, 0xd0, 0x21, 0x42, 0xb0, 0x9e, 0x99, + 0x4b, 0xf2, 0x19, 0x3d, 0x83, 0x8a, 0x4b, 0x98, 0x13, 0x79, 0x61, 0x3a, 0x96, 0xcb, 0x66, 0x56, + 0x84, 0x9e, 0x02, 0x84, 0x11, 0x95, 0x55, 0xf6, 0x5c, 0x79, 0x82, 0xb2, 0x59, 0xd6, 0x92, 0xbe, + 0x8b, 0xce, 0xa1, 0x96, 0x56, 0x8a, 0x85, 0xc4, 0xd1, 0xdd, 0xf4, 0xfc, 0x1e, 0x65, 0x12, 0xbf, + 0x40, 0xb3, 0xca, 0xb3, 0xbf, 0xc7, 0x37, 0x50, 0x4a, 0x12, 0xa1, 0x1b, 0x69, 0x59, 0x93, 0x27, + 0xe5, 0x32, 0x53, 0x10, 0xea, 0xc1, 0x06, 0xe3, 0x98, 0x4f, 0x55, 0xbb, 0x6c, 0x1d, 0xfc, 0xfa, + 0x1e, 0xe7, 0xf8, 0x81, 0x0e, 0x5b, 0x96, 0x04, 0x99, 0x1a, 0x8c, 0xde, 0x40, 0xcd, 0x89, 0x88, + 0x5c, 0x32, 0xb2, 0x00, 0xcd, 0x4d, 0x79, 0x98, 0xbd, 0x3b, 0xfb, 0x6a, 0x90, 0xdc, 0x39, 0xcc, + 0x6a, 0x02, 0x10, 0x22, 0x74, 0x09, 0x3b, 0x77, 0x96, 0x9e, 0x62, 0x2a, 0xad, 0x64, 0x7a, 0xe4, + 0xe7, 0xf6, 0x9c, 0x64, 0x7c, 0x03, 0x35, 0x39, 0xe3, 0x52, 0xa2, 0xf2, 0xea, 0x23, 0x25, 0x00, + 0x21, 0x32, 0xce, 0x60, 0x43, 0x45, 0x89, 0x76, 0x00, 0x59, 0x83, 0xce, 0xe0, 0xca, 0xb2, 0xaf, + 0xde, 0x59, 0x97, 0xbd, 0xe3, 0xfe, 0x49, 0xbf, 0xd7, 0x6d, 0x3c, 0x40, 0x15, 0xd8, 0xec, 0xbd, + 0xeb, 0x1c, 0x9d, 0xf7, 0xba, 0x8d, 0x02, 0xaa, 0x42, 0xa9, 0xdb, 0xb7, 0xd4, 0xdb, 0x9a, 0x50, + 0x75, 0x7b, 0xe7, 0xbd, 0x41, 0xaf, 0xdb, 0x28, 0x1a, 0x27, 0x50, 0xeb, 0x45, 0x11, 0x8d, 0xce, + 0xe9, 0xb8, 0x17, 0xf0, 0x28, 0x46, 0x0d, 0x28, 0xce, 0xf6, 
0x8e, 0x78, 0x14, 0xf7, 0x22, 0x22, + 0x4c, 0x6c, 0x97, 0x70, 0xec, 0xf9, 0xc9, 0x5a, 0xae, 0x4a, 0x61, 0x57, 0xc9, 0x8c, 0x7f, 0x16, + 0xa0, 0x2a, 0x89, 0xac, 0xe9, 0x64, 0x82, 0xa3, 0x18, 0xb5, 0x01, 0x14, 0x4a, 0x5c, 0xd7, 0x24, + 0xdd, 0xd6, 0x41, 0x23, 0x09, 0x30, 0x0a, 0x9d, 0xd6, 0x31, 0x75, 0x89, 0x59, 0x96, 0x36, 0xe2, + 0x51, 0x6c, 0xdc, 0x04, 0x30, 0x0d, 0xb8, 0x6c, 0xdf, 0xa2, 0x09, 0x5a, 0x2f, 0xee, 0x67, 0x03, + 0xd8, 0x56, 0x06, 0x3e, 0x1d, 0xdb, 0x24, 0xe0, 0x91, 0xa7, 0xaf, 0x08, 0x95, 0x83, 0x17, 0x4b, + 0x5a, 0x63, 0x2e, 0x3c, 0xb3, 0x4e, 0x32, 0xaf, 0x1e, 0x61, 0xc6, 0x7f, 0x37, 0xa1, 0x91, 0x74, + 0x8f, 0xf4, 0x43, 0x22, 0x86, 0x5e, 0xc1, 0x6e, 0xba, 0xae, 0xe8, 0x34, 0x70, 0xb3, 0x4b, 0x4b, + 0xc6, 0x52, 0x34, 0x77, 0xb4, 0xc1, 0x89, 0xd0, 0xcf, 0x96, 0x14, 0xfa, 0x0e, 0x3e, 0x1b, 0xc6, + 0x9c, 0x2c, 0x02, 0xaa, 0x90, 0x1e, 0x49, 0x75, 0x1e, 0xf6, 0x3b, 0x78, 0x32, 0xef, 0x91, 0x06, + 0x7e, 0xac, 0xd1, 0x62, 0xb0, 0x17, 0x25, 0xb6, 0x99, 0x75, 0x7a, 0x11, 0xf8, 0xb1, 0x64, 0x10, + 0xf3, 0xfb, 0xb7, 0xb0, 0x97, 0x75, 0x9b, 0x43, 0xaf, 0xab, 0x23, 0xcf, 0x3c, 0xcf, 0x61, 0xdf, + 0xc2, 0xfe, 0xa2, 0xe5, 0xcc, 0xae, 0xbd, 0x30, 0x24, 0xae, 0x3d, 0x8c, 0x6d, 0x16, 0x07, 0x8e, + 0xfc, 0x11, 0x17, 0xcd, 0xcf, 0x69, 0x7e, 0x2f, 0x5b, 0xca, 0xee, 0x28, 0xb6, 0xe2, 0xc0, 0x41, + 0xa7, 0xf0, 0x2b, 0x7d, 0x90, 0x25, 0x54, 0x1b, 0x92, 0xea, 0x89, 0x3a, 0xcf, 0x47, 0x88, 0x0e, + 0x21, 0x49, 0xb1, 0xed, 0xd0, 0xd0, 0x23, 0xae, 0xcd, 0xa9, 0x8a, 0x66, 0x53, 0xa2, 0x3f, 0xd1, + 0xda, 0x63, 0xa9, 0x1c, 0x50, 0x19, 0x4a, 0x1b, 0x54, 0x7a, 0xf3, 0x90, 0x92, 0x84, 0x6c, 0x4b, + 0xdd, 0x1c, 0xe0, 0x7b, 0x78, 0x9c, 0x78, 0x51, 0xd7, 0x8e, 0xf9, 0x92, 0x95, 0xe7, 0xd2, 0xde, + 0x55, 0x16, 0x99, 0xb2, 0xbd, 0x82, 0x5d, 0xe5, 0x6f, 0x11, 0x18, 0x32, 0x59, 0x5f, 0x08, 0x5d, + 0xec, 0x59, 0x9c, 0xb7, 0x32, 0xd7, 0x63, 0x59, 0xb0, 0x38, 0x74, 0xda, 0x63, 0x77, 0x81, 0xd5, + 0x4c, 0x8f, 0xe5, 0x61, 0xaf, 0x61, 0x6f, 0x51, 0x9d, 0x47, 0xd8, 0xf3, 0x89, 0xdb, 0xac, 0x49, + 
0xe4, 0x67, 0x77, 0xca, 0x7b, 0x22, 0xd5, 0xe8, 0x37, 0xd0, 0xbc, 0x5b, 0x57, 0x0d, 0xdd, 0x92, + 0xd0, 0x4f, 0x73, 0xe5, 0xd4, 0xc0, 0x3e, 0x18, 0xa9, 0x57, 0x29, 0x11, 0x45, 0xd1, 0x77, 0xc2, + 0xd9, 0xb9, 0xeb, 0x92, 0xe2, 0x69, 0xe2, 0x5d, 0x1a, 0x0e, 0xa8, 0x8a, 0x20, 0x0d, 0xa0, 0x07, + 0xcf, 0xf4, 0x19, 0x3e, 0x4e, 0xd4, 0x90, 0x44, 0x8f, 0xd5, 0x59, 0x16, 0xd2, 0x18, 0x3f, 0x3f, + 0x84, 0xed, 0xd9, 0xfd, 0x82, 0xa8, 0xaf, 0x94, 0x85, 0x2b, 0x75, 0x7e, 0x61, 0xae, 0xad, 0x5c, + 0x98, 0xc5, 0x5f, 0xb2, 0x30, 0x5f, 0x01, 0xcc, 0xae, 0x09, 0x7a, 0xf7, 0x2e, 0x5b, 0x09, 0xe5, + 0xf4, 0x8e, 0x80, 0xbe, 0x83, 0x92, 0xb8, 0x97, 0x48, 0xe0, 0xc3, 0x95, 0xc0, 0x4d, 0x12, 0xb8, + 0x12, 0xf6, 0x36, 0xb7, 0x61, 0x0f, 0xef, 0x75, 0x21, 0xd3, 0x09, 0xcb, 0xef, 0xd9, 0x53, 0x28, + 0x39, 0x7a, 0x7e, 0xea, 0x15, 0xfb, 0xcd, 0x3d, 0xe8, 0x92, 0x91, 0x6b, 0xa6, 0x60, 0x64, 0x42, + 0x43, 0xcd, 0xf9, 0x61, 0x44, 0xf0, 0xb5, 0x4b, 0x6f, 0x02, 0xd6, 0x2c, 0xc9, 0x31, 0xff, 0x7c, + 0xd5, 0x98, 0xd7, 0xcb, 0x47, 0x4f, 0xf9, 0xa3, 0x14, 0x2f, 0x3e, 0xc1, 0xd3, 0x4a, 0xfd, 0x44, + 0x87, 0xea, 0xa3, 0xae, 0xac, 0x3e, 0xc1, 0xf9, 0xec, 0xee, 0x20, 0xbf, 0x03, 0x9d, 0x95, 0xcb, + 0xb5, 0x0e, 0x95, 0xfe, 0x3b, 0xfb, 0xd2, 0xbc, 0x38, 0x35, 0x7b, 0x96, 0xd5, 0x28, 0x20, 0x80, + 0x8d, 0xcb, 0xce, 0x95, 0x95, 0xac, 0x57, 0xeb, 0xea, 0xf8, 0x58, 0x28, 0x8a, 0x42, 0x71, 0xd2, + 0xe9, 0x8b, 0xbd, 0xbb, 0x2e, 0x14, 0x9d, 0xa3, 0x0b, 0x53, 0xec, 0xdd, 0x87, 0x47, 0x3f, 0x17, + 0x60, 0xdf, 0xa1, 0x93, 0x25, 0x01, 0xc9, 0xc2, 0x1d, 0xd5, 0x92, 0x44, 0x0d, 0xe2, 0x90, 0xb0, + 0x3f, 0x9f, 0x69, 0xfb, 0x31, 0xf5, 0x71, 0x30, 0x6e, 0xd1, 0x68, 0xdc, 0x1e, 0x93, 0x40, 0x9a, + 0xb6, 0x95, 0x0a, 0x87, 0x1e, 0x5b, 0xf0, 0x87, 0xce, 0xeb, 0x9c, 0xe8, 0x7f, 0x85, 0xc2, 0x70, + 0x43, 0xe2, 0x0e, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0xed, 0xb3, 0x1d, 0xf0, 0x07, 0x12, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/resources.pb.go 
b/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/resources.pb.go new file mode 100644 index 000000000..a92e9eacf --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/resources.pb.go @@ -0,0 +1,398 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/streetview/publish/v1/resources.proto + +/* +Package publish is a generated protocol buffer package. + +It is generated from these files: + google/streetview/publish/v1/resources.proto + google/streetview/publish/v1/rpcmessages.proto + google/streetview/publish/v1/streetview_publish.proto + +It has these top-level messages: + UploadRef + PhotoId + Level + Pose + Place + Connection + Photo + CreatePhotoRequest + GetPhotoRequest + BatchGetPhotosRequest + BatchGetPhotosResponse + PhotoResponse + ListPhotosRequest + ListPhotosResponse + UpdatePhotoRequest + BatchUpdatePhotosRequest + BatchUpdatePhotosResponse + DeletePhotoRequest + BatchDeletePhotosRequest + BatchDeletePhotosResponse +*/ +package publish + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" +import google_type "google.golang.org/genproto/googleapis/type/latlng" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Upload reference for media files. +type UploadRef struct { + // Required. An upload reference should be unique for each user. 
It follows + // the form: + // "https://streetviewpublish.googleapis.com/media/user//photo/" + UploadUrl string `protobuf:"bytes,1,opt,name=upload_url,json=uploadUrl" json:"upload_url,omitempty"` +} + +func (m *UploadRef) Reset() { *m = UploadRef{} } +func (m *UploadRef) String() string { return proto.CompactTextString(m) } +func (*UploadRef) ProtoMessage() {} +func (*UploadRef) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *UploadRef) GetUploadUrl() string { + if m != nil { + return m.UploadUrl + } + return "" +} + +// Identifier for a photo. +type PhotoId struct { + // Required. A base64 encoded identifier. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *PhotoId) Reset() { *m = PhotoId{} } +func (m *PhotoId) String() string { return proto.CompactTextString(m) } +func (*PhotoId) ProtoMessage() {} +func (*PhotoId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *PhotoId) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +// Level information containing level number and its corresponding name. +type Level struct { + // Floor number, used for ordering. 0 indicates the ground level, 1 indicates + // the first level above ground level, -1 indicates the first level under + // ground level. Non-integer values are OK. + Number float64 `protobuf:"fixed64,1,opt,name=number" json:"number,omitempty"` + // Required. A name assigned to this Level, restricted to 3 characters. + // Consider how the elevator buttons would be labeled for this level if there + // was an elevator. 
+ Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *Level) Reset() { *m = Level{} } +func (m *Level) String() string { return proto.CompactTextString(m) } +func (*Level) ProtoMessage() {} +func (*Level) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Level) GetNumber() float64 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Level) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Raw pose measurement for an entity. +type Pose struct { + // Latitude and longitude pair of the pose, as explained here: + // https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/LatLng + // When creating a photo, if the latitude and longitude pair are not provided + // here, the geolocation from the exif header will be used. + // If the latitude and longitude pair is not provided and cannot be found in + // the exif header, the create photo process will fail. + LatLngPair *google_type.LatLng `protobuf:"bytes,1,opt,name=lat_lng_pair,json=latLngPair" json:"lat_lng_pair,omitempty"` + // Altitude of the pose in meters above ground level (as defined by WGS84). + // NaN indicates an unmeasured quantity. + Altitude float64 `protobuf:"fixed64,2,opt,name=altitude" json:"altitude,omitempty"` + // Compass heading, measured at the center of the photo in degrees clockwise + // from North. Value must be >=0 and <360. + // NaN indicates an unmeasured quantity. + Heading float64 `protobuf:"fixed64,3,opt,name=heading" json:"heading,omitempty"` + // Pitch, measured at the center of the photo in degrees. Value must be >=-90 + // and <= 90. A value of -90 means looking directly down, and a value of 90 + // means looking directly up. + // NaN indicates an unmeasured quantity. + Pitch float64 `protobuf:"fixed64,4,opt,name=pitch" json:"pitch,omitempty"` + // Roll, measured in degrees. Value must be >= 0 and <360. A value of 0 + // means level with the horizon. 
+ // NaN indicates an unmeasured quantity. + Roll float64 `protobuf:"fixed64,5,opt,name=roll" json:"roll,omitempty"` + // Level (the floor in a building) used to configure vertical navigation. + Level *Level `protobuf:"bytes,7,opt,name=level" json:"level,omitempty"` +} + +func (m *Pose) Reset() { *m = Pose{} } +func (m *Pose) String() string { return proto.CompactTextString(m) } +func (*Pose) ProtoMessage() {} +func (*Pose) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Pose) GetLatLngPair() *google_type.LatLng { + if m != nil { + return m.LatLngPair + } + return nil +} + +func (m *Pose) GetAltitude() float64 { + if m != nil { + return m.Altitude + } + return 0 +} + +func (m *Pose) GetHeading() float64 { + if m != nil { + return m.Heading + } + return 0 +} + +func (m *Pose) GetPitch() float64 { + if m != nil { + return m.Pitch + } + return 0 +} + +func (m *Pose) GetRoll() float64 { + if m != nil { + return m.Roll + } + return 0 +} + +func (m *Pose) GetLevel() *Level { + if m != nil { + return m.Level + } + return nil +} + +// Place metadata for an entity. +type Place struct { + // Required. Place identifier, as described in + // https://developers.google.com/places/place-id. + PlaceId string `protobuf:"bytes,1,opt,name=place_id,json=placeId" json:"place_id,omitempty"` +} + +func (m *Place) Reset() { *m = Place{} } +func (m *Place) String() string { return proto.CompactTextString(m) } +func (*Place) ProtoMessage() {} +func (*Place) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Place) GetPlaceId() string { + if m != nil { + return m.PlaceId + } + return "" +} + +// A connection is the link from a source photo to a destination photo. +type Connection struct { + // Required. The destination of the connection from the containing photo to + // another photo. 
+ Target *PhotoId `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` +} + +func (m *Connection) Reset() { *m = Connection{} } +func (m *Connection) String() string { return proto.CompactTextString(m) } +func (*Connection) ProtoMessage() {} +func (*Connection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Connection) GetTarget() *PhotoId { + if m != nil { + return m.Target + } + return nil +} + +// Photo is used to store 360 photos along with photo metadata. +type Photo struct { + // Output only. Identifier for the photo, which is unique among all photos in + // Google. + PhotoId *PhotoId `protobuf:"bytes,1,opt,name=photo_id,json=photoId" json:"photo_id,omitempty"` + // Required (when creating photo). Input only. The resource URL where the + // photo bytes are uploaded to. + UploadReference *UploadRef `protobuf:"bytes,2,opt,name=upload_reference,json=uploadReference" json:"upload_reference,omitempty"` + // Output only. The download URL for the photo bytes. This field is set only + // when the `view` parameter in a `GetPhotoRequest` is set to + // `INCLUDE_DOWNLOAD_URL`. + DownloadUrl string `protobuf:"bytes,3,opt,name=download_url,json=downloadUrl" json:"download_url,omitempty"` + // Output only. The thumbnail URL for showing a preview of the given photo. + ThumbnailUrl string `protobuf:"bytes,9,opt,name=thumbnail_url,json=thumbnailUrl" json:"thumbnail_url,omitempty"` + // Output only. The share link for the photo. + ShareLink string `protobuf:"bytes,11,opt,name=share_link,json=shareLink" json:"share_link,omitempty"` + // Pose of the photo. + Pose *Pose `protobuf:"bytes,4,opt,name=pose" json:"pose,omitempty"` + // Connections to other photos. A connection represents the link from this + // photo to another photo. + Connections []*Connection `protobuf:"bytes,5,rep,name=connections" json:"connections,omitempty"` + // Absolute time when the photo was captured. 
+ // When the photo has no exif timestamp, this is used to set a timestamp in + // the photo metadata. + CaptureTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=capture_time,json=captureTime" json:"capture_time,omitempty"` + // Places where this photo belongs. + Places []*Place `protobuf:"bytes,7,rep,name=places" json:"places,omitempty"` + // Output only. View count of the photo. + ViewCount int64 `protobuf:"varint,10,opt,name=view_count,json=viewCount" json:"view_count,omitempty"` +} + +func (m *Photo) Reset() { *m = Photo{} } +func (m *Photo) String() string { return proto.CompactTextString(m) } +func (*Photo) ProtoMessage() {} +func (*Photo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Photo) GetPhotoId() *PhotoId { + if m != nil { + return m.PhotoId + } + return nil +} + +func (m *Photo) GetUploadReference() *UploadRef { + if m != nil { + return m.UploadReference + } + return nil +} + +func (m *Photo) GetDownloadUrl() string { + if m != nil { + return m.DownloadUrl + } + return "" +} + +func (m *Photo) GetThumbnailUrl() string { + if m != nil { + return m.ThumbnailUrl + } + return "" +} + +func (m *Photo) GetShareLink() string { + if m != nil { + return m.ShareLink + } + return "" +} + +func (m *Photo) GetPose() *Pose { + if m != nil { + return m.Pose + } + return nil +} + +func (m *Photo) GetConnections() []*Connection { + if m != nil { + return m.Connections + } + return nil +} + +func (m *Photo) GetCaptureTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CaptureTime + } + return nil +} + +func (m *Photo) GetPlaces() []*Place { + if m != nil { + return m.Places + } + return nil +} + +func (m *Photo) GetViewCount() int64 { + if m != nil { + return m.ViewCount + } + return 0 +} + +func init() { + proto.RegisterType((*UploadRef)(nil), "google.streetview.publish.v1.UploadRef") + proto.RegisterType((*PhotoId)(nil), "google.streetview.publish.v1.PhotoId") + proto.RegisterType((*Level)(nil), 
"google.streetview.publish.v1.Level") + proto.RegisterType((*Pose)(nil), "google.streetview.publish.v1.Pose") + proto.RegisterType((*Place)(nil), "google.streetview.publish.v1.Place") + proto.RegisterType((*Connection)(nil), "google.streetview.publish.v1.Connection") + proto.RegisterType((*Photo)(nil), "google.streetview.publish.v1.Photo") +} + +func init() { proto.RegisterFile("google/streetview/publish/v1/resources.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 651 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdb, 0x6a, 0xdb, 0x4c, + 0x10, 0xc6, 0xf1, 0x29, 0x1e, 0xf9, 0x3f, 0xb0, 0xff, 0x4f, 0x51, 0x4c, 0x43, 0x53, 0x85, 0x52, + 0x53, 0x8a, 0x44, 0x1c, 0x5a, 0x28, 0x21, 0x50, 0x92, 0xab, 0xb4, 0xbe, 0x30, 0xdb, 0xa6, 0x17, + 0xbd, 0x11, 0x6b, 0x69, 0x22, 0x2f, 0x59, 0xef, 0x2e, 0xab, 0x95, 0x43, 0x9f, 0xa1, 0x8f, 0xd1, + 0x97, 0xea, 0xe3, 0x14, 0xad, 0x56, 0x4e, 0x2f, 0x82, 0xd3, 0x2b, 0xcf, 0x7c, 0xf3, 0x7d, 0xe3, + 0x39, 0xad, 0xe0, 0x75, 0xa1, 0x54, 0x21, 0x30, 0x29, 0xad, 0x41, 0xb4, 0x1b, 0x8e, 0x77, 0x89, + 0xae, 0x96, 0x82, 0x97, 0xab, 0x64, 0x73, 0x92, 0x18, 0x2c, 0x55, 0x65, 0x32, 0x2c, 0x63, 0x6d, + 0x94, 0x55, 0xe4, 0x69, 0xc3, 0x8e, 0xef, 0xd9, 0xb1, 0x67, 0xc7, 0x9b, 0x93, 0x89, 0x8f, 0x26, + 0x4c, 0xf3, 0x84, 0x49, 0xa9, 0x2c, 0xb3, 0x5c, 0x49, 0xaf, 0x9d, 0x3c, 0xf3, 0x51, 0xe7, 0x2d, + 0xab, 0x9b, 0xc4, 0xf2, 0x35, 0x96, 0x96, 0xad, 0xb5, 0x27, 0x84, 0x9e, 0x60, 0xbf, 0x69, 0x4c, + 0x04, 0xb3, 0x42, 0x16, 0x4d, 0x24, 0x7a, 0x05, 0xa3, 0x6b, 0x2d, 0x14, 0xcb, 0x29, 0xde, 0x90, + 0x43, 0x80, 0xca, 0x39, 0x69, 0x65, 0x44, 0xd8, 0x39, 0xea, 0x4c, 0x47, 0x74, 0xd4, 0x20, 0xd7, + 0x46, 0x44, 0x07, 0x30, 0x5c, 0xac, 0x94, 0x55, 0x57, 0x39, 0xf9, 0x1b, 0xf6, 0x78, 0xee, 0x19, + 0x7b, 0x3c, 0x8f, 0x4e, 0xa1, 0x3f, 0xc7, 0x0d, 0x0a, 0xf2, 0x04, 0x06, 0xb2, 0x5a, 0x2f, 0xd1, + 0xb8, 0x60, 0x87, 0x7a, 0x8f, 0x10, 0xe8, 0x49, 0xb6, 0xc6, 0x70, 0xcf, 0x49, 0x9c, 0x1d, 0xfd, 
+ 0xec, 0x40, 0x6f, 0xa1, 0x4a, 0x24, 0x6f, 0x60, 0x2c, 0x98, 0x4d, 0x85, 0x2c, 0x52, 0xcd, 0x78, + 0x23, 0x0d, 0x66, 0xff, 0xc5, 0x7e, 0x24, 0x75, 0xd5, 0xf1, 0x9c, 0xd9, 0xb9, 0x2c, 0x28, 0x08, + 0xf7, 0xbb, 0x60, 0xdc, 0x90, 0x09, 0xec, 0x33, 0x61, 0xb9, 0xad, 0xf2, 0x26, 0x6f, 0x87, 0x6e, + 0x7d, 0x12, 0xc2, 0x70, 0x85, 0x2c, 0xe7, 0xb2, 0x08, 0xbb, 0x2e, 0xd4, 0xba, 0xe4, 0x7f, 0xe8, + 0x6b, 0x6e, 0xb3, 0x55, 0xd8, 0x73, 0x78, 0xe3, 0xd4, 0xf5, 0x19, 0x25, 0x44, 0xd8, 0x77, 0xa0, + 0xb3, 0xc9, 0x3b, 0xe8, 0x8b, 0xba, 0xa9, 0x70, 0xe8, 0xea, 0x39, 0x8e, 0x77, 0xad, 0x28, 0x76, + 0xfd, 0xd3, 0x46, 0x11, 0x45, 0xd0, 0x5f, 0x08, 0x96, 0x21, 0x39, 0x80, 0x7d, 0x5d, 0x1b, 0xe9, + 0x76, 0x5c, 0x43, 0xe7, 0x5f, 0xe5, 0xd1, 0x47, 0x80, 0x4b, 0x25, 0x25, 0x66, 0xf5, 0x2a, 0xc9, + 0x39, 0x0c, 0x2c, 0x33, 0x05, 0x5a, 0xdf, 0xfd, 0x8b, 0xdd, 0xff, 0xe6, 0x17, 0x41, 0xbd, 0x28, + 0xfa, 0xd1, 0x83, 0xbe, 0xc3, 0xc8, 0x7b, 0xd8, 0xd7, 0xb5, 0xd1, 0xfe, 0xe3, 0x1f, 0xa7, 0x1a, + 0x6a, 0xbf, 0x5c, 0x0a, 0xff, 0xfa, 0x33, 0x30, 0x78, 0x83, 0x06, 0x65, 0xd6, 0xcc, 0x37, 0x98, + 0xbd, 0xdc, 0x9d, 0x69, 0x7b, 0x49, 0xf4, 0x9f, 0xaa, 0x35, 0x1b, 0x3d, 0x79, 0x0e, 0xe3, 0x5c, + 0xdd, 0xc9, 0xed, 0x71, 0x75, 0xdd, 0x2c, 0x82, 0x16, 0xbb, 0x36, 0x82, 0x1c, 0xc3, 0x5f, 0x76, + 0x55, 0xad, 0x97, 0x92, 0x71, 0xe1, 0x38, 0x23, 0xc7, 0x19, 0x6f, 0xc1, 0x9a, 0x74, 0x08, 0x50, + 0xae, 0x98, 0xc1, 0x54, 0x70, 0x79, 0x1b, 0x06, 0xcd, 0x89, 0x3a, 0x64, 0xce, 0xe5, 0x2d, 0x79, + 0x0b, 0x3d, 0xad, 0x4a, 0x74, 0xbb, 0x0d, 0x66, 0xd1, 0x23, 0x8d, 0xab, 0x12, 0xa9, 0xe3, 0x93, + 0x0f, 0x10, 0x64, 0xdb, 0x5d, 0x94, 0x61, 0xff, 0xa8, 0x3b, 0x0d, 0x66, 0xd3, 0xdd, 0xf2, 0xfb, + 0xe5, 0xd1, 0xdf, 0xc5, 0xe4, 0x1c, 0xc6, 0x19, 0xd3, 0xb6, 0x32, 0x98, 0xd6, 0xef, 0x30, 0x1c, + 0xb8, 0x5a, 0x26, 0x6d, 0xb2, 0xf6, 0x91, 0xc6, 0x9f, 0xdb, 0x47, 0x4a, 0x03, 0xcf, 0xaf, 0x11, + 0x72, 0x06, 0x03, 0x77, 0x21, 0x65, 0x38, 0x74, 0x55, 0x3c, 0x72, 0x76, 0xee, 0xcc, 0xa8, 0x97, + 0xd4, 0xe3, 0xa9, 0x09, 0x69, 0xa6, 
0x2a, 0x69, 0x43, 0x38, 0xea, 0x4c, 0xbb, 0x74, 0x54, 0x23, + 0x97, 0x35, 0x70, 0xf1, 0xbd, 0x03, 0xd3, 0x4c, 0xad, 0xdb, 0x8c, 0x05, 0xaa, 0xb8, 0x2a, 0xb2, + 0x87, 0x33, 0x5f, 0x4c, 0x3e, 0x39, 0xf8, 0x0b, 0xc7, 0xbb, 0x45, 0x83, 0xd2, 0xf6, 0x9b, 0xf5, + 0xf5, 0xb2, 0xcd, 0xa0, 0x04, 0x93, 0x45, 0xac, 0x4c, 0x91, 0x14, 0x28, 0x5d, 0x6b, 0x49, 0x13, + 0x62, 0x9a, 0x97, 0x0f, 0x7f, 0xfa, 0xce, 0xbc, 0xb9, 0x1c, 0x38, 0xfe, 0xe9, 0xaf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xad, 0x4e, 0x7a, 0x51, 0x29, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/rpcmessages.pb.go b/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/rpcmessages.pb.go new file mode 100644 index 000000000..4a96824f5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/rpcmessages.pb.go @@ -0,0 +1,470 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/streetview/publish/v1/rpcmessages.proto + +package publish + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf2 "google.golang.org/genproto/protobuf/field_mask" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Specifies which view of the `Photo` should be included in the response. +type PhotoView int32 + +const ( + // Server reponses do not include the download URL for the photo bytes. + // The default value. + PhotoView_BASIC PhotoView = 0 + // Server responses include the download URL for the photo bytes. 
+ PhotoView_INCLUDE_DOWNLOAD_URL PhotoView = 1 +) + +var PhotoView_name = map[int32]string{ + 0: "BASIC", + 1: "INCLUDE_DOWNLOAD_URL", +} +var PhotoView_value = map[string]int32{ + "BASIC": 0, + "INCLUDE_DOWNLOAD_URL": 1, +} + +func (x PhotoView) String() string { + return proto.EnumName(PhotoView_name, int32(x)) +} +func (PhotoView) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Request to create a photo. +type CreatePhotoRequest struct { + // Required. Photo to create. + Photo *Photo `protobuf:"bytes,1,opt,name=photo" json:"photo,omitempty"` +} + +func (m *CreatePhotoRequest) Reset() { *m = CreatePhotoRequest{} } +func (m *CreatePhotoRequest) String() string { return proto.CompactTextString(m) } +func (*CreatePhotoRequest) ProtoMessage() {} +func (*CreatePhotoRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *CreatePhotoRequest) GetPhoto() *Photo { + if m != nil { + return m.Photo + } + return nil +} + +// Request to get a photo. +// +// By default +// - does not return the download URL for the photo bytes. +// +// Parameters: +// - 'view' controls if the download URL for the photo bytes will be returned. +type GetPhotoRequest struct { + // Required. ID of the photo. + PhotoId string `protobuf:"bytes,1,opt,name=photo_id,json=photoId" json:"photo_id,omitempty"` + // Specifies if a download URL for the photo bytes should be returned in the + // Photo response. 
+ View PhotoView `protobuf:"varint,2,opt,name=view,enum=google.streetview.publish.v1.PhotoView" json:"view,omitempty"` +} + +func (m *GetPhotoRequest) Reset() { *m = GetPhotoRequest{} } +func (m *GetPhotoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPhotoRequest) ProtoMessage() {} +func (*GetPhotoRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *GetPhotoRequest) GetPhotoId() string { + if m != nil { + return m.PhotoId + } + return "" +} + +func (m *GetPhotoRequest) GetView() PhotoView { + if m != nil { + return m.View + } + return PhotoView_BASIC +} + +// Request to get one or more photos. +// By default +// - does not return the download URL for the photo bytes. +// +// Parameters: +// - 'view' controls if the download URL for the photo bytes will be returned. +type BatchGetPhotosRequest struct { + // Required. IDs of the photos. + PhotoIds []string `protobuf:"bytes,1,rep,name=photo_ids,json=photoIds" json:"photo_ids,omitempty"` + // Specifies if a download URL for the photo bytes should be returned in the + // Photo response. + View PhotoView `protobuf:"varint,2,opt,name=view,enum=google.streetview.publish.v1.PhotoView" json:"view,omitempty"` +} + +func (m *BatchGetPhotosRequest) Reset() { *m = BatchGetPhotosRequest{} } +func (m *BatchGetPhotosRequest) String() string { return proto.CompactTextString(m) } +func (*BatchGetPhotosRequest) ProtoMessage() {} +func (*BatchGetPhotosRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *BatchGetPhotosRequest) GetPhotoIds() []string { + if m != nil { + return m.PhotoIds + } + return nil +} + +func (m *BatchGetPhotosRequest) GetView() PhotoView { + if m != nil { + return m.View + } + return PhotoView_BASIC +} + +// Response to batch get of photos. +type BatchGetPhotosResponse struct { + // List of results for each individual photo requested, in the same order as + // the request. 
+ Results []*PhotoResponse `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *BatchGetPhotosResponse) Reset() { *m = BatchGetPhotosResponse{} } +func (m *BatchGetPhotosResponse) String() string { return proto.CompactTextString(m) } +func (*BatchGetPhotosResponse) ProtoMessage() {} +func (*BatchGetPhotosResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *BatchGetPhotosResponse) GetResults() []*PhotoResponse { + if m != nil { + return m.Results + } + return nil +} + +// Response payload for a single `Photo` in batch operations including +// `BatchGetPhotosRequest` and `BatchUpdatePhotosRequest`. +type PhotoResponse struct { + // The status for the operation to get or update a single photo in the batch + // request. + Status *google_rpc.Status `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + // The photo resource, if the request was successful. + Photo *Photo `protobuf:"bytes,2,opt,name=photo" json:"photo,omitempty"` +} + +func (m *PhotoResponse) Reset() { *m = PhotoResponse{} } +func (m *PhotoResponse) String() string { return proto.CompactTextString(m) } +func (*PhotoResponse) ProtoMessage() {} +func (*PhotoResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *PhotoResponse) GetStatus() *google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *PhotoResponse) GetPhoto() *Photo { + if m != nil { + return m.Photo + } + return nil +} + +// Request to list all photos that belong to the user sending the request. +// +// By default +// - does not return the download URL for the photo bytes. +// +// Parameters: +// - 'view' controls if the download URL for the photo bytes will be returned. +// - 'page_size' determines the maximum number of photos to return. +// - 'page_token' is the next page token value returned from a previous List +// request, if any. 
+type ListPhotosRequest struct { + // Specifies if a download URL for the photos bytes should be returned in the + // Photos response. + View PhotoView `protobuf:"varint,1,opt,name=view,enum=google.streetview.publish.v1.PhotoView" json:"view,omitempty"` + // The maximum number of photos to return. + // `page_size` must be non-negative. If `page_size` is zero or is not + // provided, the default page size of 100 will be used. + // The number of photos returned in the response may be less than `page_size` + // if the number of photos that belong to the user is less than `page_size`. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The next_page_token value returned from a previous List request, if any. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The filter expression. + // Example: `placeId=ChIJj61dQgK6j4AR4GeTYWZsKWw` + Filter string `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListPhotosRequest) Reset() { *m = ListPhotosRequest{} } +func (m *ListPhotosRequest) String() string { return proto.CompactTextString(m) } +func (*ListPhotosRequest) ProtoMessage() {} +func (*ListPhotosRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *ListPhotosRequest) GetView() PhotoView { + if m != nil { + return m.View + } + return PhotoView_BASIC +} + +func (m *ListPhotosRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListPhotosRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListPhotosRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// Response to list all photos that belong to a user. +type ListPhotosResponse struct { + // List of photos. There will be a maximum number of items returned based on + // the page_size field in the request. 
+ Photos []*Photo `protobuf:"bytes,1,rep,name=photos" json:"photos,omitempty"` + // Token to retrieve the next page of results, or empty if there are no + // more results in the list. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListPhotosResponse) Reset() { *m = ListPhotosResponse{} } +func (m *ListPhotosResponse) String() string { return proto.CompactTextString(m) } +func (*ListPhotosResponse) ProtoMessage() {} +func (*ListPhotosResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *ListPhotosResponse) GetPhotos() []*Photo { + if m != nil { + return m.Photos + } + return nil +} + +func (m *ListPhotosResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request to update the metadata of a photo. +// Updating the pixels of a photo is not supported. +type UpdatePhotoRequest struct { + // Required. Photo object containing the new metadata. Only the fields + // specified in `update_mask` are used. If `update_mask` is not present, the + // update applies to all fields. + // **Note:** To update `pose.altitude`, `pose.latlngpair` has to be filled as + // well. Otherwise, the request will fail. + Photo *Photo `protobuf:"bytes,1,opt,name=photo" json:"photo,omitempty"` + // Mask that identifies fields on the photo metadata to update. + // If not present, the old Photo metadata will be entirely replaced with the + // new Photo metadata in this request. The update fails if invalid fields are + // specified. Multiple fields can be specified in a comma-delimited list. 
+ // + // The following fields are valid: + // + // * `pose.heading` + // * `pose.latlngpair` + // * `pose.pitch` + // * `pose.roll` + // * `pose.level` + // * `pose.altitude` + // * `connections` + // * `places` + // + // + // **Note:** Repeated fields in `update_mask` mean the entire set of repeated + // values will be replaced with the new contents. For example, if + // `UpdatePhotoRequest.photo.update_mask` contains `connections` and + // `UpdatePhotoRequest.photo.connections` is empty, all connections will be + // removed. + UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdatePhotoRequest) Reset() { *m = UpdatePhotoRequest{} } +func (m *UpdatePhotoRequest) String() string { return proto.CompactTextString(m) } +func (*UpdatePhotoRequest) ProtoMessage() {} +func (*UpdatePhotoRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *UpdatePhotoRequest) GetPhoto() *Photo { + if m != nil { + return m.Photo + } + return nil +} + +func (m *UpdatePhotoRequest) GetUpdateMask() *google_protobuf2.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request to update the metadata of photos. +// Updating the pixels of photos is not supported. +type BatchUpdatePhotosRequest struct { + // Required. List of update photo requests. 
+ UpdatePhotoRequests []*UpdatePhotoRequest `protobuf:"bytes,1,rep,name=update_photo_requests,json=updatePhotoRequests" json:"update_photo_requests,omitempty"` +} + +func (m *BatchUpdatePhotosRequest) Reset() { *m = BatchUpdatePhotosRequest{} } +func (m *BatchUpdatePhotosRequest) String() string { return proto.CompactTextString(m) } +func (*BatchUpdatePhotosRequest) ProtoMessage() {} +func (*BatchUpdatePhotosRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *BatchUpdatePhotosRequest) GetUpdatePhotoRequests() []*UpdatePhotoRequest { + if m != nil { + return m.UpdatePhotoRequests + } + return nil +} + +// Response to batch update of metadata of one or more photos. +type BatchUpdatePhotosResponse struct { + // List of results for each individual photo updated, in the same order as + // the request. + Results []*PhotoResponse `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *BatchUpdatePhotosResponse) Reset() { *m = BatchUpdatePhotosResponse{} } +func (m *BatchUpdatePhotosResponse) String() string { return proto.CompactTextString(m) } +func (*BatchUpdatePhotosResponse) ProtoMessage() {} +func (*BatchUpdatePhotosResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *BatchUpdatePhotosResponse) GetResults() []*PhotoResponse { + if m != nil { + return m.Results + } + return nil +} + +// Request to delete a photo. +type DeletePhotoRequest struct { + // Required. ID of the photo. 
+ PhotoId string `protobuf:"bytes,1,opt,name=photo_id,json=photoId" json:"photo_id,omitempty"` +} + +func (m *DeletePhotoRequest) Reset() { *m = DeletePhotoRequest{} } +func (m *DeletePhotoRequest) String() string { return proto.CompactTextString(m) } +func (*DeletePhotoRequest) ProtoMessage() {} +func (*DeletePhotoRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *DeletePhotoRequest) GetPhotoId() string { + if m != nil { + return m.PhotoId + } + return "" +} + +// Request to delete multiple photos. +type BatchDeletePhotosRequest struct { + // Required. List of delete photo requests. + PhotoIds []string `protobuf:"bytes,1,rep,name=photo_ids,json=photoIds" json:"photo_ids,omitempty"` +} + +func (m *BatchDeletePhotosRequest) Reset() { *m = BatchDeletePhotosRequest{} } +func (m *BatchDeletePhotosRequest) String() string { return proto.CompactTextString(m) } +func (*BatchDeletePhotosRequest) ProtoMessage() {} +func (*BatchDeletePhotosRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *BatchDeletePhotosRequest) GetPhotoIds() []string { + if m != nil { + return m.PhotoIds + } + return nil +} + +// Response to batch delete of one or more photos. +type BatchDeletePhotosResponse struct { + // The status for the operation to delete a single photo in the batch request. 
+ Status []*google_rpc.Status `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` +} + +func (m *BatchDeletePhotosResponse) Reset() { *m = BatchDeletePhotosResponse{} } +func (m *BatchDeletePhotosResponse) String() string { return proto.CompactTextString(m) } +func (*BatchDeletePhotosResponse) ProtoMessage() {} +func (*BatchDeletePhotosResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *BatchDeletePhotosResponse) GetStatus() []*google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterType((*CreatePhotoRequest)(nil), "google.streetview.publish.v1.CreatePhotoRequest") + proto.RegisterType((*GetPhotoRequest)(nil), "google.streetview.publish.v1.GetPhotoRequest") + proto.RegisterType((*BatchGetPhotosRequest)(nil), "google.streetview.publish.v1.BatchGetPhotosRequest") + proto.RegisterType((*BatchGetPhotosResponse)(nil), "google.streetview.publish.v1.BatchGetPhotosResponse") + proto.RegisterType((*PhotoResponse)(nil), "google.streetview.publish.v1.PhotoResponse") + proto.RegisterType((*ListPhotosRequest)(nil), "google.streetview.publish.v1.ListPhotosRequest") + proto.RegisterType((*ListPhotosResponse)(nil), "google.streetview.publish.v1.ListPhotosResponse") + proto.RegisterType((*UpdatePhotoRequest)(nil), "google.streetview.publish.v1.UpdatePhotoRequest") + proto.RegisterType((*BatchUpdatePhotosRequest)(nil), "google.streetview.publish.v1.BatchUpdatePhotosRequest") + proto.RegisterType((*BatchUpdatePhotosResponse)(nil), "google.streetview.publish.v1.BatchUpdatePhotosResponse") + proto.RegisterType((*DeletePhotoRequest)(nil), "google.streetview.publish.v1.DeletePhotoRequest") + proto.RegisterType((*BatchDeletePhotosRequest)(nil), "google.streetview.publish.v1.BatchDeletePhotosRequest") + proto.RegisterType((*BatchDeletePhotosResponse)(nil), "google.streetview.publish.v1.BatchDeletePhotosResponse") + proto.RegisterEnum("google.streetview.publish.v1.PhotoView", PhotoView_name, 
PhotoView_value) +} + +func init() { proto.RegisterFile("google/streetview/publish/v1/rpcmessages.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 639 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcb, 0x6e, 0xd3, 0x40, + 0x14, 0xc5, 0x7d, 0xa4, 0xcd, 0xad, 0x4a, 0xcb, 0x40, 0x8b, 0x1b, 0x8a, 0x14, 0x19, 0x09, 0xa2, + 0x82, 0xec, 0xb6, 0x2c, 0x10, 0xca, 0xaa, 0x49, 0x4a, 0x55, 0x29, 0x7d, 0xc8, 0xa1, 0x20, 0xb1, + 0xb1, 0x1c, 0xe7, 0xc6, 0xb1, 0xe2, 0x64, 0x5c, 0xcf, 0x38, 0x85, 0xae, 0xf8, 0x00, 0xf8, 0x0b, + 0x3e, 0x14, 0x79, 0x3c, 0xd3, 0x26, 0x69, 0x88, 0x02, 0x74, 0x67, 0xdf, 0xc7, 0xb9, 0x67, 0xce, + 0x9d, 0x63, 0x83, 0xe9, 0x53, 0xea, 0x87, 0x68, 0x31, 0x1e, 0x23, 0xf2, 0x41, 0x80, 0x57, 0x56, + 0x94, 0x34, 0xc3, 0x80, 0x75, 0xac, 0xc1, 0x9e, 0x15, 0x47, 0x5e, 0x0f, 0x19, 0x73, 0x7d, 0x64, + 0x66, 0x14, 0x53, 0x4e, 0xc9, 0x76, 0x56, 0x6f, 0xde, 0xd6, 0x9b, 0xb2, 0xde, 0x1c, 0xec, 0x15, + 0x8a, 0x12, 0x4d, 0xd4, 0x36, 0x93, 0xb6, 0xd5, 0x0e, 0x30, 0x6c, 0x39, 0x3d, 0x97, 0x75, 0xb3, + 0xfe, 0xc2, 0x53, 0x59, 0x11, 0x47, 0x9e, 0xc5, 0xb8, 0xcb, 0x13, 0x09, 0x5c, 0x78, 0x33, 0x9d, + 0x08, 0x32, 0x9a, 0xc4, 0x9e, 0xa2, 0x61, 0x9c, 0x01, 0xa9, 0xc6, 0xe8, 0x72, 0x3c, 0xef, 0x50, + 0x4e, 0x6d, 0xbc, 0x4c, 0x90, 0x71, 0xf2, 0x1e, 0x16, 0xa3, 0xf4, 0x5d, 0xd7, 0x8a, 0x5a, 0x69, + 0x65, 0xff, 0x85, 0x39, 0x8d, 0xac, 0x99, 0xb5, 0x66, 0x1d, 0x46, 0x00, 0x6b, 0x47, 0xc8, 0x47, + 0xd0, 0xb6, 0x60, 0x59, 0xe4, 0x9c, 0xa0, 0x25, 0x00, 0xf3, 0xf6, 0x92, 0x78, 0x3f, 0x6e, 0x91, + 0x32, 0x2c, 0xa4, 0x68, 0xfa, 0x5c, 0x51, 0x2b, 0x3d, 0xdc, 0x7f, 0x35, 0xc3, 0x9c, 0x4f, 0x01, + 0x5e, 0xd9, 0xa2, 0xc9, 0xb8, 0x84, 0x8d, 0x8a, 0xcb, 0xbd, 0x8e, 0x9a, 0xc7, 0xd4, 0xc0, 0x67, + 0x90, 0x57, 0x03, 0x99, 0xae, 0x15, 0xe7, 0x4b, 0x79, 0x7b, 0x59, 0x4e, 0x64, 0xff, 0x37, 0xd2, + 0x81, 0xcd, 0xf1, 0x91, 0x2c, 0xa2, 0x7d, 0x86, 0xe4, 0x10, 0x96, 0x62, 0x64, 0x49, 0xc8, 0xb3, + 0x89, 0x2b, 0xfb, 0xaf, 
0x67, 0x11, 0x4d, 0x76, 0xdb, 0xaa, 0xd7, 0x18, 0xc0, 0xea, 0x48, 0x86, + 0xec, 0x40, 0x2e, 0x5b, 0xaf, 0xdc, 0x05, 0x51, 0xb0, 0x71, 0xe4, 0x99, 0x0d, 0x91, 0xb1, 0x65, + 0xc5, 0xed, 0xda, 0xe6, 0xfe, 0x7a, 0x6d, 0xbf, 0x34, 0x78, 0x54, 0x0f, 0xd8, 0x98, 0x90, 0x4a, + 0x2b, 0xed, 0x1f, 0xb4, 0x12, 0x5b, 0x70, 0x7d, 0x74, 0x58, 0x70, 0x8d, 0x82, 0xd1, 0xa2, 0xbd, + 0x9c, 0x06, 0x1a, 0xc1, 0x35, 0x92, 0xe7, 0x00, 0x22, 0xc9, 0x69, 0x17, 0xfb, 0xfa, 0xbc, 0xb8, + 0x15, 0xa2, 0xfc, 0x63, 0x1a, 0x20, 0x9b, 0x90, 0x6b, 0x07, 0x21, 0xc7, 0x58, 0x5f, 0x10, 0x29, + 0xf9, 0x66, 0x7c, 0x03, 0x32, 0xcc, 0x52, 0x6a, 0x54, 0x86, 0x9c, 0x38, 0x85, 0x92, 0x7e, 0xa6, + 0x83, 0xcb, 0x16, 0xf2, 0x12, 0xd6, 0xfa, 0xf8, 0x95, 0x3b, 0x43, 0x74, 0xe6, 0xc4, 0xcc, 0xd5, + 0x34, 0x7c, 0xae, 0x28, 0x19, 0x3f, 0x34, 0x20, 0x17, 0x51, 0xeb, 0xfe, 0xac, 0x42, 0xca, 0xb0, + 0x92, 0x08, 0x40, 0xe1, 0x6b, 0xb9, 0xb4, 0x82, 0x02, 0x50, 0xd6, 0x37, 0x3f, 0xa4, 0xd6, 0x3f, + 0x71, 0x59, 0xd7, 0x86, 0xac, 0x3c, 0x7d, 0x36, 0xbe, 0x6b, 0xa0, 0x8b, 0xab, 0x38, 0xc4, 0xe9, + 0x66, 0x6f, 0x2d, 0xd8, 0x90, 0xc8, 0x99, 0x0f, 0xe2, 0x2c, 0xae, 0xf4, 0xd9, 0x9d, 0x4e, 0xf2, + 0xee, 0x29, 0xed, 0xc7, 0xc9, 0x9d, 0x18, 0x33, 0x9a, 0xb0, 0x35, 0x81, 0xc1, 0xfd, 0xfa, 0xc1, + 0x02, 0x52, 0xc3, 0x10, 0xc7, 0x44, 0xff, 0xf3, 0x17, 0xc5, 0x78, 0x27, 0x65, 0x19, 0xea, 0x9a, + 0xe9, 0xbb, 0x60, 0x1c, 0xc9, 0xd3, 0x8c, 0x36, 0x4e, 0x70, 0xe1, 0xfc, 0x74, 0x17, 0xee, 0xec, + 0x42, 0xfe, 0xc6, 0x0a, 0x24, 0x0f, 0x8b, 0x95, 0x83, 0xc6, 0x71, 0x75, 0xfd, 0x01, 0xd1, 0xe1, + 0xc9, 0xf1, 0x69, 0xb5, 0x7e, 0x51, 0x3b, 0x74, 0x6a, 0x67, 0x9f, 0x4f, 0xeb, 0x67, 0x07, 0x35, + 0xe7, 0xc2, 0xae, 0xaf, 0x6b, 0x95, 0x9f, 0x1a, 0x94, 0x3c, 0xda, 0x53, 0x98, 0x3e, 0x52, 0x33, + 0xf1, 0xbd, 0xc9, 0x42, 0x55, 0xb6, 0x1b, 0x22, 0x9c, 0xa2, 0x9f, 0x67, 0x51, 0x3b, 0xf2, 0x4e, + 0xe4, 0xcf, 0xe5, 0x4b, 0x55, 0x61, 0xd0, 0xd0, 0xed, 0xfb, 0x26, 0x8d, 0x7d, 0xcb, 0xc7, 0xbe, + 0xb8, 0x4b, 0x56, 0x96, 0x72, 0xa3, 0x80, 0x4d, 0xfe, 0x39, 
0x94, 0xe5, 0x63, 0x33, 0x27, 0xea, + 0xdf, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x7c, 0xa0, 0x45, 0xd4, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/streetview_publish.pb.go b/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/streetview_publish.pb.go new file mode 100644 index 000000000..63849b687 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/streetview/publish/v1/streetview_publish.pb.go @@ -0,0 +1,524 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/streetview/publish/v1/streetview_publish.proto + +package publish + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for StreetViewPublishService service + +type StreetViewPublishServiceClient interface { + // Creates an upload session to start uploading photo data. The upload URL of + // the returned `UploadRef` is used to upload the data for the photo. + // + // After the upload is complete, the `UploadRef` is used with + // `StreetViewPublishService:CreatePhoto()` to create the `Photo` object + // entry. 
+ StartUpload(ctx context.Context, in *google_protobuf4.Empty, opts ...grpc.CallOption) (*UploadRef, error) + // After the client finishes uploading the photo with the returned + // `UploadRef`, `photo.create` publishes the uploaded photo to Street View on + // Google Maps. + // + // This method returns the following error codes: + // + // * `INVALID_ARGUMENT` if the request is malformed. + // * `NOT_FOUND` if the upload reference does not exist. + CreatePhoto(ctx context.Context, in *CreatePhotoRequest, opts ...grpc.CallOption) (*Photo, error) + // Gets the metadata of the specified `Photo`. + // + // This method returns the following error codes: + // + // * `PERMISSION_DENIED` if the requesting user did not create the requested + // photo. + // * `NOT_FOUND` if the requested photo does not exist. + GetPhoto(ctx context.Context, in *GetPhotoRequest, opts ...grpc.CallOption) (*Photo, error) + // Gets the metadata of the specified `Photo` batch. + // + // Note that if `photos.batchGet` fails, either critical fields are + // missing or there was an authentication error. + // Even if `photos.batchGet` succeeds, there may have been failures + // for single photos in the batch. These failures will be specified in + // `BatchGetPhotosResponse.results.status`. + // See `photo.get` for specific failures that will occur per photo. + BatchGetPhotos(ctx context.Context, in *BatchGetPhotosRequest, opts ...grpc.CallOption) (*BatchGetPhotosResponse, error) + // Lists all the photos that belong to the user. + ListPhotos(ctx context.Context, in *ListPhotosRequest, opts ...grpc.CallOption) (*ListPhotosResponse, error) + // Updates the metadata of a photo, such as pose, place association, etc. + // Changing the pixels of a photo is not supported. + // + // This method returns the following error codes: + // + // * `PERMISSION_DENIED` if the requesting user did not create the requested + // photo. + // * `INVALID_ARGUMENT` if the request is malformed. 
+ // * `NOT_FOUND` if the photo ID does not exist. + UpdatePhoto(ctx context.Context, in *UpdatePhotoRequest, opts ...grpc.CallOption) (*Photo, error) + // Updates the metadata of photos, such as pose, place association, etc. + // Changing the pixels of a photo is not supported. + // + // Note that if `photos.batchUpdate` fails, either critical fields + // are missing or there was an authentication error. + // Even if `photos.batchUpdate` succeeds, there may have been + // failures for single photos in the batch. These failures will be specified + // in `BatchUpdatePhotosResponse.results.status`. + // See `UpdatePhoto` for specific failures that will occur per photo. + BatchUpdatePhotos(ctx context.Context, in *BatchUpdatePhotosRequest, opts ...grpc.CallOption) (*BatchUpdatePhotosResponse, error) + // Deletes a photo and its metadata. + // + // This method returns the following error codes: + // + // * `PERMISSION_DENIED` if the requesting user did not create the requested + // photo. + // * `NOT_FOUND` if the photo ID does not exist. + DeletePhoto(ctx context.Context, in *DeletePhotoRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Deletes a list of photos and their metadata. + // + // Note that if `photos.batchDelete` fails, either critical fields + // are missing or there was an authentication error. + // Even if `photos.batchDelete` succeeds, there may have been + // failures for single photos in the batch. These failures will be specified + // in `BatchDeletePhotosResponse.status`. + // See `photo.update` for specific failures that will occur per photo. 
+ BatchDeletePhotos(ctx context.Context, in *BatchDeletePhotosRequest, opts ...grpc.CallOption) (*BatchDeletePhotosResponse, error) +} + +type streetViewPublishServiceClient struct { + cc *grpc.ClientConn +} + +func NewStreetViewPublishServiceClient(cc *grpc.ClientConn) StreetViewPublishServiceClient { + return &streetViewPublishServiceClient{cc} +} + +func (c *streetViewPublishServiceClient) StartUpload(ctx context.Context, in *google_protobuf4.Empty, opts ...grpc.CallOption) (*UploadRef, error) { + out := new(UploadRef) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/StartUpload", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *streetViewPublishServiceClient) CreatePhoto(ctx context.Context, in *CreatePhotoRequest, opts ...grpc.CallOption) (*Photo, error) { + out := new(Photo) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/CreatePhoto", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *streetViewPublishServiceClient) GetPhoto(ctx context.Context, in *GetPhotoRequest, opts ...grpc.CallOption) (*Photo, error) { + out := new(Photo) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/GetPhoto", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *streetViewPublishServiceClient) BatchGetPhotos(ctx context.Context, in *BatchGetPhotosRequest, opts ...grpc.CallOption) (*BatchGetPhotosResponse, error) { + out := new(BatchGetPhotosResponse) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/BatchGetPhotos", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *streetViewPublishServiceClient) ListPhotos(ctx context.Context, in *ListPhotosRequest, opts ...grpc.CallOption) (*ListPhotosResponse, error) { + out := new(ListPhotosResponse) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/ListPhotos", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *streetViewPublishServiceClient) UpdatePhoto(ctx context.Context, in *UpdatePhotoRequest, opts ...grpc.CallOption) (*Photo, error) { + out := new(Photo) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/UpdatePhoto", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *streetViewPublishServiceClient) BatchUpdatePhotos(ctx context.Context, in *BatchUpdatePhotosRequest, opts ...grpc.CallOption) (*BatchUpdatePhotosResponse, error) { + out := new(BatchUpdatePhotosResponse) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/BatchUpdatePhotos", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *streetViewPublishServiceClient) DeletePhoto(ctx context.Context, in *DeletePhotoRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/DeletePhoto", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *streetViewPublishServiceClient) BatchDeletePhotos(ctx context.Context, in *BatchDeletePhotosRequest, opts ...grpc.CallOption) (*BatchDeletePhotosResponse, error) { + out := new(BatchDeletePhotosResponse) + err := grpc.Invoke(ctx, "/google.streetview.publish.v1.StreetViewPublishService/BatchDeletePhotos", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for StreetViewPublishService service + +type StreetViewPublishServiceServer interface { + // Creates an upload session to start uploading photo data. The upload URL of + // the returned `UploadRef` is used to upload the data for the photo. + // + // After the upload is complete, the `UploadRef` is used with + // `StreetViewPublishService:CreatePhoto()` to create the `Photo` object + // entry. + StartUpload(context.Context, *google_protobuf4.Empty) (*UploadRef, error) + // After the client finishes uploading the photo with the returned + // `UploadRef`, `photo.create` publishes the uploaded photo to Street View on + // Google Maps. + // + // This method returns the following error codes: + // + // * `INVALID_ARGUMENT` if the request is malformed. + // * `NOT_FOUND` if the upload reference does not exist. + CreatePhoto(context.Context, *CreatePhotoRequest) (*Photo, error) + // Gets the metadata of the specified `Photo`. + // + // This method returns the following error codes: + // + // * `PERMISSION_DENIED` if the requesting user did not create the requested + // photo. + // * `NOT_FOUND` if the requested photo does not exist. + GetPhoto(context.Context, *GetPhotoRequest) (*Photo, error) + // Gets the metadata of the specified `Photo` batch. + // + // Note that if `photos.batchGet` fails, either critical fields are + // missing or there was an authentication error. + // Even if `photos.batchGet` succeeds, there may have been failures + // for single photos in the batch. These failures will be specified in + // `BatchGetPhotosResponse.results.status`. + // See `photo.get` for specific failures that will occur per photo. + BatchGetPhotos(context.Context, *BatchGetPhotosRequest) (*BatchGetPhotosResponse, error) + // Lists all the photos that belong to the user. 
+ ListPhotos(context.Context, *ListPhotosRequest) (*ListPhotosResponse, error) + // Updates the metadata of a photo, such as pose, place association, etc. + // Changing the pixels of a photo is not supported. + // + // This method returns the following error codes: + // + // * `PERMISSION_DENIED` if the requesting user did not create the requested + // photo. + // * `INVALID_ARGUMENT` if the request is malformed. + // * `NOT_FOUND` if the photo ID does not exist. + UpdatePhoto(context.Context, *UpdatePhotoRequest) (*Photo, error) + // Updates the metadata of photos, such as pose, place association, etc. + // Changing the pixels of a photo is not supported. + // + // Note that if `photos.batchUpdate` fails, either critical fields + // are missing or there was an authentication error. + // Even if `photos.batchUpdate` succeeds, there may have been + // failures for single photos in the batch. These failures will be specified + // in `BatchUpdatePhotosResponse.results.status`. + // See `UpdatePhoto` for specific failures that will occur per photo. + BatchUpdatePhotos(context.Context, *BatchUpdatePhotosRequest) (*BatchUpdatePhotosResponse, error) + // Deletes a photo and its metadata. + // + // This method returns the following error codes: + // + // * `PERMISSION_DENIED` if the requesting user did not create the requested + // photo. + // * `NOT_FOUND` if the photo ID does not exist. + DeletePhoto(context.Context, *DeletePhotoRequest) (*google_protobuf4.Empty, error) + // Deletes a list of photos and their metadata. + // + // Note that if `photos.batchDelete` fails, either critical fields + // are missing or there was an authentication error. + // Even if `photos.batchDelete` succeeds, there may have been + // failures for single photos in the batch. These failures will be specified + // in `BatchDeletePhotosResponse.status`. + // See `photo.update` for specific failures that will occur per photo. 
+ BatchDeletePhotos(context.Context, *BatchDeletePhotosRequest) (*BatchDeletePhotosResponse, error) +} + +func RegisterStreetViewPublishServiceServer(s *grpc.Server, srv StreetViewPublishServiceServer) { + s.RegisterService(&_StreetViewPublishService_serviceDesc, srv) +} + +func _StreetViewPublishService_StartUpload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_protobuf4.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).StartUpload(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/StartUpload", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StreetViewPublishServiceServer).StartUpload(ctx, req.(*google_protobuf4.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _StreetViewPublishService_CreatePhoto_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreatePhotoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).CreatePhoto(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/CreatePhoto", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StreetViewPublishServiceServer).CreatePhoto(ctx, req.(*CreatePhotoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StreetViewPublishService_GetPhoto_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPhotoRequest) + if err := dec(in); err != nil { + return nil, err + 
} + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).GetPhoto(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/GetPhoto", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StreetViewPublishServiceServer).GetPhoto(ctx, req.(*GetPhotoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StreetViewPublishService_BatchGetPhotos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchGetPhotosRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).BatchGetPhotos(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/BatchGetPhotos", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StreetViewPublishServiceServer).BatchGetPhotos(ctx, req.(*BatchGetPhotosRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StreetViewPublishService_ListPhotos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPhotosRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).ListPhotos(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/ListPhotos", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StreetViewPublishServiceServer).ListPhotos(ctx, req.(*ListPhotosRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StreetViewPublishService_UpdatePhoto_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdatePhotoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).UpdatePhoto(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/UpdatePhoto", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StreetViewPublishServiceServer).UpdatePhoto(ctx, req.(*UpdatePhotoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StreetViewPublishService_BatchUpdatePhotos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchUpdatePhotosRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).BatchUpdatePhotos(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/BatchUpdatePhotos", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StreetViewPublishServiceServer).BatchUpdatePhotos(ctx, req.(*BatchUpdatePhotosRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StreetViewPublishService_DeletePhoto_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeletePhotoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).DeletePhoto(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/DeletePhoto", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { 
+ return srv.(StreetViewPublishServiceServer).DeletePhoto(ctx, req.(*DeletePhotoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StreetViewPublishService_BatchDeletePhotos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchDeletePhotosRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StreetViewPublishServiceServer).BatchDeletePhotos(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.streetview.publish.v1.StreetViewPublishService/BatchDeletePhotos", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StreetViewPublishServiceServer).BatchDeletePhotos(ctx, req.(*BatchDeletePhotosRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _StreetViewPublishService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.streetview.publish.v1.StreetViewPublishService", + HandlerType: (*StreetViewPublishServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "StartUpload", + Handler: _StreetViewPublishService_StartUpload_Handler, + }, + { + MethodName: "CreatePhoto", + Handler: _StreetViewPublishService_CreatePhoto_Handler, + }, + { + MethodName: "GetPhoto", + Handler: _StreetViewPublishService_GetPhoto_Handler, + }, + { + MethodName: "BatchGetPhotos", + Handler: _StreetViewPublishService_BatchGetPhotos_Handler, + }, + { + MethodName: "ListPhotos", + Handler: _StreetViewPublishService_ListPhotos_Handler, + }, + { + MethodName: "UpdatePhoto", + Handler: _StreetViewPublishService_UpdatePhoto_Handler, + }, + { + MethodName: "BatchUpdatePhotos", + Handler: _StreetViewPublishService_BatchUpdatePhotos_Handler, + }, + { + MethodName: "DeletePhoto", + Handler: _StreetViewPublishService_DeletePhoto_Handler, + }, + { + MethodName: "BatchDeletePhotos", + Handler: 
_StreetViewPublishService_BatchDeletePhotos_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/streetview/publish/v1/streetview_publish.proto", +} + +func init() { + proto.RegisterFile("google/streetview/publish/v1/streetview_publish.proto", fileDescriptor2) +} + +var fileDescriptor2 = []byte{ + // 533 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0x15, 0x24, 0x10, 0xb8, 0x08, 0x69, 0x86, 0x55, 0x53, 0x3a, 0x24, 0x08, 0x12, 0xa0, + 0x6a, 0xd8, 0x1b, 0xe3, 0x8f, 0x54, 0x6e, 0x1d, 0x88, 0x0b, 0x87, 0x69, 0xd5, 0x38, 0x70, 0x99, + 0xdc, 0xf4, 0x5d, 0x6a, 0x29, 0x8d, 0x4d, 0xec, 0x74, 0x42, 0x30, 0x0e, 0xe3, 0xc8, 0x0d, 0x2e, + 0x7c, 0x03, 0x3e, 0x10, 0x5f, 0x81, 0x0f, 0x82, 0xea, 0xd8, 0x4d, 0x36, 0x8a, 0x49, 0x4e, 0x69, + 0xf3, 0x3e, 0xcf, 0xfb, 0xfc, 0xfa, 0xbe, 0xae, 0xd1, 0xd3, 0x44, 0x88, 0x24, 0x05, 0xaa, 0x74, + 0x0e, 0xa0, 0xe7, 0x1c, 0x4e, 0xa8, 0x2c, 0xc6, 0x29, 0x57, 0x53, 0x3a, 0xdf, 0xa9, 0xbd, 0x3d, + 0xb2, 0x6f, 0x89, 0xcc, 0x85, 0x16, 0x78, 0xb3, 0xb4, 0x91, 0x4a, 0x40, 0x9c, 0x60, 0xbe, 0x13, + 0xda, 0x2a, 0x65, 0x92, 0x53, 0x96, 0x65, 0x42, 0x33, 0xcd, 0x45, 0xa6, 0x4a, 0x6f, 0xd8, 0xb3, + 0x55, 0xf3, 0x6d, 0x5c, 0x1c, 0x53, 0x98, 0x49, 0xfd, 0xc1, 0x16, 0xb7, 0xbc, 0x3c, 0x39, 0x28, + 0x51, 0xe4, 0x31, 0xb8, 0x56, 0xc4, 0xaf, 0x96, 0xf1, 0x0c, 0x94, 0x62, 0x89, 0xd3, 0x3f, 0xfe, + 0x8a, 0xd0, 0xc6, 0xc8, 0x68, 0xdf, 0x72, 0x38, 0xd9, 0x2f, 0xa5, 0x23, 0xc8, 0xe7, 0x3c, 0x06, + 0x2c, 0x51, 0x67, 0xa4, 0x59, 0xae, 0x0f, 0x65, 0x2a, 0xd8, 0x04, 0x77, 0x6d, 0x73, 0xe2, 0x38, + 0xc9, 0xab, 0x05, 0x67, 0xf8, 0x80, 0xf8, 0x7e, 0x3b, 0x29, 0xdd, 0x07, 0x70, 0x1c, 0xdd, 0x39, + 0xfb, 0xf5, 0xfb, 0xfb, 0xa5, 0x30, 0x5a, 0x5f, 0xb0, 0xc8, 0xa9, 0xd0, 0x62, 0xa0, 0xaa, 0xfe, + 0x83, 0xa0, 0x8f, 0x3f, 0xa3, 0xce, 0x5e, 0x0e, 0x4c, 0xc3, 0xfe, 0xa2, 0x8a, 0xb7, 0xfd, 0x9d, + 0x6b, 0xd2, 0x03, 0x78, 0x5f, 0x80, 0xd2, 0xe1, 0x3d, 0xbf, 0xc3, 
0x68, 0xa3, 0x0d, 0xc3, 0x81, + 0xa3, 0x6b, 0x15, 0xc7, 0x65, 0xf3, 0xc0, 0x9f, 0xd0, 0xd5, 0xd7, 0xa0, 0xcb, 0xf0, 0x47, 0xfe, + 0x56, 0x4e, 0xd7, 0x2a, 0x79, 0xd3, 0x24, 0x77, 0xf1, 0xad, 0x65, 0x32, 0xfd, 0x68, 0x1e, 0x47, + 0x7c, 0x72, 0x8a, 0x7f, 0x04, 0xe8, 0xc6, 0x90, 0xe9, 0x78, 0xea, 0x7a, 0x2b, 0xbc, 0xeb, 0xef, + 0x7a, 0x5e, 0xed, 0x50, 0x9e, 0xb4, 0x33, 0x29, 0x29, 0x32, 0x05, 0x51, 0xcf, 0xb0, 0xad, 0xe3, + 0x9b, 0x4b, 0x36, 0x35, 0x18, 0x5b, 0x29, 0xfe, 0x12, 0x20, 0xf4, 0x86, 0x2b, 0x87, 0x45, 0xfd, + 0x09, 0x95, 0xd2, 0x21, 0x6d, 0x37, 0x37, 0x58, 0x1c, 0x6c, 0x70, 0xae, 0x63, 0x54, 0xe1, 0xe0, + 0x6f, 0x01, 0xea, 0x1c, 0xca, 0x49, 0xd3, 0xf3, 0x51, 0x93, 0xb6, 0xda, 0xd2, 0x96, 0x89, 0xbe, + 0x1f, 0xde, 0xbe, 0xb8, 0x25, 0xe2, 0x76, 0x45, 0xf8, 0xe4, 0xd4, 0x9d, 0x99, 0x9f, 0x01, 0x5a, + 0x33, 0x23, 0xad, 0xc5, 0x29, 0xfc, 0xac, 0xc1, 0x0e, 0xea, 0x06, 0x07, 0xf8, 0xbc, 0xb5, 0xcf, + 0xce, 0xeb, 0xae, 0x81, 0xee, 0x45, 0xdd, 0x8b, 0xeb, 0x2b, 0xd5, 0x8b, 0x7f, 0x57, 0x81, 0x3a, + 0x2f, 0x21, 0x85, 0x86, 0xd3, 0xab, 0x49, 0x1d, 0xdc, 0x3f, 0x6e, 0x00, 0x77, 0xac, 0xfb, 0xab, + 0x8f, 0xf5, 0x72, 0x40, 0xb5, 0x8e, 0xcd, 0x06, 0x54, 0x37, 0xb4, 0x19, 0xd0, 0x79, 0xdf, 0xff, + 0x06, 0x54, 0xaa, 0x07, 0x41, 0x7f, 0x78, 0x16, 0xa0, 0x87, 0xb1, 0x98, 0xb9, 0x84, 0x04, 0x04, + 0x29, 0x92, 0x78, 0x75, 0xd2, 0x70, 0xed, 0xaf, 0x7b, 0xf3, 0xdd, 0x9e, 0x33, 0x8a, 0x94, 0x65, + 0x09, 0x11, 0x79, 0x42, 0x13, 0xc8, 0xcc, 0xb0, 0x68, 0x59, 0x62, 0x92, 0xab, 0xd5, 0x97, 0xf3, + 0x0b, 0xfb, 0x71, 0x7c, 0xc5, 0xe8, 0x77, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x9d, 0xfe, + 0x1c, 0x89, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/tracing/v1/trace.pb.go b/vendor/google.golang.org/genproto/googleapis/tracing/v1/trace.pb.go new file mode 100644 index 000000000..b25493668 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/tracing/v1/trace.pb.go @@ -0,0 +1,888 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/tracing/trace.proto + +/* +Package tracing is a generated protocol buffer package. + +It is generated from these files: + google/tracing/trace.proto + +It has these top-level messages: + TraceId + Module + StackTrace + LabelValue + Span + Trace +*/ +package tracing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The type of the network event. SENT or RECV event. +type Span_TimeEvent_NetworkEvent_Type int32 + +const ( + Span_TimeEvent_NetworkEvent_UNSPECIFIED Span_TimeEvent_NetworkEvent_Type = 0 + Span_TimeEvent_NetworkEvent_SENT Span_TimeEvent_NetworkEvent_Type = 1 + Span_TimeEvent_NetworkEvent_RECV Span_TimeEvent_NetworkEvent_Type = 2 +) + +var Span_TimeEvent_NetworkEvent_Type_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SENT", + 2: "RECV", +} +var Span_TimeEvent_NetworkEvent_Type_value = map[string]int32{ + "UNSPECIFIED": 0, + "SENT": 1, + "RECV": 2, +} + +func (x Span_TimeEvent_NetworkEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_NetworkEvent_Type_name, int32(x)) +} +func (Span_TimeEvent_NetworkEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 0, 1, 0} +} + +// The type of the link. 
+type Span_Link_Type int32 + +const ( + Span_Link_UNSPECIFIED Span_Link_Type = 0 + Span_Link_CHILD Span_Link_Type = 1 + Span_Link_PARENT Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "CHILD", + 2: "PARENT", +} +var Span_Link_Type_value = map[string]int32{ + "UNSPECIFIED": 0, + "CHILD": 1, + "PARENT": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 1, 0} } + +// A TraceId uniquely identifies a Trace. It is conceptually a 128-bit value, +// represented as a string, containing the hex-encoded value. +type TraceId struct { + // Trace ID specified as a hex-encoded string. *Must* be 32 bytes long. + HexEncoded string `protobuf:"bytes,1,opt,name=hex_encoded,json=hexEncoded" json:"hex_encoded,omitempty"` +} + +func (m *TraceId) Reset() { *m = TraceId{} } +func (m *TraceId) String() string { return proto.CompactTextString(m) } +func (*TraceId) ProtoMessage() {} +func (*TraceId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *TraceId) GetHexEncoded() string { + if m != nil { + return m.HexEncoded + } + return "" +} + +type Module struct { + // Binary module. + // E.g. 
main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so + Module string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + // Build_id is a unique identifier for the module, + // probably a hash of its contents + BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId" json:"build_id,omitempty"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Module) GetModule() string { + if m != nil { + return m.Module + } + return "" +} + +func (m *Module) GetBuildId() string { + if m != nil { + return m.BuildId + } + return "" +} + +type StackTrace struct { + // Stack frames of this stack trace. + StackFrame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=stack_frame,json=stackFrame" json:"stack_frame,omitempty"` + // User can choose to use his own hash function to hash large labels to save + // network bandwidth and storage. + // Typical usage is to pass both initially to inform the storage of the + // mapping. And in subsequent calls, pass in stack_trace_hash_id only. + // User shall verify the hash value is successfully stored. + StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId" json:"stack_trace_hash_id,omitempty"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *StackTrace) GetStackFrame() []*StackTrace_StackFrame { + if m != nil { + return m.StackFrame + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() uint64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// Presents a single stack frame in a stack trace. 
+type StackTrace_StackFrame struct { + // Fully qualified names which uniquely identify function/method/etc. + FunctionName string `protobuf:"bytes,1,opt,name=function_name,json=functionName" json:"function_name,omitempty"` + // Used when function name is ‘mangled’. Not guaranteed to be fully + // qualified but usually it is. + OrigFunctionName string `protobuf:"bytes,2,opt,name=orig_function_name,json=origFunctionName" json:"orig_function_name,omitempty"` + // File name of the frame. + FileName string `protobuf:"bytes,3,opt,name=file_name,json=fileName" json:"file_name,omitempty"` + // Line number of the frame. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber" json:"line_number,omitempty"` + // Column number is important in JavaScript(anonymous functions), + // Might not be available in some languages. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber" json:"column_number,omitempty"` + // Binary module the code is loaded from. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule" json:"load_module,omitempty"` + // source_version is deployment specific. It might be + // better to be stored in deployment metadata. + // However, in distributed tracing, it’s hard to keep track of + // source/binary versions at one place for all spans. 
+ SourceVersion string `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion" json:"source_version,omitempty"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *StackTrace_StackFrame) GetFunctionName() string { + if m != nil { + return m.FunctionName + } + return "" +} + +func (m *StackTrace_StackFrame) GetOrigFunctionName() string { + if m != nil { + return m.OrigFunctionName + } + return "" +} + +func (m *StackTrace_StackFrame) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() string { + if m != nil { + return m.SourceVersion + } + return "" +} + +// Allowed label values. +type LabelValue struct { + // The value of the label. 
+ // + // Types that are valid to be assigned to Value: + // *LabelValue_StringValue + // *LabelValue_IntValue + // *LabelValue_BoolValue + Value isLabelValue_Value `protobuf_oneof:"value"` +} + +func (m *LabelValue) Reset() { *m = LabelValue{} } +func (m *LabelValue) String() string { return proto.CompactTextString(m) } +func (*LabelValue) ProtoMessage() {} +func (*LabelValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type isLabelValue_Value interface { + isLabelValue_Value() +} + +type LabelValue_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,oneof"` +} +type LabelValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,oneof"` +} +type LabelValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,oneof"` +} + +func (*LabelValue_StringValue) isLabelValue_Value() {} +func (*LabelValue_IntValue) isLabelValue_Value() {} +func (*LabelValue_BoolValue) isLabelValue_Value() {} + +func (m *LabelValue) GetValue() isLabelValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *LabelValue) GetStringValue() string { + if x, ok := m.GetValue().(*LabelValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *LabelValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*LabelValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *LabelValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*LabelValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LabelValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LabelValue_OneofMarshaler, _LabelValue_OneofUnmarshaler, _LabelValue_OneofSizer, []interface{}{ + (*LabelValue_StringValue)(nil), + (*LabelValue_IntValue)(nil), + (*LabelValue_BoolValue)(nil), + } +} + +func _LabelValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LabelValue) + // value + switch x := m.Value.(type) { + case *LabelValue_StringValue: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *LabelValue_IntValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntValue)) + case *LabelValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("LabelValue.Value has unexpected type %T", x) + } + return nil +} + +func _LabelValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LabelValue) + switch tag { + case 1: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &LabelValue_StringValue{x} + return true, err + case 2: // value.int_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &LabelValue_IntValue{int64(x)} + return true, err + case 3: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &LabelValue_BoolValue{x != 0} + return true, err + default: + return false, nil + } +} + +func _LabelValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LabelValue) + // value + switch x := m.Value.(type) { + case *LabelValue_StringValue: + n += 
proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *LabelValue_IntValue: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.IntValue)) + case *LabelValue_BoolValue: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A span represents a single operation within a trace. Spans can be nested +// and form a trace tree. Often, a trace contains a root span that describes the +// end-to-end latency and, optionally, one or more subspans for +// its sub-operations. Spans do not need to be contiguous. There may be gaps +// between spans in a trace. +type Span struct { + // Identifier for the span. Must be a 64-bit integer other than 0 and + // unique within a trace. + Id uint64 `protobuf:"fixed64,1,opt,name=id" json:"id,omitempty"` + // Name of the span. The span name is sanitized and displayed in the + // Stackdriver Trace tool in the {% dynamic print site_values.console_name %}. + // The name may be a method name or some other per-call site name. + // For the same executable and the same call point, a best practice is + // to use a consistent name, which makes it easier to correlate + // cross-trace spans. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // ID of parent span. 0 or missing if this is a root span. + ParentId uint64 `protobuf:"fixed64,3,opt,name=parent_id,json=parentId" json:"parent_id,omitempty"` + // Local machine clock in nanoseconds from the UNIX epoch, + // at which span execution started. + // On the server side these are the times when the server application + // handler starts running. 
+ LocalStartTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=local_start_time,json=localStartTime" json:"local_start_time,omitempty"` + // Local machine clock in nanoseconds from the UNIX epoch, + // at which span execution ended. + // On the server side these are the times when the server application + // handler finishes running. + LocalEndTime *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=local_end_time,json=localEndTime" json:"local_end_time,omitempty"` + // Properties of a span. Labels at the span level. + // E.g. + // "/instance_id": "my-instance" + // "/zone": "us-central1-a" + // "/grpc/peer_address": "ip:port" (dns, etc.) + // "/grpc/deadline": "Duration" + // "/http/user_agent" + // "/http/request_bytes": 300 + // "/http/response_bytes": 1200 + // "/http/url": google.com/apis + // "/pid" + // "abc.com/mylabel": "my label value" + Labels map[string]*LabelValue `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Stack trace captured at the start of the span. This is optional. + StackTrace *StackTrace `protobuf:"bytes,7,opt,name=stack_trace,json=stackTrace" json:"stack_trace,omitempty"` + // A collection of time-stamped events. + TimeEvents []*Span_TimeEvent `protobuf:"bytes,8,rep,name=time_events,json=timeEvents" json:"time_events,omitempty"` + // A collection of links. + Links []*Span_Link `protobuf:"bytes,9,rep,name=links" json:"links,omitempty"` + // The final status of the Span. This is optional. + Status *google_rpc.Status `protobuf:"bytes,10,opt,name=status" json:"status,omitempty"` + // True if this Span has a remote parent (is an RPC server Span). 
+ HasRemoteParent bool `protobuf:"varint,11,opt,name=has_remote_parent,json=hasRemoteParent" json:"has_remote_parent,omitempty"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Span) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetParentId() uint64 { + if m != nil { + return m.ParentId + } + return 0 +} + +func (m *Span) GetLocalStartTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LocalStartTime + } + return nil +} + +func (m *Span) GetLocalEndTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LocalEndTime + } + return nil +} + +func (m *Span) GetLabels() map[string]*LabelValue { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() []*Span_TimeEvent { + if m != nil { + return m.TimeEvents + } + return nil +} + +func (m *Span) GetLinks() []*Span_Link { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() *google_rpc.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetHasRemoteParent() bool { + if m != nil { + return m.HasRemoteParent + } + return false +} + +// A time-stamped annotation in the Span. +type Span_TimeEvent struct { + // The local machine absolute timestamp when this event happened. 
+ LocalTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=local_time,json=localTime" json:"local_time,omitempty"` + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_NetworkEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,oneof"` +} +type Span_TimeEvent_NetworkEvent_ struct { + NetworkEvent *Span_TimeEvent_NetworkEvent `protobuf:"bytes,3,opt,name=network_event,json=networkEvent,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} +func (*Span_TimeEvent_NetworkEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Span_TimeEvent) GetLocalTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LocalTime + } + return nil +} + +func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetNetworkEvent() *Span_TimeEvent_NetworkEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_NetworkEvent_); ok { + return x.NetworkEvent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_NetworkEvent_)(nil), + } +} + +func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Annotation); err != nil { + return err + } + case *Span_TimeEvent_NetworkEvent_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NetworkEvent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x) + } + return nil +} + +func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Span_TimeEvent) + switch tag { + case 2: // value.annotation + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_Annotation) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_Annotation_{msg} + return true, err + case 3: // value.network_event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_NetworkEvent) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_NetworkEvent_{msg} + return true, err + default: + return false, nil + } +} + +func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + s := proto.Size(x.Annotation) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*Span_TimeEvent_NetworkEvent_: + s := proto.Size(x.NetworkEvent) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Text annotation with a set of labels. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // A set of labels on the annotation. + Labels map[string]*LabelValue `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } +func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_Annotation) ProtoMessage() {} +func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0, 0} } + +func (m *Span_TimeEvent_Annotation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Span_TimeEvent_Annotation) GetLabels() map[string]*LabelValue { + if m != nil { + return m.Labels + } + return nil +} + +// An event describing an RPC message sent/received on the network. +type Span_TimeEvent_NetworkEvent struct { + // If available, this is the kernel time: + // For sent messages, this is the time at which the first bit was sent. + // For received messages, this is the time at which the last bit was + // received. + KernelTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=kernel_time,json=kernelTime" json:"kernel_time,omitempty"` + Type Span_TimeEvent_NetworkEvent_Type `protobuf:"varint,2,opt,name=type,enum=google.tracing.v1.Span_TimeEvent_NetworkEvent_Type" json:"type,omitempty"` + // Every message has an identifier, that must be different from all the + // network messages in this span. 
+ // This is very important when the request/response are streamed. + MessageId uint64 `protobuf:"varint,3,opt,name=message_id,json=messageId" json:"message_id,omitempty"` + // Number of bytes send/receive. + MessageSize uint64 `protobuf:"varint,4,opt,name=message_size,json=messageSize" json:"message_size,omitempty"` +} + +func (m *Span_TimeEvent_NetworkEvent) Reset() { *m = Span_TimeEvent_NetworkEvent{} } +func (m *Span_TimeEvent_NetworkEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_NetworkEvent) ProtoMessage() {} +func (*Span_TimeEvent_NetworkEvent) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 0, 1} +} + +func (m *Span_TimeEvent_NetworkEvent) GetKernelTime() *google_protobuf1.Timestamp { + if m != nil { + return m.KernelTime + } + return nil +} + +func (m *Span_TimeEvent_NetworkEvent) GetType() Span_TimeEvent_NetworkEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_NetworkEvent_UNSPECIFIED +} + +func (m *Span_TimeEvent_NetworkEvent) GetMessageId() uint64 { + if m != nil { + return m.MessageId + } + return 0 +} + +func (m *Span_TimeEvent_NetworkEvent) GetMessageSize() uint64 { + if m != nil { + return m.MessageSize + } + return 0 +} + +// Link one span with another which may be in a different Trace. Used (for +// example) in batching operations, where a single batch handler processes +// multiple requests from different traces. +type Span_Link struct { + // The trace and span identifier of the linked span. 
+ TraceId *TraceId `protobuf:"bytes,1,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"` + SpanId uint64 `protobuf:"fixed64,2,opt,name=span_id,json=spanId" json:"span_id,omitempty"` + Type Span_Link_Type `protobuf:"varint,3,opt,name=type,enum=google.tracing.v1.Span_Link_Type" json:"type,omitempty"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 1} } + +func (m *Span_Link) GetTraceId() *TraceId { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span_Link) GetSpanId() uint64 { + if m != nil { + return m.SpanId + } + return 0 +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_UNSPECIFIED +} + +// A trace describes how long it takes for an application to perform some +// operations. It consists of a tree of spans, each of which contains details +// about an operation with time information and operation details. +type Trace struct { + // Globally unique identifier for the trace. Common to all the spans. + TraceId *TraceId `protobuf:"bytes,1,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"` + // Collection of spans in the trace. The root span has parent_id == 0. 
+ Spans []*Span `protobuf:"bytes,2,rep,name=spans" json:"spans,omitempty"` +} + +func (m *Trace) Reset() { *m = Trace{} } +func (m *Trace) String() string { return proto.CompactTextString(m) } +func (*Trace) ProtoMessage() {} +func (*Trace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Trace) GetTraceId() *TraceId { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Trace) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +func init() { + proto.RegisterType((*TraceId)(nil), "google.tracing.v1.TraceId") + proto.RegisterType((*Module)(nil), "google.tracing.v1.Module") + proto.RegisterType((*StackTrace)(nil), "google.tracing.v1.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "google.tracing.v1.StackTrace.StackFrame") + proto.RegisterType((*LabelValue)(nil), "google.tracing.v1.LabelValue") + proto.RegisterType((*Span)(nil), "google.tracing.v1.Span") + proto.RegisterType((*Span_TimeEvent)(nil), "google.tracing.v1.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "google.tracing.v1.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_NetworkEvent)(nil), "google.tracing.v1.Span.TimeEvent.NetworkEvent") + proto.RegisterType((*Span_Link)(nil), "google.tracing.v1.Span.Link") + proto.RegisterType((*Trace)(nil), "google.tracing.v1.Trace") + proto.RegisterEnum("google.tracing.v1.Span_TimeEvent_NetworkEvent_Type", Span_TimeEvent_NetworkEvent_Type_name, Span_TimeEvent_NetworkEvent_Type_value) + proto.RegisterEnum("google.tracing.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) +} + +func init() { proto.RegisterFile("google/tracing/trace.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1102 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1a, 0x47, + 0x14, 0x66, 0xf9, 0x59, 0xe0, 0x2c, 0x76, 0xc8, 0x54, 0xad, 0x09, 0x8d, 0x65, 0x9b, 0xa8, 0x92, + 
0xe5, 0x26, 0x8b, 0x82, 0x15, 0xc9, 0x8d, 0xa5, 0xaa, 0xb1, 0x8d, 0x0b, 0x52, 0x8a, 0xd0, 0xe0, + 0x58, 0x55, 0x6f, 0x56, 0xc3, 0xee, 0x18, 0x56, 0x2c, 0xb3, 0xab, 0x9d, 0x81, 0xc6, 0xbe, 0xed, + 0x1b, 0xf4, 0x1d, 0x7a, 0xdb, 0x37, 0xe8, 0x83, 0xb4, 0x4f, 0x53, 0xcd, 0xcf, 0x62, 0xaa, 0xd8, + 0x71, 0x53, 0xa9, 0x57, 0xcc, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x7c, 0xe7, 0x9c, 0x59, 0xa0, 0x39, + 0x89, 0xe3, 0x49, 0x44, 0xdb, 0x22, 0x25, 0x7e, 0xc8, 0x26, 0xea, 0x97, 0xba, 0x49, 0x1a, 0x8b, + 0x18, 0x3d, 0xd6, 0x36, 0xd7, 0xd8, 0xdc, 0xe5, 0xcb, 0xe6, 0x53, 0x43, 0x27, 0x49, 0xd8, 0x26, + 0x8c, 0xc5, 0x82, 0x88, 0x30, 0x66, 0x5c, 0x3b, 0x34, 0x77, 0x8c, 0x55, 0xed, 0xc6, 0x8b, 0xab, + 0xb6, 0x08, 0xe7, 0x94, 0x0b, 0x32, 0x4f, 0x0c, 0x61, 0xcb, 0x10, 0xd2, 0xc4, 0x6f, 0x73, 0x41, + 0xc4, 0xc2, 0x78, 0xb6, 0x0e, 0xa0, 0x7c, 0x21, 0x4f, 0xee, 0x07, 0x68, 0x07, 0x9c, 0x29, 0x7d, + 0xef, 0x51, 0xe6, 0xc7, 0x01, 0x0d, 0x1a, 0xd6, 0xae, 0xb5, 0x5f, 0xc5, 0x30, 0xa5, 0xef, 0xbb, + 0x1a, 0x69, 0x1d, 0x83, 0xfd, 0x43, 0x1c, 0x2c, 0x22, 0x8a, 0xbe, 0x00, 0x7b, 0xae, 0x56, 0x86, + 0x65, 0x76, 0xe8, 0x09, 0x54, 0xc6, 0x8b, 0x30, 0x0a, 0xbc, 0x30, 0x68, 0xe4, 0x95, 0xa5, 0xac, + 0xf6, 0xfd, 0xa0, 0xf5, 0x7b, 0x01, 0x60, 0x24, 0x88, 0x3f, 0x53, 0xc7, 0xa1, 0x3e, 0x38, 0x5c, + 0xee, 0xbc, 0xab, 0x94, 0xcc, 0x65, 0x98, 0xc2, 0xbe, 0xd3, 0xd9, 0x77, 0x3f, 0xb8, 0xb8, 0x7b, + 0xeb, 0xa3, 0x97, 0xe7, 0x92, 0x8f, 0x81, 0xaf, 0xd6, 0xe8, 0x05, 0x7c, 0xa6, 0x43, 0x29, 0x09, + 0xbd, 0x29, 0xe1, 0xd3, 0xec, 0xfc, 0x22, 0xae, 0xf3, 0x95, 0x7f, 0x8f, 0xf0, 0x69, 0x3f, 0x68, + 0xfe, 0x96, 0x37, 0x89, 0x68, 0xef, 0x67, 0xb0, 0x71, 0xb5, 0x60, 0xbe, 0x54, 0xd3, 0x63, 0x3a, + 0x15, 0x99, 0x77, 0x2d, 0x03, 0x07, 0x92, 0xf4, 0x1c, 0x50, 0x9c, 0x86, 0x13, 0xef, 0x9f, 0x4c, + 0x7d, 0xc3, 0xba, 0xb4, 0x9c, 0xaf, 0xb3, 0xbf, 0x84, 0xea, 0x55, 0x18, 0x51, 0x4d, 0x2a, 0x28, + 0x52, 0x45, 0x02, 0xca, 0xb8, 0x03, 0x4e, 0x14, 0x32, 0xea, 0xb1, 0xc5, 0x7c, 0x4c, 0xd3, 0x46, + 0x71, 0xd7, 0xda, 0x2f, 0x60, 0x90, 
0xd0, 0x40, 0x21, 0x32, 0x21, 0x3f, 0x8e, 0x16, 0x73, 0x96, + 0x51, 0x4a, 0x8a, 0x52, 0xd3, 0xa0, 0x21, 0xbd, 0x06, 0x27, 0x8a, 0x49, 0xe0, 0x99, 0x2a, 0xd8, + 0xbb, 0xd6, 0xbe, 0xd3, 0x79, 0x72, 0x87, 0x7c, 0xba, 0x60, 0x18, 0x24, 0xdb, 0x14, 0xef, 0x2b, + 0xd8, 0xe4, 0xf1, 0x22, 0xf5, 0xa9, 0xb7, 0xa4, 0x29, 0x0f, 0x63, 0xd6, 0x28, 0xab, 0x1c, 0x37, + 0x34, 0x7a, 0xa9, 0xc1, 0xd6, 0x0d, 0xc0, 0x5b, 0x32, 0xa6, 0xd1, 0x25, 0x89, 0x16, 0x52, 0xa6, + 0x1a, 0x17, 0x69, 0xc8, 0x26, 0xde, 0x52, 0xee, 0xb5, 0x4a, 0xbd, 0x1c, 0x76, 0x34, 0xaa, 0x49, + 0xdb, 0x50, 0x0d, 0x99, 0x30, 0x0c, 0xa9, 0x4e, 0xa1, 0x97, 0xc3, 0x95, 0x90, 0x09, 0x6d, 0xde, + 0x01, 0x18, 0xc7, 0x71, 0x64, 0xec, 0x52, 0x98, 0x4a, 0x2f, 0x87, 0xab, 0x12, 0x53, 0x84, 0x93, + 0x32, 0x94, 0x94, 0xad, 0xf5, 0x6b, 0x0d, 0x8a, 0xa3, 0x84, 0x30, 0xb4, 0x09, 0xf9, 0x50, 0xb7, + 0xa2, 0x8d, 0xf3, 0x61, 0x80, 0x10, 0x14, 0xd7, 0xa4, 0x57, 0x6b, 0x29, 0x77, 0x42, 0x52, 0xca, + 0x84, 0xac, 0x7a, 0x41, 0x51, 0x2b, 0x1a, 0xe8, 0x07, 0xe8, 0x0c, 0xea, 0x51, 0xec, 0x93, 0xc8, + 0xe3, 0x82, 0xa4, 0xc2, 0x93, 0x73, 0xa1, 0x34, 0x77, 0x3a, 0xcd, 0x4c, 0xad, 0x6c, 0x68, 0xdc, + 0x8b, 0x6c, 0x68, 0xf0, 0xa6, 0xf2, 0x19, 0x49, 0x17, 0x09, 0xa2, 0xef, 0x40, 0x23, 0x1e, 0x65, + 0x81, 0x8e, 0x51, 0x7a, 0x30, 0x46, 0x4d, 0x79, 0x74, 0x59, 0xa0, 0x22, 0x1c, 0x83, 0x1d, 0x49, + 0x35, 0x79, 0xc3, 0x56, 0xad, 0xfe, 0xec, 0xae, 0x56, 0x4f, 0x08, 0x73, 0x95, 0xe6, 0xbc, 0xcb, + 0x44, 0x7a, 0x8d, 0x8d, 0x0b, 0xfa, 0x36, 0x1b, 0x16, 0xd5, 0xe1, 0xaa, 0x5c, 0x4e, 0x67, 0xfb, + 0xa3, 0xc3, 0x62, 0x26, 0x44, 0x0f, 0xdb, 0x09, 0x38, 0x32, 0x69, 0x8f, 0x2e, 0x29, 0x13, 0xbc, + 0x51, 0x51, 0x19, 0xec, 0xdd, 0x97, 0x81, 0xcc, 0xb7, 0x2b, 0x99, 0x18, 0x44, 0xb6, 0xe4, 0xa8, + 0x03, 0xa5, 0x28, 0x64, 0x33, 0xde, 0xa8, 0x2a, 0xef, 0xa7, 0xf7, 0xe6, 0x1f, 0xb2, 0x19, 0xd6, + 0x54, 0x74, 0x00, 0xb6, 0x7e, 0x6c, 0x1a, 0xa0, 0x52, 0x46, 0x99, 0x53, 0x9a, 0xf8, 0x32, 0x57, + 0xb1, 0xe0, 0xd8, 0x30, 0xd0, 0x01, 0x3c, 0x9e, 0x12, 0xee, 0xa5, 0x74, 
0x1e, 0x0b, 0xea, 0xe9, + 0xfa, 0x35, 0x1c, 0xd9, 0x23, 0xf8, 0xd1, 0x94, 0x70, 0xac, 0xf0, 0xa1, 0x82, 0x9b, 0x7f, 0x96, + 0xa0, 0xba, 0xca, 0x12, 0x7d, 0x03, 0xa0, 0x8b, 0xa3, 0x0a, 0x63, 0x3d, 0x58, 0x98, 0xaa, 0x62, + 0xab, 0xaa, 0x0c, 0x00, 0x6e, 0x1f, 0x53, 0xd5, 0x54, 0x4e, 0xe7, 0xf9, 0x83, 0xba, 0xb8, 0x6f, + 0x56, 0x3e, 0xbd, 0x1c, 0x5e, 0x8b, 0x80, 0xde, 0xc1, 0x06, 0xa3, 0xe2, 0xe7, 0x38, 0x9d, 0x69, + 0xad, 0x55, 0x3b, 0x3a, 0x1d, 0xf7, 0xe1, 0x90, 0x03, 0xed, 0xa6, 0x36, 0xbd, 0x1c, 0xae, 0xb1, + 0xb5, 0x7d, 0xf3, 0x2f, 0x0b, 0xe0, 0xf6, 0x4c, 0xb4, 0x0b, 0x4e, 0x40, 0xb9, 0x9f, 0x86, 0x89, + 0x4a, 0x5b, 0x3f, 0x58, 0xeb, 0x10, 0x1a, 0xae, 0xba, 0x2d, 0xaf, 0xaa, 0x75, 0xf4, 0x29, 0x77, + 0xba, 0xab, 0x05, 0x9b, 0x3f, 0x82, 0xb3, 0x06, 0xa3, 0x3a, 0x14, 0x66, 0xf4, 0xda, 0x1c, 0x2d, + 0x97, 0xe8, 0xd0, 0xcc, 0xae, 0x51, 0xf1, 0xae, 0xee, 0xbc, 0x7d, 0x4e, 0xb0, 0xe6, 0xbe, 0xce, + 0x1f, 0x59, 0xcd, 0x5f, 0xf2, 0x50, 0x5b, 0xbf, 0x3d, 0x3a, 0x06, 0x67, 0x46, 0x53, 0x46, 0xff, + 0x75, 0x41, 0x41, 0xd3, 0x55, 0x45, 0xbf, 0x87, 0xa2, 0xb8, 0x4e, 0x74, 0x16, 0x9b, 0x9d, 0xc3, + 0x4f, 0x13, 0xde, 0xbd, 0xb8, 0x4e, 0x28, 0x56, 0x01, 0xd0, 0x36, 0xc0, 0x9c, 0x72, 0x4e, 0x26, + 0x34, 0x7b, 0x56, 0x8a, 0xb8, 0x6a, 0x90, 0x7e, 0x80, 0xf6, 0xa0, 0x96, 0x99, 0x79, 0x78, 0xa3, + 0xdf, 0x94, 0x22, 0x76, 0x0c, 0x36, 0x0a, 0x6f, 0x68, 0xeb, 0x6b, 0x28, 0xca, 0x78, 0xe8, 0x11, + 0x38, 0xef, 0x06, 0xa3, 0x61, 0xf7, 0xb4, 0x7f, 0xde, 0xef, 0x9e, 0xd5, 0x73, 0xa8, 0x02, 0xc5, + 0x51, 0x77, 0x70, 0x51, 0xb7, 0xe4, 0x0a, 0x77, 0x4f, 0x2f, 0xeb, 0xf9, 0xd5, 0xd3, 0xd7, 0xfc, + 0xc3, 0x82, 0xa2, 0x9c, 0x21, 0xf4, 0x0a, 0x2a, 0xfa, 0x83, 0x66, 0x1e, 0xc0, 0x35, 0x0d, 0xd6, + 0x6e, 0x63, 0x3e, 0xde, 0xb8, 0x2c, 0xcc, 0x57, 0x7c, 0x0b, 0xca, 0x3c, 0x21, 0x2c, 0xfb, 0x02, + 0xda, 0xd8, 0x96, 0xdb, 0x7e, 0x80, 0x5e, 0x19, 0x65, 0x0a, 0x4a, 0x99, 0xbd, 0x8f, 0xcd, 0xef, + 0x9a, 0x0e, 0x2d, 0xf7, 0xbe, 0x5b, 0x54, 0xa1, 0x74, 0xda, 0xeb, 0xbf, 0x3d, 0xab, 0x5b, 0x08, + 0xc0, 0x1e, 
0xbe, 0xc1, 0xf2, 0x4a, 0xf9, 0xff, 0xaf, 0x51, 0x5a, 0x73, 0x28, 0xe9, 0xe7, 0xec, + 0x3f, 0x2a, 0xf3, 0x02, 0x4a, 0x52, 0x8a, 0x6c, 0x26, 0xb6, 0xee, 0x51, 0x00, 0x6b, 0xd6, 0x89, + 0x07, 0x9f, 0xfb, 0xf1, 0xfc, 0x43, 0xd2, 0x09, 0xa8, 0xc8, 0x43, 0xd9, 0x87, 0x43, 0xeb, 0xa7, + 0x23, 0x43, 0x98, 0xc4, 0x11, 0x61, 0x13, 0x37, 0x4e, 0x27, 0xed, 0x09, 0x65, 0xaa, 0x4b, 0xdb, + 0xda, 0x44, 0x92, 0x90, 0xaf, 0xfe, 0xe6, 0x2d, 0x5f, 0x1e, 0x9b, 0xe5, 0xd8, 0x56, 0xa4, 0xc3, + 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x30, 0x5b, 0x04, 0x0a, 0x0a, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/color/color.pb.go b/vendor/google.golang.org/genproto/googleapis/type/color/color.pb.go new file mode 100644 index 000000000..4f201b88a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/color/color.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/type/color.proto + +/* +Package color is a generated protocol buffer package. + +It is generated from these files: + google/type/color.proto + +It has these top-level messages: + Color +*/ +package color + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents a color in the RGBA color space. 
This representation is designed +// for simplicity of conversion to/from color representations in various +// languages over compactness; for example, the fields of this representation +// can be trivially provided to the constructor of "java.awt.Color" in Java; it +// can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha" +// method in iOS; and, with just a little work, it can be easily formatted into +// a CSS "rgba()" string in JavaScript, as well. Here are some examples: +// +// Example (Java): +// +// import com.google.type.Color; +// +// // ... +// public static java.awt.Color fromProto(Color protocolor) { +// float alpha = protocolor.hasAlpha() +// ? protocolor.getAlpha().getValue() +// : 1.0; +// +// return new java.awt.Color( +// protocolor.getRed(), +// protocolor.getGreen(), +// protocolor.getBlue(), +// alpha); +// } +// +// public static Color toProto(java.awt.Color color) { +// float red = (float) color.getRed(); +// float green = (float) color.getGreen(); +// float blue = (float) color.getBlue(); +// float denominator = 255.0; +// Color.Builder resultBuilder = +// Color +// .newBuilder() +// .setRed(red / denominator) +// .setGreen(green / denominator) +// .setBlue(blue / denominator); +// int alpha = color.getAlpha(); +// if (alpha != 255) { +// result.setAlpha( +// FloatValue +// .newBuilder() +// .setValue(((float) alpha) / denominator) +// .build()); +// } +// return resultBuilder.build(); +// } +// // ... +// +// Example (iOS / Obj-C): +// +// // ... 
+// static UIColor* fromProto(Color* protocolor) { +// float red = [protocolor red]; +// float green = [protocolor green]; +// float blue = [protocolor blue]; +// FloatValue* alpha_wrapper = [protocolor alpha]; +// float alpha = 1.0; +// if (alpha_wrapper != nil) { +// alpha = [alpha_wrapper value]; +// } +// return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; +// } +// +// static Color* toProto(UIColor* color) { +// CGFloat red, green, blue, alpha; +// if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { +// return nil; +// } +// Color* result = [Color alloc] init]; +// [result setRed:red]; +// [result setGreen:green]; +// [result setBlue:blue]; +// if (alpha <= 0.9999) { +// [result setAlpha:floatWrapperWithValue(alpha)]; +// } +// [result autorelease]; +// return result; +// } +// // ... +// +// Example (JavaScript): +// +// // ... +// +// var protoToCssColor = function(rgb_color) { +// var redFrac = rgb_color.red || 0.0; +// var greenFrac = rgb_color.green || 0.0; +// var blueFrac = rgb_color.blue || 0.0; +// var red = Math.floor(redFrac * 255); +// var green = Math.floor(greenFrac * 255); +// var blue = Math.floor(blueFrac * 255); +// +// if (!('alpha' in rgb_color)) { +// return rgbToCssColor_(red, green, blue); +// } +// +// var alphaFrac = rgb_color.alpha.value || 0.0; +// var rgbParams = [red, green, blue].join(','); +// return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); +// }; +// +// var rgbToCssColor_ = function(red, green, blue) { +// var rgbNumber = new Number((red << 16) | (green << 8) | blue); +// var hexString = rgbNumber.toString(16); +// var missingZeros = 6 - hexString.length; +// var resultBuilder = ['#']; +// for (var i = 0; i < missingZeros; i++) { +// resultBuilder.push('0'); +// } +// resultBuilder.push(hexString); +// return resultBuilder.join(''); +// }; +// +// // ... +type Color struct { + // The amount of red in the color as a value in the interval [0, 1]. 
+ Red float32 `protobuf:"fixed32,1,opt,name=red" json:"red,omitempty"` + // The amount of green in the color as a value in the interval [0, 1]. + Green float32 `protobuf:"fixed32,2,opt,name=green" json:"green,omitempty"` + // The amount of blue in the color as a value in the interval [0, 1]. + Blue float32 `protobuf:"fixed32,3,opt,name=blue" json:"blue,omitempty"` + // The fraction of this color that should be applied to the pixel. That is, + // the final pixel color is defined by the equation: + // + // pixel color = alpha * (this color) + (1.0 - alpha) * (background color) + // + // This means that a value of 1.0 corresponds to a solid color, whereas + // a value of 0.0 corresponds to a completely transparent color. This + // uses a wrapper message rather than a simple float scalar so that it is + // possible to distinguish between a default value and the value being unset. + // If omitted, this color object is to be rendered as a solid color + // (as if the alpha value had been explicitly given with a value of 1.0). 
+ Alpha *google_protobuf.FloatValue `protobuf:"bytes,4,opt,name=alpha" json:"alpha,omitempty"` +} + +func (m *Color) Reset() { *m = Color{} } +func (m *Color) String() string { return proto.CompactTextString(m) } +func (*Color) ProtoMessage() {} +func (*Color) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Color) GetRed() float32 { + if m != nil { + return m.Red + } + return 0 +} + +func (m *Color) GetGreen() float32 { + if m != nil { + return m.Green + } + return 0 +} + +func (m *Color) GetBlue() float32 { + if m != nil { + return m.Blue + } + return 0 +} + +func (m *Color) GetAlpha() *google_protobuf.FloatValue { + if m != nil { + return m.Alpha + } + return nil +} + +func init() { + proto.RegisterType((*Color)(nil), "google.type.Color") +} + +func init() { proto.RegisterFile("google/type/color.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 224 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0xc5, 0x30, + 0x10, 0xc7, 0x49, 0xfb, 0xea, 0x70, 0x6f, 0x50, 0x82, 0x60, 0x50, 0x90, 0x87, 0xd3, 0x9b, 0x12, + 0x54, 0x70, 0x71, 0xab, 0xa0, 0x6b, 0x29, 0xe2, 0x20, 0x38, 0xa4, 0xf5, 0x8c, 0x42, 0xec, 0x85, + 0xb4, 0x55, 0xfc, 0x3a, 0x7e, 0x52, 0xc9, 0xa5, 0x42, 0x97, 0x70, 0xb9, 0xdf, 0xef, 0x92, 0xff, + 0xc1, 0x89, 0x23, 0x72, 0x1e, 0xcd, 0xf4, 0x13, 0xd0, 0xf4, 0xe4, 0x29, 0xea, 0x10, 0x69, 0x22, + 0xb9, 0xcd, 0x40, 0x27, 0x70, 0x7a, 0xbe, 0x58, 0x8c, 0xba, 0xf9, 0xcd, 0x7c, 0x47, 0x1b, 0x02, + 0xc6, 0x31, 0xcb, 0x17, 0x5f, 0x50, 0xdd, 0xa5, 0x59, 0x79, 0x04, 0x65, 0xc4, 0x57, 0x25, 0x76, + 0x62, 0x5f, 0xb4, 0xa9, 0x94, 0xc7, 0x50, 0xb9, 0x88, 0x38, 0xa8, 0x82, 0x7b, 0xf9, 0x22, 0x25, + 0x6c, 0x3a, 0x3f, 0xa3, 0x2a, 0xb9, 0xc9, 0xb5, 0xbc, 0x84, 0xca, 0xfa, 0xf0, 0x6e, 0xd5, 0x66, + 0x27, 0xf6, 0xdb, 0xab, 0x33, 0xbd, 0x24, 0xf8, 0xff, 0x54, 0xdf, 0x7b, 0xb2, 0xd3, 0x93, 0xf5, + 0x33, 0xb6, 0xd9, 0xac, 0x5f, 0xe0, 0xb0, 0xa7, 0x4f, 0xbd, 0x8a, 
0x5a, 0x03, 0x07, 0x69, 0xd2, + 0x4c, 0x23, 0x9e, 0x6f, 0x16, 0xe4, 0xc8, 0xdb, 0xc1, 0x69, 0x8a, 0xce, 0x38, 0x1c, 0xf8, 0x45, + 0x93, 0x91, 0x0d, 0x1f, 0xe3, 0x6a, 0xfb, 0x5b, 0x3e, 0x7f, 0x8b, 0xf2, 0xe1, 0xb1, 0xe9, 0x0e, + 0x58, 0xbc, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x42, 0x23, 0x2a, 0xd0, 0x25, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go new file mode 100644 index 000000000..40beaab09 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go @@ -0,0 +1,93 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/type/date.proto + +/* +Package date is a generated protocol buffer package. + +It is generated from these files: + google/type/date.proto + +It has these top-level messages: + Date +*/ +package date + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents a whole calendar date, e.g. date of birth. The time of day and +// time zone are either specified elsewhere or are not significant. The date +// is relative to the Proleptic Gregorian Calendar. The day may be 0 to +// represent a year and month where the day is not significant, e.g. credit card +// expiration date. The year may be 0 to represent a month and day independent +// of year, e.g. anniversary date. Related types are [google.type.TimeOfDay][google.type.TimeOfDay] +// and `google.protobuf.Timestamp`. 
+type Date struct { + // Year of date. Must be from 1 to 9999, or 0 if specifying a date without + // a year. + Year int32 `protobuf:"varint,1,opt,name=year" json:"year,omitempty"` + // Month of year. Must be from 1 to 12. + Month int32 `protobuf:"varint,2,opt,name=month" json:"month,omitempty"` + // Day of month. Must be from 1 to 31 and valid for the year and month, or 0 + // if specifying a year/month where the day is not significant. + Day int32 `protobuf:"varint,3,opt,name=day" json:"day,omitempty"` +} + +func (m *Date) Reset() { *m = Date{} } +func (m *Date) String() string { return proto.CompactTextString(m) } +func (*Date) ProtoMessage() {} +func (*Date) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Date) GetYear() int32 { + if m != nil { + return m.Year + } + return 0 +} + +func (m *Date) GetMonth() int32 { + if m != nil { + return m.Month + } + return 0 +} + +func (m *Date) GetDay() int32 { + if m != nil { + return m.Day + } + return 0 +} + +func init() { + proto.RegisterType((*Date)(nil), "google.type.Date") +} + +func init() { proto.RegisterFile("google/type/date.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 172 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0x4f, 0x49, 0x2c, 0x49, 0xd5, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0xe2, 0x86, 0x88, 0xeb, 0x81, 0xc4, 0x95, 0x9c, 0xb8, 0x58, 0x5c, 0x12, 0x4b, + 0x52, 0x85, 0x84, 0xb8, 0x58, 0x2a, 0x53, 0x13, 0x8b, 0x24, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x11, 0x2e, 0xd6, 0xdc, 0xfc, 0xbc, 0x92, 0x0c, 0x09, 0x26, 0xb0, 0x20, 0x84, + 0x23, 0x24, 0xc0, 0xc5, 0x9c, 0x92, 0x58, 0x29, 0xc1, 0x0c, 0x16, 0x03, 0x31, 0x9d, 0x62, 0xb9, + 0xf8, 0x93, 0xf3, 0x73, 0xf5, 0x90, 0x8c, 0x75, 0xe2, 0x04, 0x19, 0x1a, 0x00, 0xb2, 0x2e, 0x80, + 0x31, 0xca, 0x04, 0x2a, 0x93, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x97, 0x5f, 
0x94, 0xae, 0x9f, + 0x9e, 0x9a, 0x07, 0x76, 0x8c, 0x3e, 0x44, 0x2a, 0xb1, 0x20, 0xb3, 0x18, 0xe1, 0x4e, 0x6b, 0x10, + 0xf1, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x48, 0x40, 0x12, 0x1b, 0x58, 0xa5, 0x31, 0x20, + 0x00, 0x00, 0xff, 0xff, 0x84, 0x95, 0xf3, 0x4c, 0xd0, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/dayofweek/dayofweek.pb.go b/vendor/google.golang.org/genproto/googleapis/type/dayofweek/dayofweek.pb.go new file mode 100644 index 000000000..eecd403fa --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/dayofweek/dayofweek.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/type/dayofweek.proto + +/* +Package dayofweek is a generated protocol buffer package. + +It is generated from these files: + google/type/dayofweek.proto + +It has these top-level messages: +*/ +package dayofweek + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents a day of week. +type DayOfWeek int32 + +const ( + // The unspecified day-of-week. + DayOfWeek_DAY_OF_WEEK_UNSPECIFIED DayOfWeek = 0 + // The day-of-week of Monday. + DayOfWeek_MONDAY DayOfWeek = 1 + // The day-of-week of Tuesday. + DayOfWeek_TUESDAY DayOfWeek = 2 + // The day-of-week of Wednesday. + DayOfWeek_WEDNESDAY DayOfWeek = 3 + // The day-of-week of Thursday. + DayOfWeek_THURSDAY DayOfWeek = 4 + // The day-of-week of Friday. + DayOfWeek_FRIDAY DayOfWeek = 5 + // The day-of-week of Saturday. 
+ DayOfWeek_SATURDAY DayOfWeek = 6 + // The day-of-week of Sunday. + DayOfWeek_SUNDAY DayOfWeek = 7 +) + +var DayOfWeek_name = map[int32]string{ + 0: "DAY_OF_WEEK_UNSPECIFIED", + 1: "MONDAY", + 2: "TUESDAY", + 3: "WEDNESDAY", + 4: "THURSDAY", + 5: "FRIDAY", + 6: "SATURDAY", + 7: "SUNDAY", +} +var DayOfWeek_value = map[string]int32{ + "DAY_OF_WEEK_UNSPECIFIED": 0, + "MONDAY": 1, + "TUESDAY": 2, + "WEDNESDAY": 3, + "THURSDAY": 4, + "FRIDAY": 5, + "SATURDAY": 6, + "SUNDAY": 7, +} + +func (x DayOfWeek) String() string { + return proto.EnumName(DayOfWeek_name, int32(x)) +} +func (DayOfWeek) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func init() { + proto.RegisterEnum("google.type.DayOfWeek", DayOfWeek_name, DayOfWeek_value) +} + +func init() { proto.RegisterFile("google/type/dayofweek.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0x4f, 0x49, 0xac, 0xcc, 0x4f, 0x2b, 0x4f, 0x4d, + 0xcd, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x86, 0x48, 0xea, 0x81, 0x24, 0xb5, 0x5a, + 0x18, 0xb9, 0x38, 0x5d, 0x12, 0x2b, 0xfd, 0xd3, 0xc2, 0x53, 0x53, 0xb3, 0x85, 0xa4, 0xb9, 0xc4, + 0x5d, 0x1c, 0x23, 0xe3, 0xfd, 0xdd, 0xe2, 0xc3, 0x5d, 0x5d, 0xbd, 0xe3, 0x43, 0xfd, 0x82, 0x03, + 0x5c, 0x9d, 0x3d, 0xdd, 0x3c, 0x5d, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x7c, 0xfd, 0xfd, + 0x5c, 0x1c, 0x23, 0x05, 0x18, 0x85, 0xb8, 0xb9, 0xd8, 0x43, 0x42, 0x5d, 0x83, 0x41, 0x1c, 0x26, + 0x21, 0x5e, 0x2e, 0xce, 0x70, 0x57, 0x17, 0x3f, 0x08, 0x97, 0x59, 0x88, 0x87, 0x8b, 0x23, 0xc4, + 0x23, 0x34, 0x08, 0xcc, 0x63, 0x01, 0xe9, 0x72, 0x0b, 0xf2, 0x04, 0xb1, 0x59, 0x41, 0x32, 0xc1, + 0x8e, 0x21, 0xa1, 0x41, 0x20, 0x1e, 0x1b, 0x48, 0x26, 0x38, 0x14, 0x6c, 0x1e, 0xbb, 0x53, 0x26, + 0x17, 0x7f, 0x72, 0x7e, 0xae, 0x1e, 0x92, 0xcb, 0x9c, 0xf8, 0xe0, 0xce, 0x0a, 0x00, 
0x39, 0x3b, + 0x80, 0x31, 0xca, 0x0e, 0x2a, 0x9d, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x97, 0x5f, 0x94, 0xae, + 0x9f, 0x9e, 0x9a, 0x07, 0xf6, 0x94, 0x3e, 0x44, 0x2a, 0xb1, 0x20, 0xb3, 0x18, 0xcd, 0xd3, 0xd6, + 0x70, 0xd6, 0x22, 0x26, 0x66, 0xf7, 0x90, 0x80, 0x24, 0x36, 0xb0, 0x06, 0x63, 0x40, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x6e, 0x23, 0xb2, 0xb3, 0x24, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/latlng/latlng.pb.go b/vendor/google.golang.org/genproto/googleapis/type/latlng/latlng.pb.go new file mode 100644 index 000000000..9a845f9c6 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/latlng/latlng.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/type/latlng.proto + +/* +Package latlng is a generated protocol buffer package. + +It is generated from these files: + google/type/latlng.proto + +It has these top-level messages: + LatLng +*/ +package latlng + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An object representing a latitude/longitude pair. This is expressed as a pair +// of doubles representing degrees latitude and degrees longitude. Unless +// specified otherwise, this must conform to the +// WGS84 +// standard. Values must be within normalized ranges. 
+// +// Example of normalization code in Python: +// +// def NormalizeLongitude(longitude): +// """Wraps decimal degrees longitude to [-180.0, 180.0].""" +// q, r = divmod(longitude, 360.0) +// if r > 180.0 or (r == 180.0 and q <= -1.0): +// return r - 360.0 +// return r +// +// def NormalizeLatLng(latitude, longitude): +// """Wraps decimal degrees latitude and longitude to +// [-90.0, 90.0] and [-180.0, 180.0], respectively.""" +// r = latitude % 360.0 +// if r <= 90.0: +// return r, NormalizeLongitude(longitude) +// elif r >= 270.0: +// return r - 360, NormalizeLongitude(longitude) +// else: +// return 180 - r, NormalizeLongitude(longitude + 180.0) +// +// assert 180.0 == NormalizeLongitude(180.0) +// assert -180.0 == NormalizeLongitude(-180.0) +// assert -179.0 == NormalizeLongitude(181.0) +// assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0) +// assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0) +// assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0) +// assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0) +// assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0) +// assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0) +// assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0) +// assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0) +// assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0) +// assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0) +type LatLng struct { + // The latitude in degrees. It must be in the range [-90.0, +90.0]. + Latitude float64 `protobuf:"fixed64,1,opt,name=latitude" json:"latitude,omitempty"` + // The longitude in degrees. It must be in the range [-180.0, +180.0]. 
+ Longitude float64 `protobuf:"fixed64,2,opt,name=longitude" json:"longitude,omitempty"` +} + +func (m *LatLng) Reset() { *m = LatLng{} } +func (m *LatLng) String() string { return proto.CompactTextString(m) } +func (*LatLng) ProtoMessage() {} +func (*LatLng) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *LatLng) GetLatitude() float64 { + if m != nil { + return m.Latitude + } + return 0 +} + +func (m *LatLng) GetLongitude() float64 { + if m != nil { + return m.Longitude + } + return 0 +} + +func init() { + proto.RegisterType((*LatLng)(nil), "google.type.LatLng") +} + +func init() { proto.RegisterFile("google/type/latlng.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 165 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0xcf, 0x49, 0x2c, 0xc9, 0xc9, 0x4b, 0xd7, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x86, 0xc8, 0xe8, 0x81, 0x64, 0x94, 0x9c, 0xb8, 0xd8, 0x7c, + 0x12, 0x4b, 0x7c, 0xf2, 0xd2, 0x85, 0xa4, 0xb8, 0x38, 0x72, 0x12, 0x4b, 0x32, 0x4b, 0x4a, 0x53, + 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x18, 0x83, 0xe0, 0x7c, 0x21, 0x19, 0x2e, 0xce, 0x9c, 0xfc, + 0xbc, 0x74, 0x88, 0x24, 0x13, 0x58, 0x12, 0x21, 0xe0, 0x94, 0xc0, 0xc5, 0x9f, 0x9c, 0x9f, 0xab, + 0x87, 0x64, 0xac, 0x13, 0x37, 0xc4, 0xd0, 0x00, 0x90, 0x85, 0x01, 0x8c, 0x51, 0x16, 0x50, 0xb9, + 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x73, + 0xf4, 0x21, 0x52, 0x89, 0x05, 0x99, 0xc5, 0xc8, 0x6e, 0xb5, 0x86, 0x50, 0x8b, 0x98, 0x98, 0xdd, + 0x43, 0x02, 0x92, 0xd8, 0xc0, 0x4a, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x69, 0x12, 0x54, + 0x1c, 0xd5, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/money/money.pb.go b/vendor/google.golang.org/genproto/googleapis/type/money/money.pb.go new file mode 100644 index 000000000..34bf8ab66 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/type/money/money.pb.go @@ -0,0 +1,92 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/type/money.proto + +/* +Package money is a generated protocol buffer package. + +It is generated from these files: + google/type/money.proto + +It has these top-level messages: + Money +*/ +package money + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents an amount of money with its currency type. +type Money struct { + // The 3-letter currency code defined in ISO 4217. + CurrencyCode string `protobuf:"bytes,1,opt,name=currency_code,json=currencyCode" json:"currency_code,omitempty"` + // The whole units of the amount. + // For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. + Units int64 `protobuf:"varint,2,opt,name=units" json:"units,omitempty"` + // Number of nano (10^-9) units of the amount. + // The value must be between -999,999,999 and +999,999,999 inclusive. + // If `units` is positive, `nanos` must be positive or zero. + // If `units` is zero, `nanos` can be positive, zero, or negative. + // If `units` is negative, `nanos` must be negative or zero. + // For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000. 
+ Nanos int32 `protobuf:"varint,3,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Money) Reset() { *m = Money{} } +func (m *Money) String() string { return proto.CompactTextString(m) } +func (*Money) ProtoMessage() {} +func (*Money) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Money) GetCurrencyCode() string { + if m != nil { + return m.CurrencyCode + } + return "" +} + +func (m *Money) GetUnits() int64 { + if m != nil { + return m.Units + } + return 0 +} + +func (m *Money) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Money)(nil), "google.type.Money") +} + +func init() { proto.RegisterFile("google/type/money.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0xcf, 0xcd, 0xcf, 0x4b, 0xad, 0xd4, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x86, 0x48, 0xe8, 0x81, 0x24, 0x94, 0x22, 0xb8, 0x58, 0x7d, 0x41, + 0x72, 0x42, 0xca, 0x5c, 0xbc, 0xc9, 0xa5, 0x45, 0x45, 0xa9, 0x79, 0xc9, 0x95, 0xf1, 0xc9, 0xf9, + 0x29, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x3c, 0x30, 0x41, 0xe7, 0xfc, 0x94, 0x54, + 0x21, 0x11, 0x2e, 0xd6, 0xd2, 0xbc, 0xcc, 0x92, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xe6, 0x20, + 0x08, 0x07, 0x24, 0x9a, 0x97, 0x98, 0x97, 0x5f, 0x2c, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x1a, 0x04, + 0xe1, 0x38, 0xc5, 0x72, 0xf1, 0x27, 0xe7, 0xe7, 0xea, 0x21, 0x59, 0xe6, 0xc4, 0x05, 0xb6, 0x2a, + 0x00, 0xe4, 0x8a, 0x00, 0xc6, 0x28, 0x33, 0xa8, 0x54, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0xba, 0x5e, + 0x7e, 0x51, 0xba, 0x7e, 0x7a, 0x6a, 0x1e, 0xd8, 0x8d, 0xfa, 0x10, 0xa9, 0xc4, 0x82, 0xcc, 0x62, + 0x24, 0xf7, 0x5b, 0x83, 0xc9, 0x45, 0x4c, 0xcc, 0xee, 0x21, 0x01, 0x49, 0x6c, 0x60, 0x85, 0xc6, + 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xc2, 0x5b, 0xa5, 0xe7, 0x00, 0x00, 0x00, +} 
diff --git a/vendor/google.golang.org/genproto/googleapis/type/postaladdress/postal_address.pb.go b/vendor/google.golang.org/genproto/googleapis/type/postaladdress/postal_address.pb.go new file mode 100644 index 000000000..430f3b450 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/postaladdress/postal_address.pb.go @@ -0,0 +1,240 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/type/postal_address.proto + +/* +Package postaladdress is a generated protocol buffer package. + +It is generated from these files: + google/type/postal_address.proto + +It has these top-level messages: + PostalAddress +*/ +package postaladdress + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents a postal address, e.g. for postal delivery or payments addresses. +// Given a postal address, a postal service can deliver items to a premise, P.O. +// Box or similar. +// It is not intended to model geographical locations (roads, towns, +// mountains). +// +// In typical usage an address would be created via user input or from importing +// existing data, depending on the type of process. +// +// Advice on address input / editing: +// - Use an i18n-ready address widget such as +// https://github.com/googlei18n/libaddressinput) +// - Users should not be presented with UI elements for input or editing of +// fields outside countries where that field is used. 
+// +// For more guidance on how to use this schema, please see: +// https://support.google.com/business/answer/6397478 +type PostalAddress struct { + // The schema revision of the `PostalAddress`. + // All new revisions **must** be backward compatible with old revisions. + Revision int32 `protobuf:"varint,1,opt,name=revision" json:"revision,omitempty"` + // Required. CLDR region code of the country/region of the address. This + // is never inferred and it is up to the user to ensure the value is + // correct. See http://cldr.unicode.org/ and + // http://www.unicode.org/cldr/charts/30/supplemental/territory_information.html + // for details. Example: "CH" for Switzerland. + RegionCode string `protobuf:"bytes,2,opt,name=region_code,json=regionCode" json:"region_code,omitempty"` + // Optional. BCP-47 language code of the contents of this address (if + // known). This is often the UI language of the input form or is expected + // to match one of the languages used in the address' country/region, or their + // transliterated equivalents. + // This can affect formatting in certain countries, but is not critical + // to the correctness of the data and will never affect any validation or + // other non-formatting related operations. + // + // If this value is not known, it should be omitted (rather than specifying a + // possibly incorrect default). + // + // Examples: "zh-Hant", "ja", "ja-Latn", "en". + LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // Optional. Postal code of the address. Not all countries use or require + // postal codes to be present, but where they are used, they may trigger + // additional validation with other parts of the address (e.g. state/zip + // validation in the U.S.A.). + PostalCode string `protobuf:"bytes,4,opt,name=postal_code,json=postalCode" json:"postal_code,omitempty"` + // Optional. Additional, country-specific, sorting code. This is not used + // in most regions. 
Where it is used, the value is either a string like + // "CEDEX", optionally followed by a number (e.g. "CEDEX 7"), or just a number + // alone, representing the "sector code" (Jamaica), "delivery area indicator" + // (Malawi) or "post office indicator" (e.g. Côte d'Ivoire). + SortingCode string `protobuf:"bytes,5,opt,name=sorting_code,json=sortingCode" json:"sorting_code,omitempty"` + // Optional. Highest administrative subdivision which is used for postal + // addresses of a country or region. + // For example, this can be a state, a province, an oblast, or a prefecture. + // Specifically, for Spain this is the province and not the autonomous + // community (e.g. "Barcelona" and not "Catalonia"). + // Many countries don't use an administrative area in postal addresses. E.g. + // in Switzerland this should be left unpopulated. + AdministrativeArea string `protobuf:"bytes,6,opt,name=administrative_area,json=administrativeArea" json:"administrative_area,omitempty"` + // Optional. Generally refers to the city/town portion of the address. + // Examples: US city, IT comune, UK post town. + // In regions of the world where localities are not well defined or do not fit + // into this structure well, leave locality empty and use address_lines. + Locality string `protobuf:"bytes,7,opt,name=locality" json:"locality,omitempty"` + // Optional. Sublocality of the address. + // For example, this can be neighborhoods, boroughs, districts. + Sublocality string `protobuf:"bytes,8,opt,name=sublocality" json:"sublocality,omitempty"` + // Unstructured address lines describing the lower levels of an address. + // + // Because values in address_lines do not have type information and may + // sometimes contain multiple values in a single field (e.g. + // "Austin, TX"), it is important that the line order is clear. The order of + // address lines should be "envelope order" for the country/region of the + // address. In places where this can vary (e.g. 
Japan), address_language is + // used to make it explicit (e.g. "ja" for large-to-small ordering and + // "ja-Latn" or "en" for small-to-large). This way, the most specific line of + // an address can be selected based on the language. + // + // The minimum permitted structural representation of an address consists + // of a region_code with all remaining information placed in the + // address_lines. It would be possible to format such an address very + // approximately without geocoding, but no semantic reasoning could be + // made about any of the address components until it was at least + // partially resolved. + // + // Creating an address only containing a region_code and address_lines, and + // then geocoding is the recommended way to handle completely unstructured + // addresses (as opposed to guessing which parts of the address should be + // localities or administrative areas). + AddressLines []string `protobuf:"bytes,9,rep,name=address_lines,json=addressLines" json:"address_lines,omitempty"` + // Optional. The recipient at the address. + // This field may, under certain circumstances, contain multiline information. + // For example, it might contain "care of" information. + Recipients []string `protobuf:"bytes,10,rep,name=recipients" json:"recipients,omitempty"` + // Optional. The name of the organization at the address. 
+ Organization string `protobuf:"bytes,11,opt,name=organization" json:"organization,omitempty"` +} + +func (m *PostalAddress) Reset() { *m = PostalAddress{} } +func (m *PostalAddress) String() string { return proto.CompactTextString(m) } +func (*PostalAddress) ProtoMessage() {} +func (*PostalAddress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *PostalAddress) GetRevision() int32 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *PostalAddress) GetRegionCode() string { + if m != nil { + return m.RegionCode + } + return "" +} + +func (m *PostalAddress) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *PostalAddress) GetPostalCode() string { + if m != nil { + return m.PostalCode + } + return "" +} + +func (m *PostalAddress) GetSortingCode() string { + if m != nil { + return m.SortingCode + } + return "" +} + +func (m *PostalAddress) GetAdministrativeArea() string { + if m != nil { + return m.AdministrativeArea + } + return "" +} + +func (m *PostalAddress) GetLocality() string { + if m != nil { + return m.Locality + } + return "" +} + +func (m *PostalAddress) GetSublocality() string { + if m != nil { + return m.Sublocality + } + return "" +} + +func (m *PostalAddress) GetAddressLines() []string { + if m != nil { + return m.AddressLines + } + return nil +} + +func (m *PostalAddress) GetRecipients() []string { + if m != nil { + return m.Recipients + } + return nil +} + +func (m *PostalAddress) GetOrganization() string { + if m != nil { + return m.Organization + } + return "" +} + +func init() { + proto.RegisterType((*PostalAddress)(nil), "google.type.PostalAddress") +} + +func init() { proto.RegisterFile("google/type/postal_address.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x31, 0x6f, 0xea, 0x30, + 0x10, 0xc7, 0x15, 0xf2, 0xe0, 0xc1, 
0x05, 0xf4, 0x24, 0xbf, 0x25, 0xea, 0x50, 0x52, 0xba, 0x30, + 0x25, 0x43, 0xc7, 0x4e, 0x50, 0xa9, 0x5d, 0x3a, 0x44, 0xa8, 0x53, 0x97, 0xc8, 0x24, 0x27, 0xcb, + 0x52, 0xf0, 0x45, 0xb6, 0x41, 0xa2, 0xdf, 0xa1, 0x5f, 0xa2, 0x9f, 0xb4, 0xb2, 0x9d, 0xd2, 0x30, + 0xde, 0xef, 0x7e, 0x49, 0xee, 0xee, 0x1f, 0xc8, 0x04, 0x91, 0x68, 0xb1, 0xb0, 0xe7, 0x0e, 0x8b, + 0x8e, 0x8c, 0xe5, 0x6d, 0xc5, 0x9b, 0x46, 0xa3, 0x31, 0x79, 0xa7, 0xc9, 0x12, 0x4b, 0x82, 0x91, + 0x3b, 0x63, 0xf5, 0x19, 0xc3, 0xa2, 0xf4, 0xd6, 0x26, 0x48, 0xec, 0x06, 0xa6, 0x1a, 0x4f, 0xd2, + 0x48, 0x52, 0x69, 0x94, 0x45, 0xeb, 0xf1, 0xee, 0x52, 0xb3, 0x25, 0x24, 0x1a, 0x85, 0x24, 0x55, + 0xd5, 0xd4, 0x60, 0x3a, 0xca, 0xa2, 0xf5, 0x6c, 0x07, 0x01, 0x3d, 0x51, 0x83, 0xec, 0x1e, 0x16, + 0x2d, 0x57, 0xe2, 0xc8, 0x05, 0x06, 0x25, 0xf6, 0xca, 0xfc, 0x07, 0x7a, 0x69, 0x09, 0x49, 0x3f, + 0x98, 0x57, 0xfe, 0x84, 0xb7, 0x04, 0xe4, 0x85, 0x3b, 0x98, 0x1b, 0xd2, 0x56, 0x2a, 0x11, 0x8c, + 0xb1, 0x37, 0x92, 0x9e, 0x79, 0xa5, 0x80, 0xff, 0xbc, 0x39, 0x48, 0x25, 0x8d, 0xd5, 0xdc, 0xca, + 0x13, 0x56, 0x5c, 0x23, 0x4f, 0x27, 0xde, 0x64, 0xd7, 0xad, 0x8d, 0x46, 0xee, 0xd6, 0x6a, 0xa9, + 0xe6, 0xad, 0xb4, 0xe7, 0xf4, 0xaf, 0xb7, 0x2e, 0x35, 0xcb, 0x20, 0x31, 0xc7, 0xfd, 0xa5, 0x3d, + 0xed, 0x3f, 0xf7, 0x8b, 0xdc, 0x5e, 0xfd, 0x11, 0xab, 0x56, 0x2a, 0x34, 0xe9, 0x2c, 0x8b, 0xdd, + 0x5e, 0x3d, 0x7c, 0x75, 0x8c, 0xdd, 0x02, 0x68, 0xac, 0x65, 0x27, 0x51, 0x59, 0x93, 0x82, 0x37, + 0x06, 0x84, 0xad, 0x60, 0x4e, 0x5a, 0x70, 0x25, 0x3f, 0xb8, 0x75, 0xd7, 0x4d, 0xc2, 0x6d, 0x86, + 0x6c, 0x7b, 0x84, 0x7f, 0x35, 0x1d, 0xf2, 0x41, 0x44, 0x5b, 0x76, 0x95, 0x4f, 0xe9, 0x32, 0x2c, + 0xa3, 0xf7, 0xe7, 0x5e, 0x11, 0xe4, 0x6e, 0x9b, 0x93, 0x16, 0x85, 0x40, 0xe5, 0x13, 0x2e, 0x42, + 0x8b, 0x77, 0xd2, 0x0c, 0x7f, 0x83, 0x7e, 0xd6, 0xc7, 0xab, 0xea, 0x6b, 0x14, 0xbf, 0xbc, 0x95, + 0xfb, 0x89, 0x7f, 0xf0, 0xe1, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xda, 0x86, 0xd3, 0x22, 0x3e, 0x02, + 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/genproto/googleapis/type/timeofday/timeofday.pb.go b/vendor/google.golang.org/genproto/googleapis/type/timeofday/timeofday.pb.go new file mode 100644 index 000000000..703c74c21 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/timeofday/timeofday.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/type/timeofday.proto + +/* +Package timeofday is a generated protocol buffer package. + +It is generated from these files: + google/type/timeofday.proto + +It has these top-level messages: + TimeOfDay +*/ +package timeofday + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Represents a time of day. The date and time zone are either not significant +// or are specified elsewhere. An API may chose to allow leap seconds. Related +// types are [google.type.Date][google.type.Date] and `google.protobuf.Timestamp`. +type TimeOfDay struct { + // Hours of day in 24 hour format. Should be from 0 to 23. An API may choose + // to allow the value "24:00:00" for scenarios like business closing time. + Hours int32 `protobuf:"varint,1,opt,name=hours" json:"hours,omitempty"` + // Minutes of hour of day. Must be from 0 to 59. + Minutes int32 `protobuf:"varint,2,opt,name=minutes" json:"minutes,omitempty"` + // Seconds of minutes of the time. Must normally be from 0 to 59. An API may + // allow the value 60 if it allows leap-seconds. 
+ Seconds int32 `protobuf:"varint,3,opt,name=seconds" json:"seconds,omitempty"` + // Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + Nanos int32 `protobuf:"varint,4,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *TimeOfDay) Reset() { *m = TimeOfDay{} } +func (m *TimeOfDay) String() string { return proto.CompactTextString(m) } +func (*TimeOfDay) ProtoMessage() {} +func (*TimeOfDay) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *TimeOfDay) GetHours() int32 { + if m != nil { + return m.Hours + } + return 0 +} + +func (m *TimeOfDay) GetMinutes() int32 { + if m != nil { + return m.Minutes + } + return 0 +} + +func (m *TimeOfDay) GetSeconds() int32 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *TimeOfDay) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*TimeOfDay)(nil), "google.type.TimeOfDay") +} + +func init() { proto.RegisterFile("google/type/timeofday.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 198 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0x2f, 0xc9, 0xcc, 0x4d, 0xcd, 0x4f, 0x4b, 0x49, + 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x86, 0x48, 0xea, 0x81, 0x24, 0x95, 0xb2, + 0xb9, 0x38, 0x43, 0x32, 0x73, 0x53, 0xfd, 0xd3, 0x5c, 0x12, 0x2b, 0x85, 0x44, 0xb8, 0x58, 0x33, + 0xf2, 0x4b, 0x8b, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, 0x20, 0x1c, 0x21, 0x09, 0x2e, + 0xf6, 0xdc, 0xcc, 0xbc, 0xd2, 0x92, 0xd4, 0x62, 0x09, 0x26, 0xb0, 0x38, 0x8c, 0x0b, 0x92, 0x29, + 0x4e, 0x4d, 0xce, 0xcf, 0x4b, 0x29, 0x96, 0x60, 0x86, 0xc8, 0x40, 0xb9, 0x20, 0x93, 0xf2, 0x12, + 0xf3, 0xf2, 0x8b, 0x25, 0x58, 0x20, 0x26, 0x81, 0x39, 0x4e, 0x99, 0x5c, 0xfc, 0xc9, 0xf9, 0xb9, + 0x7a, 0x48, 0xf6, 0x3b, 0xf1, 0xc1, 0x6d, 0x0f, 0x00, 0x39, 0x2e, 0x80, 0x31, 0xca, 0x0e, 0x2a, + 
0x9d, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x97, 0x5f, 0x94, 0xae, 0x9f, 0x9e, 0x9a, 0x07, 0x76, + 0xba, 0x3e, 0x44, 0x2a, 0xb1, 0x20, 0xb3, 0x18, 0xcd, 0x6b, 0xd6, 0x70, 0xd6, 0x22, 0x26, 0x66, + 0xf7, 0x90, 0x80, 0x24, 0x36, 0xb0, 0x06, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x47, 0x79, + 0x6a, 0x06, 0x0a, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/watcher/v1/watch.pb.go b/vendor/google.golang.org/genproto/googleapis/watcher/v1/watch.pb.go new file mode 100644 index 000000000..c3128144e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/watcher/v1/watch.pb.go @@ -0,0 +1,376 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/watcher/v1/watch.proto + +/* +Package watcher is a generated protocol buffer package. + +It is generated from these files: + google/watcher/v1/watch.proto + +It has these top-level messages: + Request + ChangeBatch + Change +*/ +package watcher + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/any" +import _ "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A reported value can be in one of the following states: +type Change_State int32 + +const ( + // The element exists and its full value is included in data. + Change_EXISTS Change_State = 0 + // The element does not exist. 
+ Change_DOES_NOT_EXIST Change_State = 1 + // Element may or may not exist. Used only for initial state delivery when + // the client is not interested in fetching the initial state. See the + // "Initial State" section above. + Change_INITIAL_STATE_SKIPPED Change_State = 2 + // The element may exist, but some error has occurred. More information is + // available in the data field - the value is a serialized Status + // proto (from [google.rpc.Status][]) + Change_ERROR Change_State = 3 +) + +var Change_State_name = map[int32]string{ + 0: "EXISTS", + 1: "DOES_NOT_EXIST", + 2: "INITIAL_STATE_SKIPPED", + 3: "ERROR", +} +var Change_State_value = map[string]int32{ + "EXISTS": 0, + "DOES_NOT_EXIST": 1, + "INITIAL_STATE_SKIPPED": 2, + "ERROR": 3, +} + +func (x Change_State) String() string { + return proto.EnumName(Change_State_name, int32(x)) +} +func (Change_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// The message used by the client to register interest in an entity. +type Request struct { + // The `target` value **must** be a valid URL path pointing to an entity + // to watch. Note that the service name **must** be + // removed from the target field (e.g., the target field must say + // "/foo/bar", not "myservice.googleapis.com/foo/bar"). A client is + // also allowed to pass system-specific parameters in the URL that + // are only obeyed by some implementations. Some parameters will be + // implementation-specific. However, some have predefined meaning + // and are listed here: + // + // * recursive = true|false [default=false] + // If set to true, indicates that the client wants to watch all elements + // of entities in the subtree rooted at the entity's name in `target`. For + // descendants that are not the immediate children of the target, the + // `Change.element` will contain slashes. + // + // Note that some namespaces and entities will not support recursive + // watching. 
When watching such an entity, a client must not set recursive + // to true. Otherwise, it will receive an `UNIMPLEMENTED` error. + // + // Normal URL encoding must be used inside `target`. For example, if a query + // parameter name or value, or the non-query parameter portion of `target` + // contains a special character, it must be %-encoded. We recommend that + // clients and servers use their runtime's URL library to produce and consume + // target values. + Target string `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` + // The `resume_marker` specifies how much of the existing underlying state is + // delivered to the client when the watch request is received by the + // system. The client can set this marker in one of the following ways to get + // different semantics: + // + // * Parameter is not specified or has the value "". + // Semantics: Fetch initial state. + // The client wants the entity's initial state to be delivered. See the + // description in "Initial State". + // + // * Parameter is set to the string "now" (UTF-8 encoding). + // Semantics: Fetch new changes only. + // The client just wants to get the changes received by the system after + // the watch point. The system may deliver changes from before the watch + // point as well. + // + // * Parameter is set to a value received in an earlier + // `Change.resume_marker` field while watching the same entity. + // Semantics: Resume from a specific point. + // The client wants to receive the changes from a specific point; this + // value must correspond to a value received in the `Change.resume_marker` + // field. The system may deliver changes from before the `resume_marker` + // as well. If the system cannot resume the stream from this point (e.g., + // if it is too far behind in the stream), it can raise the + // `FAILED_PRECONDITION` error. 
+ // + // An implementation MUST support an unspecified parameter and the + // empty string "" marker (initial state fetching) and the "now" marker. + // It need not support resuming from a specific point. + ResumeMarker []byte `protobuf:"bytes,2,opt,name=resume_marker,json=resumeMarker,proto3" json:"resume_marker,omitempty"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Request) GetTarget() string { + if m != nil { + return m.Target + } + return "" +} + +func (m *Request) GetResumeMarker() []byte { + if m != nil { + return m.ResumeMarker + } + return nil +} + +// A batch of Change messages. +type ChangeBatch struct { + // A list of Change messages. + Changes []*Change `protobuf:"bytes,1,rep,name=changes" json:"changes,omitempty"` +} + +func (m *ChangeBatch) Reset() { *m = ChangeBatch{} } +func (m *ChangeBatch) String() string { return proto.CompactTextString(m) } +func (*ChangeBatch) ProtoMessage() {} +func (*ChangeBatch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ChangeBatch) GetChanges() []*Change { + if m != nil { + return m.Changes + } + return nil +} + +// A Change indicates the most recent state of an element. +type Change struct { + // Name of the element, interpreted relative to the entity's actual + // name. "" refers to the entity itself. The element name is a valid + // UTF-8 string. + Element string `protobuf:"bytes,1,opt,name=element" json:"element,omitempty"` + // The state of the `element`. + State Change_State `protobuf:"varint,2,opt,name=state,enum=google.watcher.v1.Change_State" json:"state,omitempty"` + // The actual change data. This field is present only when `state() == EXISTS` + // or `state() == ERROR`. Please see [google.protobuf.Any][google.protobuf.Any] about how to use + // the Any type. 
+ Data *google_protobuf1.Any `protobuf:"bytes,6,opt,name=data" json:"data,omitempty"` + // If present, provides a compact representation of all the messages that have + // been received by the caller for the given entity, e.g., it could be a + // sequence number or a multi-part timestamp/version vector. This marker can + // be provided in the Request message, allowing the caller to resume the stream + // watching at a specific point without fetching the initial state. + ResumeMarker []byte `protobuf:"bytes,4,opt,name=resume_marker,json=resumeMarker,proto3" json:"resume_marker,omitempty"` + // If true, this Change is followed by more Changes that are in the same group + // as this Change. + Continued bool `protobuf:"varint,5,opt,name=continued" json:"continued,omitempty"` +} + +func (m *Change) Reset() { *m = Change{} } +func (m *Change) String() string { return proto.CompactTextString(m) } +func (*Change) ProtoMessage() {} +func (*Change) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Change) GetElement() string { + if m != nil { + return m.Element + } + return "" +} + +func (m *Change) GetState() Change_State { + if m != nil { + return m.State + } + return Change_EXISTS +} + +func (m *Change) GetData() *google_protobuf1.Any { + if m != nil { + return m.Data + } + return nil +} + +func (m *Change) GetResumeMarker() []byte { + if m != nil { + return m.ResumeMarker + } + return nil +} + +func (m *Change) GetContinued() bool { + if m != nil { + return m.Continued + } + return false +} + +func init() { + proto.RegisterType((*Request)(nil), "google.watcher.v1.Request") + proto.RegisterType((*ChangeBatch)(nil), "google.watcher.v1.ChangeBatch") + proto.RegisterType((*Change)(nil), "google.watcher.v1.Change") + proto.RegisterEnum("google.watcher.v1.Change_State", Change_State_name, Change_State_value) +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Watcher service + +type WatcherClient interface { + // Start a streaming RPC to get watch information from the server. + Watch(ctx context.Context, in *Request, opts ...grpc.CallOption) (Watcher_WatchClient, error) +} + +type watcherClient struct { + cc *grpc.ClientConn +} + +func NewWatcherClient(cc *grpc.ClientConn) WatcherClient { + return &watcherClient{cc} +} + +func (c *watcherClient) Watch(ctx context.Context, in *Request, opts ...grpc.CallOption) (Watcher_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watcher_serviceDesc.Streams[0], c.cc, "/google.watcher.v1.Watcher/Watch", opts...) + if err != nil { + return nil, err + } + x := &watcherWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Watcher_WatchClient interface { + Recv() (*ChangeBatch, error) + grpc.ClientStream +} + +type watcherWatchClient struct { + grpc.ClientStream +} + +func (x *watcherWatchClient) Recv() (*ChangeBatch, error) { + m := new(ChangeBatch) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watcher service + +type WatcherServer interface { + // Start a streaming RPC to get watch information from the server. 
+ Watch(*Request, Watcher_WatchServer) error +} + +func RegisterWatcherServer(s *grpc.Server, srv WatcherServer) { + s.RegisterService(&_Watcher_serviceDesc, srv) +} + +func _Watcher_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(WatcherServer).Watch(m, &watcherWatchServer{stream}) +} + +type Watcher_WatchServer interface { + Send(*ChangeBatch) error + grpc.ServerStream +} + +type watcherWatchServer struct { + grpc.ServerStream +} + +func (x *watcherWatchServer) Send(m *ChangeBatch) error { + return x.ServerStream.SendMsg(m) +} + +var _Watcher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.watcher.v1.Watcher", + HandlerType: (*WatcherServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watcher_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "google/watcher/v1/watch.proto", +} + +func init() { proto.RegisterFile("google/watcher/v1/watch.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 449 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xdd, 0x6e, 0xd3, 0x30, + 0x14, 0xc6, 0xdd, 0x92, 0xd2, 0xd3, 0x31, 0x75, 0x16, 0x43, 0x69, 0x19, 0x10, 0x85, 0x9b, 0x5c, + 0x25, 0xac, 0x13, 0x12, 0x12, 0x57, 0x2d, 0x0b, 0x52, 0x04, 0x5b, 0x2b, 0x27, 0x12, 0x13, 0x37, + 0x91, 0x97, 0x99, 0xac, 0xa2, 0xb1, 0x4b, 0xe2, 0x0e, 0xed, 0x96, 0x57, 0x40, 0x3c, 0x19, 0xaf, + 0xc0, 0x83, 0xa0, 0xda, 0x0e, 0x20, 0xb2, 0xde, 0x9d, 0xf3, 0xfd, 0xd8, 0xe7, 0x3b, 0x36, 0x3c, + 0x29, 0x84, 0x28, 0x96, 0x2c, 0xfc, 0x4a, 0x65, 0x7e, 0xcd, 0xaa, 0xf0, 0xe6, 0x58, 0x97, 0xc1, + 0xaa, 0x12, 0x52, 0xe0, 0x03, 0x4d, 0x07, 0x86, 0x0e, 0x6e, 0x8e, 0x47, 0x47, 0xc6, 0x41, 0x57, + 0x8b, 0x90, 0x72, 0x2e, 0x24, 0x95, 0x0b, 0xc1, 0x6b, 0x6d, 0x18, 0x0d, 0x0d, 0xab, 0xba, 0xcb, + 0xf5, 0xa7, 0x90, 0xf2, 0x5b, 0x43, 0x3d, 0xfe, 0x9f, 0x62, 
0xe5, 0x4a, 0x1a, 0xd2, 0x7b, 0x0b, + 0x5d, 0xc2, 0xbe, 0xac, 0x59, 0x2d, 0xf1, 0x23, 0xb0, 0x25, 0xad, 0x0a, 0x26, 0x1d, 0xe4, 0x22, + 0xbf, 0x47, 0x4c, 0x87, 0x9f, 0xc3, 0x83, 0x8a, 0xd5, 0xeb, 0x92, 0x65, 0x25, 0xad, 0x3e, 0xb3, + 0xca, 0xe9, 0xb8, 0xc8, 0xdf, 0x23, 0x7b, 0x1a, 0x3c, 0x53, 0x98, 0x37, 0x85, 0xfe, 0x9b, 0x6b, + 0xca, 0x0b, 0x36, 0xdd, 0x4c, 0x8c, 0x4f, 0xa0, 0x9b, 0xab, 0xb6, 0x76, 0x90, 0xbb, 0xe3, 0xf7, + 0xc7, 0xc3, 0xa0, 0x95, 0x28, 0xd0, 0x06, 0xd2, 0x28, 0xbd, 0x1f, 0x1d, 0xb0, 0x35, 0x86, 0x1d, + 0xe8, 0xb2, 0x25, 0x2b, 0x19, 0x6f, 0x86, 0x69, 0x5a, 0xfc, 0x12, 0xac, 0x5a, 0x52, 0xc9, 0xd4, + 0x14, 0xfb, 0xe3, 0x67, 0x5b, 0xcf, 0x0d, 0x92, 0x8d, 0x8c, 0x68, 0x35, 0xf6, 0x61, 0xf7, 0x8a, + 0x4a, 0xea, 0xd8, 0x2e, 0xf2, 0xfb, 0xe3, 0x87, 0x8d, 0xab, 0xd9, 0x49, 0x30, 0xe1, 0xb7, 0x44, + 0x29, 0xda, 0x71, 0x77, 0xdb, 0x71, 0xf1, 0x11, 0xf4, 0x72, 0xc1, 0xe5, 0x82, 0xaf, 0xd9, 0x95, + 0x63, 0xb9, 0xc8, 0xbf, 0x4f, 0xfe, 0x02, 0xde, 0x19, 0x58, 0xea, 0x72, 0x0c, 0x60, 0x47, 0x17, + 0x71, 0x92, 0x26, 0x83, 0x7b, 0x18, 0xc3, 0xfe, 0xe9, 0x2c, 0x4a, 0xb2, 0xf3, 0x59, 0x9a, 0x29, + 0x70, 0x80, 0xf0, 0x10, 0x0e, 0xe3, 0xf3, 0x38, 0x8d, 0x27, 0xef, 0xb3, 0x24, 0x9d, 0xa4, 0x51, + 0x96, 0xbc, 0x8b, 0xe7, 0xf3, 0xe8, 0x74, 0xd0, 0xc1, 0x3d, 0xb0, 0x22, 0x42, 0x66, 0x64, 0xb0, + 0x33, 0xce, 0xa1, 0xfb, 0x41, 0xa7, 0xc3, 0x17, 0x60, 0xa9, 0x12, 0x8f, 0xee, 0xc8, 0x6d, 0x1e, + 0x72, 0xf4, 0x74, 0xeb, 0x4e, 0xd4, 0xe3, 0x78, 0x07, 0xdf, 0x7e, 0xfe, 0xfa, 0xde, 0xe9, 0xe3, + 0xde, 0x9f, 0x5f, 0xf7, 0x02, 0x4d, 0x33, 0x38, 0xcc, 0x45, 0xd9, 0x76, 0x4e, 0x41, 0x5d, 0x38, + 0xdf, 0x2c, 0x6a, 0x8e, 0x3e, 0xbe, 0x32, 0x82, 0x42, 0x2c, 0x29, 0x2f, 0x02, 0x51, 0x15, 0x61, + 0xc1, 0xb8, 0x5a, 0x63, 0xa8, 0x29, 0xba, 0x5a, 0xd4, 0xff, 0x7c, 0xeb, 0xd7, 0xa6, 0xbc, 0xb4, + 0x95, 0xe8, 0xe4, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x48, 0x0a, 0xba, 0x6c, 0xfa, 0x02, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/protobuf/api/api.pb.go 
b/vendor/google.golang.org/genproto/protobuf/api/api.pb.go new file mode 100644 index 000000000..d41ce3c66 --- /dev/null +++ b/vendor/google.golang.org/genproto/protobuf/api/api.pb.go @@ -0,0 +1,350 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/api.proto + +/* +Package api is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/api.proto + +It has these top-level messages: + Api + Method + Mixin +*/ +package api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "google.golang.org/genproto/protobuf/source_context" +import google_protobuf2 "google.golang.org/genproto/protobuf/ptype" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*google_protobuf2.Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + Version string `protobuf:"bytes,4,opt,name=version" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *google_protobuf.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins" json:"mixins,omitempty"` + // The source syntax of the service. 
+ Syntax google_protobuf2.Syntax `protobuf:"varint,7,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Api) Reset() { *m = Api{} } +func (m *Api) String() string { return proto.CompactTextString(m) } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*google_protobuf2.Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *google_protobuf.SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() google_protobuf2.Syntax { + if m != nil { + return m.Syntax + } + return google_protobuf2.Syntax_SYNTAX_PROTO2 +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl" json:"response_type_url,omitempty"` + // If true, the response is streamed. 
+ ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*google_protobuf2.Option `protobuf:"bytes,6,rep,name=options" json:"options,omitempty"` + // The source syntax of this method. + Syntax google_protobuf2.Syntax `protobuf:"varint,7,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Method) Reset() { *m = Method{} } +func (m *Method) String() string { return proto.CompactTextString(m) } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*google_protobuf2.Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() google_protobuf2.Syntax { + if m != nil { + return m.Syntax + } + return google_protobuf2.Syntax_SYNTAX_PROTO2 +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. 
+// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. 
+// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root" json:"root,omitempty"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (m *Mixin) String() string { return proto.CompactTextString(m) } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 432 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xcf, 0x8e, 0xd3, 0x30, + 0x10, 0xc6, 0x95, 0xa4, 0x4d, 0x17, 0xaf, 0xe8, 0x82, 0x91, 0xc0, 0xf4, 0xb0, 0x8a, 0x56, 0x1c, + 0x22, 0x2a, 0x12, 0x51, 0x8e, 0x9c, 0x5a, 0x84, 0x7a, 0x40, 0x88, 0x28, 0x05, 0x21, 0x71, 0xa9, + 0xd2, 0x62, 0x82, 0xa5, 0xc4, 0x63, 0x6c, 0x07, 0xda, 0xd7, 0xe1, 0xc8, 0x91, 0x37, 0xe0, 0xcd, + 0x50, 0x9c, 0xb8, 0x7f, 0xd2, 0x22, 0xb1, 0x37, 0x8f, 0xbf, 0xdf, 0x7c, 0x99, 0xf9, 0xac, 0xa0, + 0xc7, 0x39, 0x40, 0x5e, 0xd0, 0x58, 0x48, 0xd0, 0xb0, 0xaa, 0xbe, 0xc4, 0x99, 0x60, 0x91, 0x29, + 0xf0, 0x55, 0x23, 0x45, 0x56, 0x1a, 0x3d, 0xe9, 0xb2, 0x0a, 0x2a, 0xb9, 0xa6, 0xcb, 0x35, 0x70, + 0x4d, 0x37, 0xba, 0x01, 0x47, 0xa3, 0x2e, 0xa5, 0xb7, 0xa2, 0x35, 0xb9, 0xf9, 0xe3, 
0x22, 0x6f, + 0x2a, 0x18, 0xc6, 0xa8, 0xc7, 0xb3, 0x92, 0x12, 0x27, 0x70, 0xc2, 0x3b, 0xa9, 0x39, 0xe3, 0xe7, + 0x68, 0x50, 0x52, 0xfd, 0x15, 0x3e, 0x2b, 0xe2, 0x06, 0x5e, 0x78, 0x39, 0x79, 0x14, 0x75, 0x06, + 0x88, 0xde, 0x1a, 0x3d, 0xb5, 0x5c, 0xdd, 0x02, 0x42, 0x33, 0xe0, 0x8a, 0x78, 0xff, 0x68, 0x79, + 0x67, 0xf4, 0xd4, 0x72, 0x98, 0xa0, 0xc1, 0x77, 0x2a, 0x15, 0x03, 0x4e, 0x7a, 0xe6, 0xe3, 0xb6, + 0xc4, 0xaf, 0xd1, 0xf0, 0x78, 0x1f, 0xd2, 0x0f, 0x9c, 0xf0, 0x72, 0x72, 0x7d, 0xe2, 0xb9, 0x30, + 0xd8, 0xab, 0x86, 0x4a, 0xef, 0xaa, 0xc3, 0x12, 0x47, 0xc8, 0x2f, 0xd9, 0x86, 0x71, 0x45, 0x7c, + 0x33, 0xd2, 0xc3, 0xd3, 0x2d, 0x6a, 0x39, 0x6d, 0x29, 0x1c, 0x23, 0x5f, 0x6d, 0xb9, 0xce, 0x36, + 0x64, 0x10, 0x38, 0xe1, 0xf0, 0xcc, 0x0a, 0x0b, 0x23, 0xa7, 0x2d, 0x76, 0xf3, 0xdb, 0x45, 0x7e, + 0x13, 0xc4, 0xd9, 0x18, 0x43, 0x74, 0x4f, 0xd2, 0x6f, 0x15, 0x55, 0x7a, 0x59, 0x07, 0xbf, 0xac, + 0x64, 0x41, 0x5c, 0xa3, 0x0f, 0xdb, 0xfb, 0xf7, 0x5b, 0x41, 0x3f, 0xc8, 0x02, 0x8f, 0xd1, 0x7d, + 0x4b, 0x2a, 0x2d, 0x69, 0x56, 0x32, 0x9e, 0x13, 0x2f, 0x70, 0xc2, 0x8b, 0xd4, 0x5a, 0x2c, 0xec, + 0x3d, 0x7e, 0x5a, 0xc3, 0x4a, 0x00, 0x57, 0x74, 0xef, 0xdb, 0x24, 0x78, 0x65, 0x05, 0x6b, 0xfc, + 0x0c, 0xe1, 0x1d, 0xbb, 0x77, 0xee, 0x1b, 0xe7, 0x9d, 0xcb, 0xde, 0xfa, 0xe0, 0x15, 0xfd, 0xff, + 0x7c, 0xc5, 0x5b, 0x87, 0x16, 0xa3, 0xbe, 0x89, 0xfd, 0x6c, 0x64, 0x18, 0xf5, 0x24, 0x80, 0x6e, + 0x63, 0x32, 0xe7, 0x59, 0x85, 0x1e, 0xac, 0xa1, 0xec, 0xda, 0xce, 0x2e, 0xa6, 0x82, 0x25, 0x75, + 0x91, 0x38, 0x9f, 0xc6, 0xad, 0x98, 0x43, 0x91, 0xf1, 0x3c, 0x02, 0x99, 0xc7, 0x39, 0xe5, 0x06, + 0x3d, 0xfa, 0x9d, 0x5e, 0x66, 0x82, 0xfd, 0x74, 0xbd, 0x79, 0x32, 0xfb, 0xe5, 0x5e, 0xcf, 0x9b, + 0x9e, 0xc4, 0xce, 0xf9, 0x91, 0x16, 0xc5, 0x1b, 0x0e, 0x3f, 0x78, 0x1d, 0x9e, 0x5a, 0xf9, 0xa6, + 0xf1, 0xc5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x07, 0x73, 0x11, 0x97, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go 
b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go new file mode 100644 index 000000000..485bf0066 --- /dev/null +++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go @@ -0,0 +1,267 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +/* +Package field_mask is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/field_mask.proto + +It has these top-level messages: + FieldMask +*/ +package field_mask + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. 
For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. 
+// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). 
+// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +type FieldMask struct { + // The set of field mask paths. 
+ Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (m *FieldMask) String() string { return proto.CompactTextString(m) } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x9d, 0x8c, + 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01, + 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, + 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0x2e, 0x62, 0x62, 0x76, 0x0f, 0x70, + 0x5a, 0xc5, 0x24, 0xe7, 0x0e, 0x31, 0x21, 0x00, 0xaa, 0x5a, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, + 0x2f, 0xbf, 0x3c, 0x2f, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x6c, 0x8c, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0x5a, 0xdb, 0x3a, 0xc0, 0xea, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/protobuf/ptype/type.pb.go b/vendor/google.golang.org/genproto/protobuf/ptype/type.pb.go new file mode 100644 index 000000000..b9ad3f104 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/protobuf/ptype/type.pb.go @@ -0,0 +1,538 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/type.proto + +/* +Package ptype is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/type.proto + +It has these top-level messages: + Type + Field + Enum + EnumValue + Option +*/ +package ptype + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import google_protobuf1 "google.golang.org/genproto/protobuf/source_context" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (x Syntax) String() string { + return proto.EnumName(Syntax_name, int32(x)) +} +func (Syntax) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. 
+ Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x Field_Kind) String() string { + return proto.EnumName(Field_Kind_name, int32(x)) +} +func (Field_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// Whether a field is optional, 
required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. + Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (x Field_Cardinality) String() string { + return proto.EnumName(Field_Cardinality_name, int32(x)) +} +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 1} } + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"` + // The source context. + SourceContext *google_protobuf1.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // The source syntax. 
+ Syntax Syntax `protobuf:"varint,6,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Type) Reset() { *m = Type{} } +func (m *Type) String() string { return proto.CompactTextString(m) } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *google_protobuf1.SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + // The field name. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. 
+ OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return Field_TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +// Enum type definition. +type Enum struct { + // Enum type name. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + // The source context. + SourceContext *google_protobuf1.SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (m *Enum) String() string { return proto.CompactTextString(m) } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *google_protobuf1.SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (m *EnumValue) String() string { return proto.CompactTextString(m) } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. 
+ Value *google_protobuf.Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Option) Reset() { *m = Option{} } +func (m *Option) String() string { return proto.CompactTextString(m) } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Option) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) +} + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 810 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x8e, 0xda, 0x56, + 0x14, 0x8e, 0x8d, 0xf1, 0xe0, 0xc3, 0xc0, 0xdc, 0xdc, 0x44, 0x89, 0x33, 0x91, 0x52, 0x44, 0xbb, + 0x40, 0x59, 0x80, 0x0a, 0xa3, 0x51, 0xa5, 0xae, 0x60, 0xf0, 0x50, 0x6b, 0x88, 0xed, 0x5e, 0x4c, + 0x93, 0xe9, 0x06, 0x79, 0xe0, 0x0e, 0x22, 0x31, 0xd7, 0x08, 0xdb, 0xed, 0xb0, 0xe8, 0x23, 0xf4, + 0x25, 0xba, 0xec, 0xba, 0x0f, 0xd1, 0x47, 0xea, 0xae, 0xd5, 0xbd, 0x06, 0x63, 0x7e, 0x2a, 0x4d, + 0x9b, 0xcd, 0x68, 0xce, 0xf7, 0x7d, 0xe7, 0xf7, 0x1e, 0x8e, 0xe1, 0x7c, 0x1a, 0x04, 0x53, 0x9f, + 0x36, 0x16, 0xcb, 0x20, 0x0a, 0xee, 0xe2, 0xfb, 0x46, 0xb4, 0x5a, 0xd0, 0xba, 0xb0, 0xf0, 0x59, + 0xc2, 0xd5, 0x37, 0xdc, 
0xf9, 0xab, 0x7d, 0xb1, 0xc7, 0x56, 0x09, 0x7b, 0xfe, 0xd5, 0x3e, 0x15, + 0x06, 0xf1, 0x72, 0x4c, 0x47, 0xe3, 0x80, 0x45, 0xf4, 0x21, 0x4a, 0x54, 0xd5, 0x5f, 0x65, 0x50, + 0xdc, 0xd5, 0x82, 0x62, 0x0c, 0x0a, 0xf3, 0xe6, 0x54, 0x97, 0x2a, 0x52, 0x4d, 0x23, 0xe2, 0x7f, + 0x5c, 0x07, 0xf5, 0x7e, 0x46, 0xfd, 0x49, 0xa8, 0xcb, 0x95, 0x5c, 0xad, 0xd8, 0x7c, 0x51, 0xdf, + 0xcb, 0x5f, 0xbf, 0xe6, 0x34, 0x59, 0xab, 0xf0, 0x0b, 0x50, 0x03, 0x46, 0x83, 0xfb, 0x50, 0xcf, + 0x55, 0x72, 0x35, 0x8d, 0xac, 0x2d, 0xfc, 0x35, 0x9c, 0x04, 0x8b, 0x68, 0x16, 0xb0, 0x50, 0x57, + 0x44, 0xa0, 0x97, 0x07, 0x81, 0x6c, 0xc1, 0x93, 0x8d, 0x0e, 0x1b, 0x50, 0xde, 0xad, 0x57, 0xcf, + 0x57, 0xa4, 0x5a, 0xb1, 0xf9, 0xe6, 0xc0, 0x73, 0x20, 0x64, 0x57, 0x89, 0x8a, 0x94, 0xc2, 0xac, + 0x89, 0x1b, 0xa0, 0x86, 0x2b, 0x16, 0x79, 0x0f, 0xba, 0x5a, 0x91, 0x6a, 0xe5, 0x23, 0x89, 0x07, + 0x82, 0x26, 0x6b, 0x59, 0xf5, 0x0f, 0x15, 0xf2, 0xa2, 0x29, 0xdc, 0x00, 0xe5, 0xd3, 0x8c, 0x4d, + 0xc4, 0x40, 0xca, 0xcd, 0xd7, 0xc7, 0x5b, 0xaf, 0xdf, 0xcc, 0xd8, 0x84, 0x08, 0x21, 0xee, 0x42, + 0x71, 0xec, 0x2d, 0x27, 0x33, 0xe6, 0xf9, 0xb3, 0x68, 0xa5, 0xcb, 0xc2, 0xaf, 0xfa, 0x2f, 0x7e, + 0x57, 0x5b, 0x25, 0xc9, 0xba, 0xf1, 0x19, 0xb2, 0x78, 0x7e, 0x47, 0x97, 0x7a, 0xae, 0x22, 0xd5, + 0xf2, 0x64, 0x6d, 0xa5, 0xef, 0xa3, 0x64, 0xde, 0xe7, 0x15, 0x14, 0xf8, 0x72, 0x8c, 0xe2, 0xa5, + 0x2f, 0xfa, 0xd3, 0xc8, 0x09, 0xb7, 0x87, 0x4b, 0x1f, 0x7f, 0x01, 0x45, 0x31, 0xfc, 0xd1, 0x8c, + 0x4d, 0xe8, 0x83, 0x7e, 0x22, 0x62, 0x81, 0x80, 0x4c, 0x8e, 0xf0, 0x3c, 0x0b, 0x6f, 0xfc, 0x89, + 0x4e, 0xf4, 0x42, 0x45, 0xaa, 0x15, 0xc8, 0xda, 0xca, 0xbe, 0x95, 0xf6, 0xc8, 0xb7, 0x7a, 0x0d, + 0xda, 0xc7, 0x30, 0x60, 0x23, 0x51, 0x1f, 0x88, 0x3a, 0x0a, 0x1c, 0xb0, 0x78, 0x8d, 0x5f, 0x42, + 0x69, 0x42, 0xef, 0xbd, 0xd8, 0x8f, 0x46, 0x3f, 0x79, 0x7e, 0x4c, 0xf5, 0xa2, 0x10, 0x9c, 0xae, + 0xc1, 0x1f, 0x38, 0x56, 0xfd, 0x53, 0x06, 0x85, 0x4f, 0x12, 0x23, 0x38, 0x75, 0x6f, 0x1d, 0x63, + 0x34, 0xb4, 0x6e, 0x2c, 0xfb, 0xbd, 0x85, 0x9e, 0xe0, 0x33, 
0x28, 0x0a, 0xa4, 0x6b, 0x0f, 0x3b, + 0x7d, 0x03, 0x49, 0xb8, 0x0c, 0x20, 0x80, 0xeb, 0xbe, 0xdd, 0x76, 0x91, 0x9c, 0xda, 0xa6, 0xe5, + 0x5e, 0x5e, 0xa0, 0x5c, 0xea, 0x30, 0x4c, 0x00, 0x25, 0x2b, 0x68, 0x35, 0x51, 0x3e, 0xcd, 0x71, + 0x6d, 0x7e, 0x30, 0xba, 0x97, 0x17, 0x48, 0xdd, 0x45, 0x5a, 0x4d, 0x74, 0x82, 0x4b, 0xa0, 0x09, + 0xa4, 0x63, 0xdb, 0x7d, 0x54, 0x48, 0x63, 0x0e, 0x5c, 0x62, 0x5a, 0x3d, 0xa4, 0xa5, 0x31, 0x7b, + 0xc4, 0x1e, 0x3a, 0x08, 0xd2, 0x08, 0xef, 0x8c, 0xc1, 0xa0, 0xdd, 0x33, 0x50, 0x31, 0x55, 0x74, + 0x6e, 0x5d, 0x63, 0x80, 0x4e, 0x77, 0xca, 0x6a, 0x35, 0x51, 0x29, 0x4d, 0x61, 0x58, 0xc3, 0x77, + 0xa8, 0x8c, 0x9f, 0x42, 0x29, 0x49, 0xb1, 0x29, 0xe2, 0x6c, 0x0f, 0xba, 0xbc, 0x40, 0x68, 0x5b, + 0x48, 0x12, 0xe5, 0xe9, 0x0e, 0x70, 0x79, 0x81, 0x70, 0x35, 0x82, 0x62, 0x66, 0xb7, 0xf0, 0x4b, + 0x78, 0x76, 0xd5, 0x26, 0x5d, 0xd3, 0x6a, 0xf7, 0x4d, 0xf7, 0x36, 0x33, 0x57, 0x1d, 0x9e, 0x67, + 0x09, 0xdb, 0x71, 0x4d, 0xdb, 0x6a, 0xf7, 0x91, 0xb4, 0xcf, 0x10, 0xe3, 0xfb, 0xa1, 0x49, 0x8c, + 0x2e, 0x92, 0x0f, 0x19, 0xc7, 0x68, 0xbb, 0x46, 0x17, 0xe5, 0xaa, 0x7f, 0x4b, 0xa0, 0x18, 0x2c, + 0x9e, 0x1f, 0x3d, 0x23, 0xdf, 0x80, 0x46, 0x59, 0x3c, 0x4f, 0x9e, 0x3f, 0xb9, 0x24, 0xe7, 0x07, + 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0x90, 0xad, 0x38, 0xbb, 0x8c, 0xb9, 0xff, 0x7d, 0x38, 0x94, 0xcf, + 0x3b, 0x1c, 0xf9, 0xc7, 0x1d, 0x8e, 0x8f, 0xa0, 0xa5, 0x2d, 0x1c, 0x9d, 0xc2, 0xf6, 0x87, 0x2d, + 0xef, 0xfc, 0xb0, 0xff, 0x7b, 0x8f, 0xd5, 0xef, 0x40, 0x4d, 0xa0, 0xa3, 0x89, 0xde, 0x42, 0x7e, + 0x33, 0x6a, 0xde, 0xf8, 0xf3, 0x83, 0x70, 0x6d, 0xb6, 0x22, 0x89, 0xe4, 0x6d, 0x1d, 0xd4, 0xa4, + 0x0f, 0xbe, 0x6c, 0x83, 0x5b, 0xcb, 0x6d, 0x7f, 0x18, 0x39, 0xc4, 0x76, 0xed, 0x26, 0x7a, 0xb2, + 0x0f, 0xb5, 0x90, 0xd4, 0xf9, 0x05, 0x9e, 0x8d, 0x83, 0xf9, 0x7e, 0xc4, 0x8e, 0xc6, 0x3f, 0x21, + 0x0e, 0xb7, 0x1c, 0xe9, 0xc7, 0xc6, 0x9a, 0x9d, 0x06, 0xbe, 0xc7, 0xa6, 0xf5, 0x60, 0x39, 0x6d, + 0x4c, 0x29, 0x13, 0xda, 0xed, 0xc7, 0x68, 0xc1, 0x0f, 0xd5, 0xb7, 0xe2, 0xef, 0x5f, 0x92, 0xf4, + 
0x9b, 0x9c, 0xeb, 0x39, 0x9d, 0xdf, 0xe5, 0x37, 0xbd, 0xc4, 0xd5, 0xd9, 0x94, 0xfa, 0x9e, 0xfa, + 0xfe, 0x0d, 0x0b, 0x7e, 0x66, 0x3c, 0x41, 0x78, 0xa7, 0x0a, 0xff, 0xd6, 0x3f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x6d, 0x2b, 0xc0, 0xd8, 0x24, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/protobuf/source_context/source_context.pb.go b/vendor/google.golang.org/genproto/protobuf/source_context/source_context.pb.go new file mode 100644 index 000000000..0cedee098 --- /dev/null +++ b/vendor/google.golang.org/genproto/protobuf/source_context/source_context.pb.go @@ -0,0 +1,70 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +/* +Package source_context is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/source_context.proto + +It has these top-level messages: + SourceContext +*/ +package source_context + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName" json:"file_name,omitempty"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (m *SourceContext) String() string { return proto.CompactTextString(m) } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} + +func init() { proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 184 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0xa6, 0x32, 0x72, 0x09, 0x27, 0xe7, + 0xe7, 0xea, 0xa1, 0x99, 0xe2, 0x24, 0x84, 0x62, 0x46, 0x00, 0x48, 0x38, 0x80, 0x31, 0xca, 0x11, + 0xaa, 0x2c, 0x3d, 0x3f, 0x27, 0x31, 0x2f, 0x5d, 0x2f, 0xbf, 0x28, 0x5d, 0x3f, 0x3d, 0x35, 0x0f, + 0xac, 0x09, 0x97, 0x33, 0xad, 0x51, 0xb9, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, + 0xb9, 0x43, 0x4c, 0x0a, 0x80, 0xea, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, + 0x0b, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62, 0x03, 0x1b, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, + 0x5c, 0xbd, 0xa4, 0x22, 0x05, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/regen.go b/vendor/google.golang.org/genproto/regen.go new file mode 
100644 index 000000000..9c906f209 --- /dev/null +++ b/vendor/google.golang.org/genproto/regen.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +// Regen.go regenerates the genproto repository. +// +// Regen.go recursively walks through each directory named by given arguments, +// looking for all .proto files. (Symlinks are not followed.) +// If the pkg_prefix flag is not an empty string, +// any proto file without `go_package` option +// or whose option does not begin with the prefix is ignored. +// Protoc is executed on remaining files, +// one invocation per set of files declaring the same Go package. +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var goPkgOptRe = regexp.MustCompile(`(?m)^option go_package = (.*);`) + +func usage() { + fmt.Fprintln(os.Stderr, `usage: go run regen.go -go_out=path/to/output [-pkg_prefix=pkg/prefix] roots... + +Most users will not need to run this file directly. 
+To regenerate this repository, run regen.sh instead.`) + flag.PrintDefaults() +} + +func main() { + goOutDir := flag.String("go_out", "", "go_out argument to pass to protoc-gen-go") + pkgPrefix := flag.String("pkg_prefix", "", "only include proto files with go_package starting with this prefix") + flag.Usage = usage + flag.Parse() + + if *goOutDir == "" { + log.Fatal("need go_out flag") + } + + pkgFiles := make(map[string][]string) + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() || !strings.HasSuffix(path, ".proto") { + return nil + } + pkg, err := goPkg(path) + if err != nil { + return err + } + pkgFiles[pkg] = append(pkgFiles[pkg], path) + return nil + } + for _, root := range flag.Args() { + if err := filepath.Walk(root, walkFn); err != nil { + log.Fatal(err) + } + } + for pkg, fnames := range pkgFiles { + if !strings.HasPrefix(pkg, *pkgPrefix) { + continue + } + if out, err := protoc(*goOutDir, flag.Args(), fnames); err != nil { + log.Fatalf("error executing protoc: %s\n%s", err, out) + } + } +} + +// goPkg reports the import path declared in the given file's +// `go_package` option. If the option is missing, goPkg returns empty string. +func goPkg(fname string) (string, error) { + content, err := ioutil.ReadFile(fname) + if err != nil { + return "", err + } + + var pkgName string + if match := goPkgOptRe.FindSubmatch(content); len(match) > 0 { + pn, err := strconv.Unquote(string(match[1])) + if err != nil { + return "", err + } + pkgName = pn + } + if p := strings.IndexRune(pkgName, ';'); p > 0 { + pkgName = pkgName[:p] + } + return pkgName, nil +} + +// protoc executes the "protoc" command on files named in fnames, +// passing go_out and include flags specified in goOut and includes respectively. +// protoc returns combined output from stdout and stderr. 
+func protoc(goOut string, includes, fnames []string) ([]byte, error) { + args := []string{"--go_out=plugins=grpc:" + goOut} + for _, inc := range includes { + args = append(args, "-I", inc) + } + args = append(args, fnames...) + return exec.Command("protoc", args...).CombinedOutput() +} diff --git a/vendor/google.golang.org/genproto/regen.sh b/vendor/google.golang.org/genproto/regen.sh new file mode 100755 index 000000000..8d9c73197 --- /dev/null +++ b/vendor/google.golang.org/genproto/regen.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script rebuilds the generated code for the protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed. + +set -e + +PKG=google.golang.org/genproto +PROTO_REPO=https://github.com/google/protobuf +PROTO_SUBDIR=src/google/protobuf +API_REPO=https://github.com/googleapis/googleapis + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +root=$(go list -f '{{.Root}}' $PKG/... 
| head -n1) +if [ -z "$root" ]; then + die "cannot find root of $PKG" +fi + +remove_dirs= +trap 'rm -rf $remove_dirs' EXIT + +if [ -z "$PROTOBUF" ]; then + proto_repo_dir=$(mktemp -d -t regen-cds-proto.XXXXXX) + git clone -q $PROTO_REPO $proto_repo_dir & + remove_dirs="$proto_repo_dir" + # The protoc include directory is actually the "src" directory of the repo. + protodir="$proto_repo_dir/src" +else + protodir="$PROTOBUF/src" +fi + +if [ -z "$GOOGLEAPIS" ]; then + apidir=$(mktemp -d -t regen-cds-api.XXXXXX) + git clone -q $API_REPO $apidir & + remove_dirs="$remove_dirs $apidir" +else + apidir="$GOOGLEAPIS" +fi + +wait + +# Nuke everything, we'll generate them back +rm -r googleapis/ protobuf/ + +go run regen.go -go_out "$root/src" -pkg_prefix "$PKG" "$apidir" "$protodir" + +# Sanity check the build. +echo 1>&2 "Checking that the libraries build..." +go build -v ./... + +echo 1>&2 "All done!" diff --git a/vendor/google.golang.org/grpc/.github/ISSUE_TEMPLATE b/vendor/google.golang.org/grpc/.github/ISSUE_TEMPLATE new file mode 100644 index 000000000..642f85a16 --- /dev/null +++ b/vendor/google.golang.org/grpc/.github/ISSUE_TEMPLATE @@ -0,0 +1,14 @@ +Please answer these questions before submitting your issue. + +### What version of gRPC are you using? + +### What version of Go are you using (`go version`)? + +### What operating system (Linux, Windows, …) and version? + +### What did you do? +If possible, provide a recipe for reproducing the error. + +### What did you expect to see? + +### What did you see instead? 
diff --git a/vendor/google.golang.org/grpc/.please-update b/vendor/google.golang.org/grpc/.please-update new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml new file mode 100644 index 000000000..22bf25004 --- /dev/null +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -0,0 +1,20 @@ +language: go + +go: + - 1.7.x + - 1.8.x + - 1.9.x + +matrix: + include: + - go: 1.9.x + env: ARCH=386 + +go_import_path: google.golang.org/grpc + +before_install: + - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi + +script: + - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh || exit 1; fi + - make test testrace diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 000000000..a5c6e06e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. 
Create more PRs to address different concerns and everyone will be happy. + +- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. + +- Exceptions to the rules can be made if there's a compelling reason for doing so. 
+ diff --git a/vendor/google.golang.org/grpc/Documentation/gomock-example.md b/vendor/google.golang.org/grpc/Documentation/gomock-example.md new file mode 100644 index 000000000..54743e897 --- /dev/null +++ b/vendor/google.golang.org/grpc/Documentation/gomock-example.md @@ -0,0 +1,182 @@ +# Mocking Service for gRPC + +[Example code unary RPC](https://github.com/grpc/grpc-go/tree/master/examples/helloworld/mock_helloworld) + +[Example code streaming RPC](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/mock_routeguide) + +## Why? + +To test client-side logic without the overhead of connecting to a real server. Mocking enables users to write light-weight unit tests to check functionalities on client-side without invoking RPC calls to a server. + +## Idea: Mock the client stub that connects to the server. + +We use Gomock to mock the client interface (in the generated code) and programmatically set its methods to expect and return pre-determined values. This enables users to write tests around the client logic and use this mocked stub while making RPC calls. + +## How to use Gomock? + +Documentation on Gomock can be found [here](https://github.com/golang/mock). +A quick reading of the documentation should enable users to follow the code below. + +Consider a gRPC service based on following proto file: + +```proto +//helloworld.proto + +package helloworld; + +message HelloRequest { + string name = 1; +} + +message HelloReply { + string name = 1; +} + +service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} +} +``` + +The generated file helloworld.pb.go will have a client interface for each service defined in the proto file. This interface will have methods corresponding to each rpc inside that service. + +```Go +type GreeterClient interface { + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} +``` + +The generated code also contains a struct that implements this interface. 
+ +```Go +type greeterClient struct { + cc *grpc.ClientConn +} +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error){ + // ... + // gRPC specific code here + // ... +} +``` + +Along with this the generated code has a method to create an instance of this struct. +```Go +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient +``` + +The user code uses this function to create an instance of the struct greeterClient which then can be used to make rpc calls to the server. +We will mock this interface GreeterClient and use an instance of that mock to make rpc calls. These calls instead of going to server will return pre-determined values. + +To create a mock we’ll use [mockgen](https://github.com/golang/mock#running-mockgen). +From the directory ``` examples/helloworld/ ``` run ``` mockgen google.golang.org/grpc/examples/helloworld/helloworld GreeterClient > mock_helloworld/hw_mock.go ``` + +Notice that in the above command we specify GreeterClient as the interface to be mocked. + +The user test code can import the package generated by mockgen along with library package gomock to write unit tests around client-side logic. +```Go +import "github.com/golang/mock/gomock" +import hwmock "google.golang.org/grpc/examples/helloworld/mock_helloworld" +``` + +An instance of the mocked interface can be created as: +```Go +mockGreeterClient := hwmock.NewMockGreeterClient(ctrl) +``` +This mocked object can be programmed to expect calls to its methods and return pre-determined values. For instance, we can program mockGreeterClient to expect a call to its method SayHello and return a HelloReply with message “Mocked RPC”. + +```Go +mockGreeterClient.EXPECT().SayHello( + gomock.Any(), // expect any value for first parameter + gomock.Any(), // expect any value for second parameter +).Return(&helloworld.HelloReply{Message: “Mocked RPC”}, nil) +``` + +gomock.Any() indicates that the parameter can have any value or type. 
We can indicate specific values for built-in types with gomock.Eq(). +However, if the test code needs to specify the parameter to have a proto message type, we can replace gomock.Any() with an instance of a struct that implements gomock.Matcher interface. + +```Go +type rpcMsg struct { + msg proto.Message +} + +func (r *rpcMsg) Matches(msg interface{}) bool { + m, ok := msg.(proto.Message) + if !ok { + return false + } + return proto.Equal(m, r.msg) +} + +func (r *rpcMsg) String() string { + return fmt.Sprintf("is %s", r.msg) +} + +... + +req := &helloworld.HelloRequest{Name: "unit_test"} +mockGreeterClient.EXPECT().SayHello( + gomock.Any(), + &rpcMsg{msg: req}, +).Return(&helloworld.HelloReply{Message: "Mocked Interface"}, nil) +``` + +## Mock streaming RPCs: + +For our example we consider the case of bi-directional streaming RPCs. Concretely, we'll write a test for RouteChat function from the route guide example to demonstrate how to write mocks for streams. + +RouteChat is a bi-directional streaming RPC, which means calling RouteChat returns a stream that can __Send__ and __Recv__ messages to and from the server, respectively. We'll start by creating a mock of this stream interface returned by RouteChat and then we'll mock the client interface and set expectation on the method RouteChat to return our mocked stream. + +### Generating mocking code: +Like before we'll use [mockgen](https://github.com/golang/mock#running-mockgen). From the `examples/route_guide` directory run: `mockgen google.golang.org/grpc/examples/route_guide/routeguide RouteGuideClient,RouteGuide_RouteChatClient > mock_route_guide/rg_mock.go` + +Notice that we are mocking both client(`RouteGuideClient`) and stream(`RouteGuide_RouteChatClient`) interfaces here. + +This will create a file `rg_mock.go` under directory `mock_route_guide`. This file contins all the mocking code we need to write our test. 
+ +In our test code, like before, we import this mocking code along with the generated code + +```go +import ( + rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide" + rgpb "google.golang.org/grpc/examples/route_guide/routeguide" +) +``` + +Now consider a test that takes the RouteGuide client object as a parameter, makes a RouteChat rpc call and sends a message on the resulting stream. Furthermore, this test expects to see the same message to be received on the stream. + +```go +var msg = ... + +// Creates a RouteChat call and sends msg on it. +// Checks if the received message was equal to msg. +func testRouteChat(client rgpb.RouteGuideClient) error { + ... +} +``` + +We can inject our mock in here by simply passing it as an argument to the method. + +Creating mock for stream interface: + +```go + stream := rgmock.NewMockRouteGuide_RouteChatClient(ctrl) + +``` + +Setting Expectations: + +```go + stream.EXPECT().Send(gomock.Any()).Return(nil) + stream.EXPECT().Recv().Return(msg, nil) +``` + +Creating mock for client interface: + +```go + rgclient := rgmock.NewMockRouteGuideClient(ctrl) +``` + +Setting Expectations: + +```go + rgclient.EXPECT().RouteChat(gomock.Any()).Return(stream, nil) +``` diff --git a/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md b/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md new file mode 100644 index 000000000..1b6b14e9d --- /dev/null +++ b/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md @@ -0,0 +1,41 @@ +# Authentication + +As outlined in the [gRPC authentication guide](https://grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between a client and server. We'll present some code-samples here demonstrating how to provide support for TLS encryption and identity assertions as well as passing OAuth2 tokens to services that support it. 
+ +# Enabling TLS on a gRPC client + +```Go +conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))) +``` + +# Enabling TLS on a gRPC server + +```Go +creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) +if err != nil { + log.Fatalf("Failed to generate credentials %v", err) +} +lis, err := net.Listen("tcp", ":0") +server := grpc.NewServer(grpc.Creds(creds)) +... +server.Serve(lis) +``` + +# Authenticating with Google + +## Google Compute Engine (GCE) + +```Go +conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(oauth.NewComputeEngine())) +``` + +## JWT + +```Go +jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) +if err != nil { + log.Fatalf("Failed to create JWT credentials: %v", err) +} +conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(jwtCreds)) +``` + diff --git a/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md b/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md new file mode 100644 index 000000000..c9a88c689 --- /dev/null +++ b/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md @@ -0,0 +1,204 @@ +# Metadata + +gRPC supports sending metadata between client and server. +This doc shows how to send and receive metadata in gRPC-go. + +## Background + +Four kinds of service method: + +- [Unary RPC](https://grpc.io/docs/guides/concepts.html#unary-rpc) +- [Server streaming RPC](https://grpc.io/docs/guides/concepts.html#server-streaming-rpc) +- [Client streaming RPC](https://grpc.io/docs/guides/concepts.html#client-streaming-rpc) +- [Bidirectional streaming RPC](https://grpc.io/docs/guides/concepts.html#bidirectional-streaming-rpc) + +And concept of [metadata](https://grpc.io/docs/guides/concepts.html#metadata). 
+ +## Constructing metadata + +A metadata can be created using package [metadata](https://godoc.org/google.golang.org/grpc/metadata). +The type MD is actually a map from string to a list of strings: + +```go +type MD map[string][]string +``` + +Metadata can be read like a normal map. +Note that the value type of this map is `[]string`, +so that users can attach multiple values using a single key. + +### Creating a new metadata + +A metadata can be created from a `map[string]string` using function `New`: + +```go +md := metadata.New(map[string]string{"key1": "val1", "key2": "val2"}) +``` + +Another way is to use `Pairs`. +Values with the same key will be merged into a list: + +```go +md := metadata.Pairs( + "key1", "val1", + "key1", "val1-2", // "key1" will have map value []string{"val1", "val1-2"} + "key2", "val2", +) +``` + +__Note:__ all the keys will be automatically converted to lowercase, +so "key1" and "kEy1" will be the same key and their values will be merged into the same list. +This happens for both `New` and `Pairs`. + +### Storing binary data in metadata + +In metadata, keys are always strings. But values can be strings or binary data. +To store binary data value in metadata, simply add "-bin" suffix to the key. +The values with "-bin" suffixed keys will be encoded when creating the metadata: + +```go +md := metadata.Pairs( + "key", "string value", + "key-bin", string([]byte{96, 102}), // this binary data will be encoded (base64) before sending + // and will be decoded after being transferred. 
+) +``` + +## Retrieving metadata from context + +Metadata can be retrieved from context using `FromIncomingContext`: + +```go +func (s *server) SomeRPC(ctx context.Context, in *pb.SomeRequest) (*pb.SomeResponse, err) { + md, ok := metadata.FromIncomingContext(ctx) + // do something with metadata +} +``` + +## Sending and receiving metadata - client side + +[//]: # "TODO: uncomment next line after example source added" +[//]: # "Real metadata sending and receiving examples are available [here](TODO:example_dir)." + +### Sending metadata + +To send metadata to server, the client can wrap the metadata into a context using `NewOutgoingContext`, and make the RPC with this context: + +```go +md := metadata.Pairs("key", "val") + +// create a new context with this metadata +ctx := metadata.NewOutgoingContext(context.Background(), md) + +// make unary RPC +response, err := client.SomeRPC(ctx, someRequest) + +// or make streaming RPC +stream, err := client.SomeStreamingRPC(ctx) +``` + +To read this back from the context on the client (e.g. in an interceptor) before the RPC is sent, use `FromOutgoingContext`. + +### Receiving metadata + +Metadata that a client can receive includes header and trailer. 
+ +#### Unary call + +Header and trailer sent along with a unary call can be retrieved using function [Header](https://godoc.org/google.golang.org/grpc#Header) and [Trailer](https://godoc.org/google.golang.org/grpc#Trailer) in [CallOption](https://godoc.org/google.golang.org/grpc#CallOption): + +```go +var header, trailer metadata.MD // variable to store header and trailer +r, err := client.SomeRPC( + ctx, + someRequest, + grpc.Header(&header), // will retrieve header + grpc.Trailer(&trailer), // will retrieve trailer +) + +// do something with header and trailer +``` + +#### Streaming call + +For streaming calls including: + +- Server streaming RPC +- Client streaming RPC +- Bidirectional streaming RPC + +Header and trailer can be retrieved from the returned stream using function `Header` and `Trailer` in interface [ClientStream](https://godoc.org/google.golang.org/grpc#ClientStream): + +```go +stream, err := client.SomeStreamingRPC(ctx) + +// retrieve header +header, err := stream.Header() + +// retrieve trailer +trailer := stream.Trailer() + +``` + +## Sending and receiving metadata - server side + +[//]: # "TODO: uncomment next line after example source added" +[//]: # "Real metadata sending and receiving examples are available [here](TODO:example_dir)." + +### Receiving metadata + +To read metadata sent by the client, the server needs to retrieve it from RPC context. +If it is a unary call, the RPC handler's context can be used. +For streaming calls, the server needs to get context from the stream. 
+ +#### Unary call + +```go +func (s *server) SomeRPC(ctx context.Context, in *pb.someRequest) (*pb.someResponse, error) { + md, ok := metadata.FromIncomingContext(ctx) + // do something with metadata +} +``` + +#### Streaming call + +```go +func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error { + md, ok := metadata.FromIncomingContext(stream.Context()) // get context from stream + // do something with metadata +} +``` + +### Sending metadata + +#### Unary call + +To send header and trailer to client in unary call, the server can call [SendHeader](https://godoc.org/google.golang.org/grpc#SendHeader) and [SetTrailer](https://godoc.org/google.golang.org/grpc#SetTrailer) functions in module [grpc](https://godoc.org/google.golang.org/grpc). +These two functions take a context as the first parameter. +It should be the RPC handler's context or one derived from it: + +```go +func (s *server) SomeRPC(ctx context.Context, in *pb.someRequest) (*pb.someResponse, error) { + // create and send header + header := metadata.Pairs("header-key", "val") + grpc.SendHeader(ctx, header) + // create and set trailer + trailer := metadata.Pairs("trailer-key", "val") + grpc.SetTrailer(ctx, trailer) +} +``` + +#### Streaming call + +For streaming calls, header and trailer can be sent using function `SendHeader` and `SetTrailer` in interface [ServerStream](https://godoc.org/google.golang.org/grpc#ServerStream): + +```go +func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error { + // create and send header + header := metadata.Pairs("header-key", "val") + stream.SendHeader(header) + // create and set trailer + trailer := metadata.Pairs("trailer-key", "val") + stream.SetTrailer(trailer) +} +``` diff --git a/vendor/google.golang.org/grpc/Documentation/server-reflection-tutorial.md b/vendor/google.golang.org/grpc/Documentation/server-reflection-tutorial.md new file mode 100644 index 000000000..ca8e30cb4 --- /dev/null +++ 
b/vendor/google.golang.org/grpc/Documentation/server-reflection-tutorial.md @@ -0,0 +1,152 @@ +# gRPC Server Reflection Tutorial + +gRPC Server Reflection provides information about publicly-accessible gRPC +services on a server, and assists clients at runtime to construct RPC +requests and responses without precompiled service information. It is used by +gRPC CLI, which can be used to introspect server protos and send/receive test +RPCs. + +## Enable Server Reflection + +gRPC-go Server Reflection is implemented in package [reflection](https://github.com/grpc/grpc-go/tree/master/reflection). To enable server reflection, you need to import this package and register reflection service on your gRPC server. + +For example, to enable server reflection in `example/helloworld`, we need to make the following changes: + +```diff +--- a/examples/helloworld/greeter_server/main.go ++++ b/examples/helloworld/greeter_server/main.go +@@ -40,6 +40,7 @@ import ( + "golang.org/x/net/context" + "google.golang.org/grpc" + pb "google.golang.org/grpc/examples/helloworld/helloworld" ++ "google.golang.org/grpc/reflection" + ) + + const ( +@@ -61,6 +62,8 @@ func main() { + } + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) ++ // Register reflection service on gRPC server. ++ reflection.Register(s) + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +``` + +We have made this change in `example/helloworld`, and we will use it as an example to show the use of gRPC server reflection and gRPC CLI in this tutorial. + +## gRPC CLI + +After enabling Server Reflection in a server application, you can use gRPC CLI to check its services. +gRPC CLI is only available in c++. Instructions on how to use gRPC CLI can be found at [command_line_tool.md](https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md). 
+ +To build gRPC CLI: + +```sh +git clone https://github.com/grpc/grpc +cd grpc +make grpc_cli +cd bins/opt # grpc_cli is in directory bins/opt/ +``` + +## Use gRPC CLI to check services + +First, start the helloworld server in grpc-go directory: + +```sh +$ cd +$ go run examples/helloworld/greeter_server/main.go +``` + +Open a new terminal and make sure you are in the directory where grpc_cli lives: + +```sh +$ cd /bins/opt +``` + +### List services + +`grpc_cli ls` command lists services and methods exposed at a given port: + +- List all the services exposed at a given port + + ```sh + $ ./grpc_cli ls localhost:50051 + ``` + + output: + ```sh + helloworld.Greeter + grpc.reflection.v1alpha.ServerReflection + ``` + +- List one service with details + + `grpc_cli ls` command inspects a service given its full name (in the format of + \.\). It can print information with a long listing format + when `-l` flag is set. This flag can be used to get more details about a + service. + + ```sh + $ ./grpc_cli ls localhost:50051 helloworld.Greeter -l + ``` + + output: + ```sh + filename: helloworld.proto + package: helloworld; + service Greeter { + rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} + } + + ``` + +### List methods + +- List one method with details + + `grpc_cli ls` command also inspects a method given its full name (in the + format of \.\.\). + + ```sh + $ ./grpc_cli ls localhost:50051 helloworld.Greeter.SayHello -l + ``` + + output: + ```sh + rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} + ``` + +### Inspect message types + +We can use`grpc_cli type` command to inspect request/response types given the +full name of the type (in the format of \.\). 
+ +- Get information about the request type + + ```sh + $ ./grpc_cli type localhost:50051 helloworld.HelloRequest + ``` + + output: + ```sh + message HelloRequest { + optional string name = 1[json_name = "name"]; + } + ``` + +### Call a remote method + +We can send RPCs to a server and get responses using `grpc_cli call` command. + +- Call a unary method + + ```sh + $ ./grpc_cli call localhost:50051 SayHello "name: 'gRPC CLI'" + ``` + + output: + ```sh + message: "Hello gRPC CLI" + ``` diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 000000000..39606b564 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,45 @@ +all: test testrace + +deps: + go get -d -v google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: testdeps + go test -cpu 1,4 google.golang.org/grpc/... + +testrace: testdeps + go test -race -cpu 1,4 google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... 
+ +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + coverage diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 000000000..622a5dc3e --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,45 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) + +The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. + +Installation +------------ + +To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: + +``` +$ go get -u google.golang.org/grpc +``` + +Prerequisites +------------- + +This requires Go 1.7 or later. + +Constraints +----------- +The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. + +Documentation +------------- +See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). + +Performance +----------- +See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + +Status +------ +General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). 
+ +FAQ +--- + +#### Compiling error, undefined: grpc.SupportPackageIsVersion + +Please update proto package, gRPC package and rebuild the proto files: + - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` + - `go get -u google.golang.org/grpc` + - `protoc --go_out=plugins=grpc:. *.proto` diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 000000000..090fbe87c --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "math/rand" + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var ( + DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, + baseDelay: 1.0 * time.Second, + factor: 1.6, + jitter: 0.2, + } +) + +// backoffStrategy defines the methodology for backing off after a grpc +// connection failure. +// +// This is unexported until the gRPC project decides whether or not to allow +// alternative backoff strategies. Once a decision is made, this type and its +// method may be exported. +type backoffStrategy interface { + // backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. 
+ backoff(retries int) time.Duration +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration + + // TODO(stevvooe): The following fields are not exported, as allowing + // changes would violate the current gRPC specification for backoff. If + // gRPC decides to allow more interesting backoff strategies, these fields + // may be opened up in the future. + + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay time.Duration + + // factor is applied to the backoff after each retry. + factor float64 + + // jitter provides a range to randomize backoff delays. + jitter float64 +} + +func setDefaults(bc *BackoffConfig) { + md := bc.MaxDelay + *bc = DefaultBackoffConfig + + if md > 0 { + bc.MaxDelay = md + } +} + +func (bc BackoffConfig) backoff(retries int) time.Duration { + if retries == 0 { + return bc.baseDelay + } + backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/backoff_test.go b/vendor/google.golang.org/grpc/backoff_test.go new file mode 100644 index 000000000..37e8e3f62 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff_test.go @@ -0,0 +1,29 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "testing" + +func TestBackoffConfigDefaults(t *testing.T) { + b := BackoffConfig{} + setDefaults(&b) + if b != DefaultBackoffConfig { + t.Fatalf("expected BackoffConfig to pickup default parameters: %v != %v", b, DefaultBackoffConfig) + } +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 000000000..ab65049dd --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,408 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. 
+type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerConfig specifies the configurations for Balancer. +type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. 
+ // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. 
Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// downErr implements net.Error. It is constructed by gRPC internals and passed to the down +// call of Balancer. +type downErr struct { + timeout bool + temporary bool + desc string +} + +func (e downErr) Error() string { return e.desc } +func (e downErr) Timeout() bool { return e.timeout } +func (e downErr) Temporary() bool { return e.temporary } + +func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { + return downErr{ + timeout: timeout, + temporary: temporary, + desc: fmt.Sprintf(format, a...), + } +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. 
+} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. 
+ rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. +func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. 
+ addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). 
+type pickFirst struct { + *roundRobin +} + +func pickFirstBalancerV1(r naming.Resolver) Balancer { + return &pickFirst{&roundRobin{r: r}} +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 000000000..84e10b630 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "errors" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/resolver" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) + // defaultBuilder is the default balancer to use. + defaultBuilder Builder // TODO(bar) install pickfirst as default. +) + +// Register registers the balancer builder to the balancer map. +// b.Name will be used as the name registered with this builder. +func Register(b Builder) { + m[b.Name()] = b +} + +// Get returns the resolver builder registered with the given name. +// If no builder is register with the name, the default pickfirst will +// be used. 
+func Get(name string) Builder {
+	if b, ok := m[name]; ok {
+		return b
+	}
+	return defaultBuilder
+}
+
+// SubConn represents a gRPC sub connection.
+// Each sub connection contains a list of addresses. gRPC will
+// try to connect to them (in sequence), and stop trying the
+// remainder once one connection is successful.
+//
+// The reconnect backoff will be applied on the list, not a single address.
+// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger
+// the connecting, Balancers must call Connect.
+// When the connection encounters an error, it will reconnect immediately.
+// When the connection becomes IDLE, it will not reconnect unless Connect is
+// called.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will be gracefully closed, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts the connecting for this SubConn.
+	Connect()
+}
+
+// NewSubConnOptions contains options to create new SubConn.
+type NewSubConnOptions struct{}
+
+// ClientConn represents a gRPC ClientConn.
+type ClientConn interface {
+	// NewSubConn is called by balancer to create a new SubConn.
+	// It doesn't block and wait for the connections to be established.
+	// Behaviors of the SubConn can be controlled by options.
+	NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
+	// RemoveSubConn removes the SubConn from ClientConn.
+	// The SubConn will be shutdown.
+	RemoveSubConn(SubConn)
+
+	// UpdateBalancerState is called by balancer to notify gRPC that some internal
+	// state in balancer has changed.
+	//
+	// gRPC will update the connectivity state of the ClientConn, and will call pick
+	// on the new picker to pick new SubConn.
+	UpdateBalancerState(s connectivity.State, p Picker)
+
+	// Target returns the dial target for this ClientConn.
+	Target() string
+}
+
+// BuildOptions contains additional information for Build.
+type BuildOptions struct {
+	// DialCreds is the transport credential the Balancer implementation can
+	// use to dial to a remote load balancer server. The Balancer implementations
+	// can ignore this if it does not need to talk to another party securely.
+	DialCreds credentials.TransportCredentials
+	// Dialer is the custom dialer the Balancer implementation can use to dial
+	// to a remote load balancer server. The Balancer implementations
+	// can ignore this if it doesn't need to talk to remote balancer.
+	Dialer func(context.Context, string) (net.Conn, error)
+}
+
+// Builder creates a balancer.
+type Builder interface {
+	// Build creates a new balancer with the ClientConn.
+	Build(cc ClientConn, opts BuildOptions) Balancer
+	// Name returns the name of balancers built by this builder.
+	// It will be used to pick balancers (for example in service config).
+	Name() string
+}
+
+// PickOptions contains additional information for the Pick operation.
+type PickOptions struct{}
+
+// DoneInfo contains additional information for done.
+type DoneInfo struct {
+	// Err is the rpc error the RPC finished with. It could be nil.
+	Err error
+}
+
+var (
+	// ErrNoSubConnAvailable indicates no SubConn is available for pick().
+	// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+	ErrNoSubConnAvailable = errors.New("no SubConn is available")
+	// ErrTransientFailure indicates all SubConns are in TransientFailure.
+	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+)
+
+// Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+type Picker interface {
+	// Pick returns the SubConn to be used to send the RPC.
+	// The returned SubConn must be one returned by NewSubConn().
+	//
+	// This function is expected to return:
+	// - a SubConn that is known to be READY;
+	// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
+	//   made (for example, some SubConn is in CONNECTING mode);
+	// - other errors if no active connecting is happening (for example, all SubConn
+	//   are in TRANSIENT_FAILURE mode).
+	//
+	// If a SubConn is returned:
+	// - If it is READY, gRPC will send the RPC on it;
+	// - If it is not ready, or becomes not ready after it's returned, gRPC will block
+	//   this call until a new picker is updated and will call pick on the new picker.
+	//
+	// If the returned error is not nil:
+	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
+	// - If the error is ErrTransientFailure:
+	//   - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
+	//     is called to pick again;
+	//   - Otherwise, RPC will fail with unavailable error.
+	// - Else (error is other non-nil error):
+	//   - The RPC will fail with unavailable error.
+	//
+	// The returned done() function will be called once the rpc has finished, with the
+	// final status of that RPC.
+	// done may be nil if balancer doesn't care about the RPC status.
+	Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+}
+
+// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
+// the connectivity states.
+// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000..99e71cd39 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,241 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return &rrBuilder{} +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrBuilder struct{} + +func (*rrBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &rrBalancer{ + cc: cc, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &connectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. + picker: newPicker([]balancer.SubConn{}, nil), + } +} + +func (*rrBuilder) Name() string { + return "roundrobin" +} + +type rrBalancer struct { + cc balancer.ClientConn + + csEvltr *connectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker *picker +} + +func (b *rrBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("roundrobin.rrBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("roundrobin.rrBalancer: got new resolved addresses: ", addrs) + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. 
+ addrsSet := make(map[resolver.Address]struct{}) + for _, a := range addrs { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("roundrobin.rrBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - or does round robin selection of all READY SubConns otherwise. +func (b *rrBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = newPicker(nil, balancer.ErrTransientFailure) + return + } + var readySCs []balancer.SubConn + for sc, st := range b.scStates { + if st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + b.picker = newPicker(readySCs, nil) +} + +func (b *rrBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("roundrobin.rrBalancer: handle SubConn state change: %p, %v", sc, s) + oldS, ok := b.scStates[sc] + if !ok { + grpclog.Infof("roundrobin.rrBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. 
+		delete(b.scStates, sc)
+	}
+
+	oldAggrState := b.state
+	b.state = b.csEvltr.recordTransition(oldS, s)
+
+	// Regenerate picker when one of the following happens:
+	//  - this sc became ready from not-ready
+	//  - this sc became not-ready from ready
+	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
+	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
+		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+		b.regeneratePicker()
+	}
+
+	b.cc.UpdateBalancerState(b.state, b.picker)
+	return
+}
+
+// Close is a nop because roundrobin balancer doesn't have internal state to clean
+// up, and it doesn't need to call RemoveSubConn for the SubConns.
+func (b *rrBalancer) Close() {
+}
+
+type picker struct {
+	// If err is not nil, Pick always returns this err. It's immutable after
+	// picker is created.
+	err error
+
+	// subConns is the snapshot of the roundrobin balancer when this picker was
+	// created. The slice is immutable. Each Get() will do a round robin
+	// selection from it and return the selected SubConn.
+ subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func newPicker(scs []balancer.SubConn, err error) *picker { + grpclog.Infof("roundrobinPicker: newPicker called with scs: %v, %v", scs, err) + if err != nil { + return &picker{err: err} + } + return &picker{ + subConns: scs, + } +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return sc, nil, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// +// recordTransition should only be called synchronously from the same goroutine. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. 
+ switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go new file mode 100644 index 000000000..505ddaf9d --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go @@ -0,0 +1,470 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package roundrobin + +import ( + "fmt" + "net" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + _ "google.golang.org/grpc/grpclog/glogger" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/test/leakcheck" +) + +type testServer struct { + testpb.TestServiceServer +} + +func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + return nil +} + +type test struct { + servers []*grpc.Server + addresses []string +} + +func (t *test) cleanup() { + for _, s := range t.servers { + s.Stop() + } +} + +func startTestServers(count int) (_ *test, err error) { + t := &test{} + + defer func() { + if err != nil { + for _, s := range t.servers { + s.Stop() + } + } + }() + for i := 0; i < count; i++ { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, fmt.Errorf("Failed to listen %v", err) + } + + s := grpc.NewServer() + testpb.RegisterTestServiceServer(s, &testServer{}) + t.servers = append(t.servers, s) + t.addresses = append(t.addresses, lis.Addr().String()) + + go func(s *grpc.Server, l net.Listener) { + s.Serve(l) + }(s, lis) + } + + return t, nil +} + +func TestOneBackend(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + test, err := startTestServers(1) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first 
RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: test.addresses[0]}}) + // The second RPC should succeed. + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } +} + +func TestBackendsRoundRobin(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + backendCount := 5 + test, err := startTestServers(backendCount) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var resolvedAddrs []resolver.Address + for i := 0; i < backendCount; i++ { + resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) + } + + r.NewAddress(resolvedAddrs) + var p peer.Peer + // Make sure connections to all servers are up. 
+ for si := 0; si < backendCount; si++ { + var connected bool + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() == test.addresses[si] { + connected = true + break + } + time.Sleep(time.Millisecond) + } + if !connected { + t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) + } + } + + for i := 0; i < 3*backendCount; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() != test.addresses[i%backendCount] { + t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) + } + } +} + +func TestAddressesRemoved(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + test, err := startTestServers(1) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: test.addresses[0]}}) + // The second RPC should succeed. 
+ if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, <nil>", err) + } + + r.NewAddress([]resolver.Address{}) + for i := 0; i < 1000; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) == codes.DeadlineExceeded { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("No RPC failed after removing all addresses, want RPC to fail with DeadlineExceeded") +} + +func TestCloseWithPendingRPC(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + test, err := startTestServers(1) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + testc := testpb.NewTestServiceClient(cc) + + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This RPC blocks until cc is closed. 
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) == codes.DeadlineExceeded { + t.Errorf("RPC failed because of deadline after cc is closed; want error the client connection is closing") + } + cancel() + }() + } + cc.Close() + wg.Wait() +} + +func TestNewAddressWhileBlocking(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + test, err := startTestServers(1) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: test.addresses[0]}}) + // The second RPC should succeed. + ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, nil", err) + } + + r.NewAddress([]resolver.Address{}) + + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This RPC blocks until NewAddress is called. 
+ testc.EmptyCall(context.Background(), &testpb.Empty{}) + }() + } + time.Sleep(50 * time.Millisecond) + r.NewAddress([]resolver.Address{{Addr: test.addresses[0]}}) + wg.Wait() +} + +func TestOneServerDown(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + backendCount := 3 + test, err := startTestServers(backendCount) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var resolvedAddrs []resolver.Address + for i := 0; i < backendCount; i++ { + resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) + } + + r.NewAddress(resolvedAddrs) + var p peer.Peer + // Make sure connections to all servers are up. 
+ for si := 0; si < backendCount; si++ { + var connected bool + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() == test.addresses[si] { + connected = true + break + } + time.Sleep(time.Millisecond) + } + if !connected { + t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) + } + } + + for i := 0; i < 3*backendCount; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() != test.addresses[i%backendCount] { + t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) + } + } + + // Stop one server, RPCs should roundrobin among the remaining servers. + backendCount-- + test.servers[backendCount].Stop() + // Loop until see server[backendCount-1] twice without seeing server[backendCount]. + var targetSeen int + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + switch p.Addr.String() { + case test.addresses[backendCount-1]: + targetSeen++ + case test.addresses[backendCount]: + // Reset targetSeen if peer is server[backendCount]. + targetSeen = 0 + } + // Break to make sure the last picked address is server[-1], so the following for loop won't be flaky. 
+ if targetSeen >= 2 { + break + } + } + if targetSeen != 2 { + t.Fatal("Failed to see server[backendCount-1] twice without seeing server[backendCount]") + } + for i := 0; i < 3*backendCount; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() != test.addresses[i%backendCount] { + t.Errorf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) + } + } +} + +func TestAllServersDown(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + backendCount := 3 + test, err := startTestServers(backendCount) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var resolvedAddrs []resolver.Address + for i := 0; i < backendCount; i++ { + resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) + } + + r.NewAddress(resolvedAddrs) + var p peer.Peer + // Make sure connections to all servers are up. 
+ for si := 0; si < backendCount; si++ { + var connected bool + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() == test.addresses[si] { + connected = true + break + } + time.Sleep(time.Millisecond) + } + if !connected { + t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) + } + } + + for i := 0; i < 3*backendCount; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() != test.addresses[i%backendCount] { + t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) + } + } + + // All servers are stopped, failfast RPC should fail with unavailable. + for i := 0; i < backendCount; i++ { + test.servers[i].Stop() + } + time.Sleep(100 * time.Millisecond) + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) == codes.Unavailable { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("Failfast RPCs didn't fail with Unavailable after all servers are stopped") +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 000000000..e4a95fd5c --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,248 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} + +// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. +// TODO make a general purpose buffer that uses interface{}. +type scStateUpdateBuffer struct { + c chan *scStateUpdate + mu sync.Mutex + backlog []*scStateUpdate +} + +func newSCStateUpdateBuffer() *scStateUpdateBuffer { + return &scStateUpdateBuffer{ + c: make(chan *scStateUpdate, 1), + } +} + +func (b *scStateUpdateBuffer) put(t *scStateUpdate) { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + return + default: + } + } + b.backlog = append(b.backlog, t) +} + +func (b *scStateUpdateBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receiving, the caller should call load to send another +// scStateChangeTuple onto the channel if there is any. +func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { + return b.c +} + +// resolverUpdate contains the new resolved addresses or error if there's +// any. 
+type resolverUpdate struct { + addrs []resolver.Address + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancer balancer.Balancer + stateChangeQueue *scStateUpdateBuffer + resolverUpdateCh chan *resolverUpdate + done chan struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + stateChangeQueue: newSCStateUpdateBuffer(), + resolverUpdateCh: make(chan *resolverUpdate, 1), + done: make(chan struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher runs balancer functions sequentially, so the balancer can be implemented +// lock-free. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. 
+ if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { + select { + case <-ccb.resolverUpdateCh: + default: + } + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs) + ac, err := ccb.cc.newAddrConn(addrs) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + ac.acbw = acbw + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + grpclog.Infof("ccBalancerWrapper: removing subconn") + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p) + ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs) + acbw.mu.Lock() + defer acbw.mu.Unlock() + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. 
+ acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.acbw = acbw + if acState != connectivity.Idle { + ac.connect(false) + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect(false) +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_test.go b/vendor/google.golang.org/grpc/balancer_test.go new file mode 100644 index 000000000..29dbe0a67 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_test.go @@ -0,0 +1,798 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "fmt" + "math" + "strconv" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + _ "google.golang.org/grpc/grpclog/glogger" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/test/leakcheck" +) + +type testWatcher struct { + // the channel to receive name resolution updates + update chan *naming.Update + // the side channel to get to know how many updates in a batch + side chan int + // the channel to notify update injector that the update reading is done + readDone chan int +} + +func (w *testWatcher) Next() (updates []*naming.Update, err error) { + n := <-w.side + if n == 0 { + return nil, fmt.Errorf("w.side is closed") + } + for i := 0; i < n; i++ { + u := <-w.update + if u != nil { + updates = append(updates, u) + } + } + w.readDone <- 0 + return +} + +func (w *testWatcher) Close() { + close(w.side) +} + +// Inject naming resolution updates to the testWatcher. +func (w *testWatcher) inject(updates []*naming.Update) { + w.side <- len(updates) + for _, u := range updates { + w.update <- u + } + <-w.readDone +} + +type testNameResolver struct { + w *testWatcher + addr string +} + +func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) { + r.w = &testWatcher{ + update: make(chan *naming.Update, 1), + side: make(chan int, 1), + readDone: make(chan int), + } + r.w.side <- 1 + r.w.update <- &naming.Update{ + Op: naming.Add, + Addr: r.addr, + } + go func() { + <-r.w.readDone + }() + return r.w, nil +} + +func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver, func()) { + var servers []*server + for i := 0; i < numServers; i++ { + s := newTestServer() + servers = append(servers, s) + go s.start(t, 0, maxStreams) + s.wait(t, 2*time.Second) + } + // Point to server[0] + addr := "localhost:" + servers[0].port + return servers, &testNameResolver{ + addr: addr, + }, func() { + for i := 0; i < numServers; i++ { + 
servers[i].stop() + } + } +} + +func TestNameDiscovery(t *testing.T) { + defer leakcheck.Check(t) + // Start 2 servers on 2 ports. + numServers := 2 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + req := "port" + var reply string + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) + } + // Inject the name resolution change to remove servers[0] and add servers[1]. + var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }) + updates = append(updates, &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + }) + r.w.inject(updates) + // Loop until the rpcs in flight talks to servers[1]. + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } +} + +func TestEmptyAddrs(t *testing.T) { + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, ", err, reply, expectedResponse) + } + // Inject name resolution change to remove the server so that there is no address + // available after that. 
+ u := &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + } + r.w.inject([]*naming.Update{u}) + // Loop until the above updates apply. + for { + time.Sleep(10 * time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc); err != nil { + cancel() + break + } + cancel() + } +} + +func TestRoundRobin(t *testing.T) { + defer leakcheck.Check(t) + // Start 3 servers on 3 ports. + numServers := 3 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] to the service discovery. + u := &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + req := "port" + var reply string + // Loop until servers[1] is up + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + // Add server2[2] to the service discovery. + u = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[2].port, + } + r.w.inject([]*naming.Update{u}) + // Loop until both servers[2] are up. + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[2].port { + break + } + time.Sleep(10 * time.Millisecond) + } + // Check the incoming RPCs served in a round-robin manner. 
+ for i := 0; i < 10; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[i%numServers].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", i, err, servers[i%numServers].port) + } + } +} + +func TestCloseWithPendingRPC(t *testing.T) { + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) + } + // Remove the server. + updates := []*naming.Update{{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }} + r.w.inject(updates) + // Loop until the above update applies. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded { + cancel() + break + } + time.Sleep(10 * time.Millisecond) + cancel() + } + // Issue 2 RPCs which should be completed with error status once cc is closed. 
+ var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + }() + go func() { + defer wg.Done() + var reply string + time.Sleep(5 * time.Millisecond) + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + }() + time.Sleep(5 * time.Millisecond) + cc.Close() + wg.Wait() +} + +func TestGetOnWaitChannel(t *testing.T) { + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Remove all servers so that all upcoming RPCs will block on waitCh. + updates := []*naming.Update{{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }} + r.w.inject(updates) + for { + var reply string + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded { + cancel() + break + } + cancel() + time.Sleep(10 * time.Millisecond) + } + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) + } + }() + // Add a connected server to get the above RPC through. + updates = []*naming.Update{{ + Op: naming.Add, + Addr: "localhost:" + servers[0].port, + }} + r.w.inject(updates) + // Wait until the above RPC succeeds. 
+ wg.Wait() +} + +func TestOneServerDown(t *testing.T) { + defer leakcheck.Check(t) + // Start 2 servers. + numServers := 2 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] to the service discovery. + var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + }) + r.w.inject(updates) + req := "port" + var reply string + // Loop until servers[1] is up + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + + var wg sync.WaitGroup + numRPC := 100 + sleepDuration := 10 * time.Millisecond + wg.Add(1) + go func() { + time.Sleep(sleepDuration) + // After sleepDuration, kill server[0]. + servers[0].stop() + wg.Done() + }() + + // All non-failfast RPCs should not block because there's at least one connection available. + for i := 0; i < numRPC; i++ { + wg.Add(1) + go func() { + time.Sleep(sleepDuration) + // After sleepDuration, invoke RPC. + // server[0] is killed around the same time to make it racy between balancer and gRPC internals. + Invoke(context.Background(), "/foo/bar", &req, &reply, cc, FailFast(false)) + wg.Done() + }() + } + wg.Wait() +} + +func TestOneAddressRemoval(t *testing.T) { + defer leakcheck.Check(t) + // Start 2 servers. + numServers := 2 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] to the service discovery. 
+ var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + }) + r.w.inject(updates) + req := "port" + var reply string + // Loop until servers[1] is up + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + + var wg sync.WaitGroup + numRPC := 100 + sleepDuration := 10 * time.Millisecond + wg.Add(1) + go func() { + time.Sleep(sleepDuration) + // After sleepDuration, delete server[0]. + var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }) + r.w.inject(updates) + wg.Done() + }() + + // All non-failfast RPCs should not fail because there's at least one connection available. + for i := 0; i < numRPC; i++ { + wg.Add(1) + go func() { + var reply string + time.Sleep(sleepDuration) + // After sleepDuration, invoke RPC. + // server[0] is removed around the same time to make it racy between balancer and gRPC internals. 
+ if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + wg.Done() + }() + } + wg.Wait() +} + +func checkServerUp(t *testing.T, currentServer *server) { + req := "port" + port := currentServer.port + cc, err := Dial("localhost:"+port, WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + var reply string + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == port { + break + } + time.Sleep(10 * time.Millisecond) + } +} + +func TestPickFirstEmptyAddrs(t *testing.T) { + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, ", err, reply, expectedResponse) + } + // Inject name resolution change to remove the server so that there is no address + // available after that. + u := &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + } + r.w.inject([]*naming.Update{u}) + // Loop until the above updates apply. 
+ for { + time.Sleep(10 * time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc); err != nil { + cancel() + break + } + cancel() + } +} + +func TestPickFirstCloseWithPendingRPC(t *testing.T) { + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) + } + // Remove the server. + updates := []*naming.Update{{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }} + r.w.inject(updates) + // Loop until the above update applies. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded { + cancel() + break + } + time.Sleep(10 * time.Millisecond) + cancel() + } + // Issue 2 RPCs which should be completed with error status once cc is closed. 
+ var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + }() + go func() { + defer wg.Done() + var reply string + time.Sleep(5 * time.Millisecond) + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + }() + time.Sleep(5 * time.Millisecond) + cc.Close() + wg.Wait() +} + +func TestPickFirstOrderAllServerUp(t *testing.T) { + defer leakcheck.Check(t) + // Start 3 servers on 3 ports. + numServers := 3 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] and [2] to the service discovery. 
+ u := &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + + u = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[2].port, + } + r.w.inject([]*naming.Update{u}) + + // Loop until all 3 servers are up + checkServerUp(t, servers[0]) + checkServerUp(t, servers[1]) + checkServerUp(t, servers[2]) + + // Check the incoming RPCs served in server[0] + req := "port" + var reply string + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Delete server[0] in the balancer, the incoming RPCs served in server[1] + // For test addrconn, close server[0] instead + u = &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + } + r.w.inject([]*naming.Update{u}) + // Loop until it changes to server[1] + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Add server[0] back to the balancer, the incoming RPCs served in server[1] + // Add is append operation, the order of Notify now is {server[1].port server[2].port server[0].port} + u = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[0].port, + } + r.w.inject([]*naming.Update{u}) + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 
1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Delete server[1] in the balancer, the incoming RPCs served in server[2] + u = &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[2].port { + break + } + time.Sleep(1 * time.Second) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[2].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Delete server[2] in the balancer, the incoming RPCs served in server[0] + u = &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[2].port, + } + r.w.inject([]*naming.Update{u}) + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(1 * time.Second) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } +} + +func TestPickFirstOrderOneServerDown(t *testing.T) { + defer leakcheck.Check(t) + // Start 3 servers on 3 ports. + numServers := 3 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] and [2] to the service discovery. 
+ u := &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + + u = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[2].port, + } + r.w.inject([]*naming.Update{u}) + + // Loop until all 3 servers are up + checkServerUp(t, servers[0]) + checkServerUp(t, servers[1]) + checkServerUp(t, servers[2]) + + // Check the incoming RPCs served in server[0] + req := "port" + var reply string + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } + + // server[0] down, incoming RPCs served in server[1], but the order of Notify still remains + // {server[0] server[1] server[2]} + servers[0].stop() + // Loop until it changes to server[1] + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // up the server[0] back, the incoming RPCs served in server[1] + p, _ := strconv.Atoi(servers[0].port) + servers[0] = newTestServer() + go servers[0].start(t, p, math.MaxUint32) + defer servers[0].stop() + servers[0].wait(t, 2*time.Second) + checkServerUp(t, servers[0]) + + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Delete server[1] in the balancer, the 
incoming RPCs served in server[0] + u = &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(1 * time.Second) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } +} + +func TestPickFirstOneAddressRemoval(t *testing.T) { + defer leakcheck.Check(t) + // Start 2 servers. + numServers := 2 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("localhost:"+servers[0].port, WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] to the service discovery. + var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + }) + r.w.inject(updates) + + // Create a new cc to Loop until servers[1] is up + checkServerUp(t, servers[0]) + checkServerUp(t, servers[1]) + + var wg sync.WaitGroup + numRPC := 100 + sleepDuration := 10 * time.Millisecond + wg.Add(1) + go func() { + time.Sleep(sleepDuration) + // After sleepDuration, delete server[0]. + var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }) + r.w.inject(updates) + wg.Done() + }() + + // All non-failfast RPCs should not fail because there's at least one connection available. + for i := 0; i < numRPC; i++ { + wg.Add(1) + go func() { + var reply string + time.Sleep(sleepDuration) + // After sleepDuration, invoke RPC. 
+ // server[0] is removed around the same time to make it racy between balancer and gRPC internals. + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + wg.Done() + }() + } + wg.Wait() +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 000000000..9d0616080 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,367 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. 
+} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(cc.Target(), BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &connectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateBalancerState(connectivity.Idle, bw) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + + // To aggregate the connectivity state. + csEvltr *connectivityStateEvaluator + state connectivity.State + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't returned, cc doesn't have access to balancer. + startCh chan struct{} +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst) + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly.
+ a := resolver.Address{ + Addr: bw.cc.Target(), + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.cc.Target()}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + oldSC.UpdateAddresses(newAddrs) + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. 
+ del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. + } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s) + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.recordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. 
+ delete(bw.connSt, sc) + } + return +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. + return +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() + return +} + +// The picker is the balancerWrapper itself. +// Pick should never return ErrNoSubConnAvailable. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return nil, nil, err + } + var done func(balancer.DoneInfo) + if p != nil { + done = func(i balancer.DoneInfo) { p() } + } + var sc balancer.SubConn + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, sc = range bw.conns { + break + } + } else { + var ok bool + sc, ok = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + if !ok && failfast { + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. 
+ return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + } + + return sc, done, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + mu sync.Mutex + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + cse.mu.Lock() + defer cse.mu.Unlock() + + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/benchmark/benchmain/main.go b/vendor/google.golang.org/grpc/benchmark/benchmain/main.go new file mode 100644 index 000000000..0cc1f25e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/benchmain/main.go @@ -0,0 +1,499 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package main provides benchmark with setting flags. + +An example to run some benchmarks with profiling enabled: + +go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \ + -compression=on -maxConcurrentCalls=1 -trace=off \ + -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \ + -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result + +As a suggestion, when creating a branch, you can run this benchmark and save the result +file "-resultFile=basePerf", and later when you at the middle of the work or finish the +work, you can get the benchmark result and compare it with the base anytime. + +Assume there are two result files names as "basePerf" and "curPerf" created by adding +-resultFile=basePerf and -resultFile=curPerf. 
+ To format the curPerf, run: + go run benchmark/benchresult/main.go curPerf + To observe how the performance changes based on a base result, run: + go run benchmark/benchresult/main.go basePerf curPerf +*/ +package main + +import ( + "encoding/gob" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "reflect" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + bm "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/latency" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" +) + +const ( + modeOn = "on" + modeOff = "off" + modeBoth = "both" +) + +var allCompressionModes = []string{modeOn, modeOff, modeBoth} +var allTraceModes = []string{modeOn, modeOff, modeBoth} + +const ( + workloadsUnary = "unary" + workloadsStreaming = "streaming" + workloadsAll = "all" +) + +var allWorkloads = []string{workloadsUnary, workloadsStreaming, workloadsAll} + +var ( + runMode = []bool{true, true} // {runUnary, runStream} + // When set the latency to 0 (no delay), the result is slower than the real result with no delay + // because latency simulation section has extra operations + ltc = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay. 
+ kbps = []int{0, 10240} // if non-positive, infinite + mtu = []int{0} // if non-positive, infinite + maxConcurrentCalls = []int{1, 8, 64, 512} + reqSizeBytes = []int{1, 1024, 1024 * 1024} + respSizeBytes = []int{1, 1024, 1024 * 1024} + enableTrace []bool + benchtime time.Duration + memProfile, cpuProfile string + memProfileRate int + enableCompressor []bool + networkMode string + benchmarkResultFile string + networks = map[string]latency.Network{ + "Local": latency.Local, + "LAN": latency.LAN, + "WAN": latency.WAN, + "Longhaul": latency.Longhaul, + } +) + +func unaryBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { + caller, close := makeFuncUnary(benchFeatures) + defer close() + runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s) +} + +func streamBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { + caller, close := makeFuncStream(benchFeatures) + defer close() + runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s) +} + +func makeFuncUnary(benchFeatures stats.Features) (func(int), func()) { + nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} + opts := []grpc.DialOption{} + sopts := []grpc.ServerOption{} + if benchFeatures.EnableCompressor { + sopts = append(sopts, + grpc.RPCCompressor(nopCompressor{}), + grpc.RPCDecompressor(nopDecompressor{}), + ) + opts = append(opts, + grpc.WithCompressor(nopCompressor{}), + grpc.WithDecompressor(nopDecompressor{}), + ) + } + sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) + opts = append(opts, grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) + })) + opts = append(opts, grpc.WithInsecure()) + + target, stopper := 
bm.StartServer(bm.ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, sopts...) + conn := bm.NewClientConn(target, opts...) + tc := testpb.NewBenchmarkServiceClient(conn) + return func(int) { + unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + }, func() { + conn.Close() + stopper() + } +} + +func makeFuncStream(benchFeatures stats.Features) (func(int), func()) { + nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} + opts := []grpc.DialOption{} + sopts := []grpc.ServerOption{} + if benchFeatures.EnableCompressor { + sopts = append(sopts, + grpc.RPCCompressor(grpc.NewGZIPCompressor()), + grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), + ) + opts = append(opts, + grpc.WithCompressor(grpc.NewGZIPCompressor()), + grpc.WithDecompressor(grpc.NewGZIPDecompressor()), + ) + } + sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) + opts = append(opts, grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) + })) + opts = append(opts, grpc.WithInsecure()) + + target, stopper := bm.StartServer(bm.ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, sopts...) + conn := bm.NewClientConn(target, opts...) 
+ tc := testpb.NewBenchmarkServiceClient(conn) + streams := make([]testpb.BenchmarkService_StreamingCallClient, benchFeatures.MaxConcurrentCalls) + for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + streams[i] = stream + } + return func(pos int) { + streamCaller(streams[pos], benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + }, func() { + conn.Close() + stopper() + } +} + +func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) { + if err := bm.DoUnaryCall(client, reqSize, respSize); err != nil { + grpclog.Fatalf("DoUnaryCall failed: %v", err) + } +} + +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { + if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { + grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) + } +} + +func runBenchmark(caller func(int), startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { + // Warm up connection. + for i := 0; i < 10; i++ { + caller(0) + } + // Run benchmark. + startTimer() + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(benchFeatures.MaxConcurrentCalls) + bmEnd := time.Now().Add(benchtime) + var count int32 + for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { + go func(pos int) { + for { + t := time.Now() + if t.After(bmEnd) { + break + } + start := time.Now() + caller(pos) + elapse := time.Since(start) + atomic.AddInt32(&count, 1) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }(i) + } + wg.Wait() + stopTimer(count) +} + +// Initiate main function to get settings of features. 
+func init() { + var ( + workloads, traceMode, compressorMode, readLatency string + readKbps, readMtu, readMaxConcurrentCalls intSliceType + readReqSizeBytes, readRespSizeBytes intSliceType + ) + flag.StringVar(&workloads, "workloads", workloadsAll, + fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", "))) + flag.StringVar(&traceMode, "trace", modeOff, + fmt.Sprintf("Trace mode - One of: %v", strings.Join(allTraceModes, ", "))) + flag.StringVar(&readLatency, "latency", "", "Simulated one-way network latency - may be a comma-separated list") + flag.DurationVar(&benchtime, "benchtime", time.Second, "Configures the amount of time to run each benchmark") + flag.Var(&readKbps, "kbps", "Simulated network throughput (in kbps) - may be a comma-separated list") + flag.Var(&readMtu, "mtu", "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list") + flag.Var(&readMaxConcurrentCalls, "maxConcurrentCalls", "Number of concurrent RPCs during benchmarks") + flag.Var(&readReqSizeBytes, "reqSizeBytes", "Request size in bytes - may be a comma-separated list") + flag.Var(&readRespSizeBytes, "respSizeBytes", "Response size in bytes - may be a comma-separated list") + flag.StringVar(&memProfile, "memProfile", "", "Enables memory profiling output to the filename provided.") + flag.IntVar(&memProfileRate, "memProfileRate", 512*1024, "Configures the memory profiling rate. \n"+ + "memProfile should be set before setting profile rate. To include every allocated block in the profile, "+ + "set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. 
512 * 1024 by default.") + flag.StringVar(&cpuProfile, "cpuProfile", "", "Enables CPU profiling output to the filename provided") + flag.StringVar(&compressorMode, "compression", modeOff, + fmt.Sprintf("Compression mode - One of: %v", strings.Join(allCompressionModes, ", "))) + flag.StringVar(&benchmarkResultFile, "resultFile", "", "Save the benchmark result into a binary file") + flag.StringVar(&networkMode, "networkMode", "", "Network mode includes LAN, WAN, Local and Longhaul") + flag.Parse() + if flag.NArg() != 0 { + log.Fatal("Error: unparsed arguments: ", flag.Args()) + } + switch workloads { + case workloadsUnary: + runMode[0] = true + runMode[1] = false + case workloadsStreaming: + runMode[0] = false + runMode[1] = true + case workloadsAll: + runMode[0] = true + runMode[1] = true + default: + log.Fatalf("Unknown workloads setting: %v (want one of: %v)", + workloads, strings.Join(allWorkloads, ", ")) + } + enableCompressor = setMode(compressorMode) + enableTrace = setMode(traceMode) + // Time input formats as (time + unit). + readTimeFromInput(&ltc, readLatency) + readIntFromIntSlice(&kbps, readKbps) + readIntFromIntSlice(&mtu, readMtu) + readIntFromIntSlice(&maxConcurrentCalls, readMaxConcurrentCalls) + readIntFromIntSlice(&reqSizeBytes, readReqSizeBytes) + readIntFromIntSlice(&respSizeBytes, readRespSizeBytes) + // Re-write latency, kbps and mtu if network mode is set.
+ if network, ok := networks[networkMode]; ok { + ltc = []time.Duration{network.Latency} + kbps = []int{network.Kbps} + mtu = []int{network.MTU} + } +} + +func setMode(name string) []bool { + switch name { + case modeOn: + return []bool{true} + case modeOff: + return []bool{false} + case modeBoth: + return []bool{false, true} + default: + log.Fatalf("Unknown %s setting: %v (want one of: %v)", + name, name, strings.Join(allCompressionModes, ", ")) + return []bool{} + } +} + +type intSliceType []int + +func (intSlice *intSliceType) String() string { + return fmt.Sprintf("%v", *intSlice) +} + +func (intSlice *intSliceType) Set(value string) error { + if len(*intSlice) > 0 { + return errors.New("interval flag already set") + } + for _, num := range strings.Split(value, ",") { + next, err := strconv.Atoi(num) + if err != nil { + return err + } + *intSlice = append(*intSlice, next) + } + return nil +} + +func readIntFromIntSlice(values *[]int, replace intSliceType) { + // If not set replace in the flag, just return to run the default settings. 
+ if len(replace) == 0 { + return + } + *values = replace +} + +func readTimeFromInput(values *[]time.Duration, replace string) { + if strings.Compare(replace, "") != 0 { + *values = []time.Duration{} + for _, ltc := range strings.Split(replace, ",") { + duration, err := time.ParseDuration(ltc) + if err != nil { + log.Fatal(err.Error()) + } + *values = append(*values, duration) + } + } +} + +func main() { + before() + featuresPos := make([]int, 8) + // 0:enableTracing 1:ltc 2:kbps 3:mtu 4:maxC 5:reqSize 6:respSize + featuresNum := []int{len(enableTrace), len(ltc), len(kbps), len(mtu), + len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes), len(enableCompressor)} + initalPos := make([]int, len(featuresPos)) + s := stats.NewStats(10) + s.SortLatency() + var memStats runtime.MemStats + var results testing.BenchmarkResult + var startAllocs, startBytes uint64 + var startTime time.Time + start := true + var startTimer = func() { + runtime.ReadMemStats(&memStats) + startAllocs = memStats.Mallocs + startBytes = memStats.TotalAlloc + startTime = time.Now() + } + var stopTimer = func(count int32) { + runtime.ReadMemStats(&memStats) + results = testing.BenchmarkResult{N: int(count), T: time.Now().Sub(startTime), + Bytes: 0, MemAllocs: memStats.Mallocs - startAllocs, MemBytes: memStats.TotalAlloc - startBytes} + } + sharedPos := make([]bool, len(featuresPos)) + for i := 0; i < len(featuresPos); i++ { + if featuresNum[i] <= 1 { + sharedPos[i] = true + } + } + + // Run benchmarks + resultSlice := []stats.BenchResults{} + for !reflect.DeepEqual(featuresPos, initalPos) || start { + start = false + benchFeature := stats.Features{ + NetworkMode: networkMode, + EnableTrace: enableTrace[featuresPos[0]], + Latency: ltc[featuresPos[1]], + Kbps: kbps[featuresPos[2]], + Mtu: mtu[featuresPos[3]], + MaxConcurrentCalls: maxConcurrentCalls[featuresPos[4]], + ReqSizeBytes: reqSizeBytes[featuresPos[5]], + RespSizeBytes: respSizeBytes[featuresPos[6]], + EnableCompressor: 
enableCompressor[featuresPos[7]], + } + + grpc.EnableTracing = enableTrace[featuresPos[0]] + if runMode[0] { + unaryBenchmark(startTimer, stopTimer, benchFeature, benchtime, s) + s.SetBenchmarkResult("Unary", benchFeature, results.N, + results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos) + fmt.Println(s.BenchString()) + fmt.Println(s.String()) + resultSlice = append(resultSlice, s.GetBenchmarkResults()) + s.Clear() + } + if runMode[1] { + streamBenchmark(startTimer, stopTimer, benchFeature, benchtime, s) + s.SetBenchmarkResult("Stream", benchFeature, results.N, + results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos) + fmt.Println(s.BenchString()) + fmt.Println(s.String()) + resultSlice = append(resultSlice, s.GetBenchmarkResults()) + s.Clear() + } + bm.AddOne(featuresPos, featuresNum) + } + after(resultSlice) +} + +func before() { + if memProfile != "" { + runtime.MemProfileRate = memProfileRate + } + if cpuProfile != "" { + f, err := os.Create(cpuProfile) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + return + } + if err := pprof.StartCPUProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err) + f.Close() + return + } + } +} + +func after(data []stats.BenchResults) { + if cpuProfile != "" { + pprof.StopCPUProfile() // flushes profile to disk + } + if memProfile != "" { + f, err := os.Create(memProfile) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + runtime.GC() // materialize all statistics + if err = pprof.WriteHeapProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write heap profile %s: %s\n", memProfile, err) + os.Exit(2) + } + f.Close() + } + if benchmarkResultFile != "" { + f, err := os.Create(benchmarkResultFile) + if err != nil { + log.Fatalf("testing: can't write benchmark result %s: %s\n", benchmarkResultFile, err) + } + dataEncoder := gob.NewEncoder(f) + dataEncoder.Encode(data) + f.Close() + } +} + +// nopCompressor is a 
compressor that just copies data. +type nopCompressor struct{} + +func (nopCompressor) Do(w io.Writer, p []byte) error { + n, err := w.Write(p) + if err != nil { + return err + } + if n != len(p) { + return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p)) + } + return nil +} + +func (nopCompressor) Type() string { return "nop" } + +// nopDecompressor is a decompressor that just copies data. +type nopDecompressor struct{} + +func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) } +func (nopDecompressor) Type() string { return "nop" } diff --git a/vendor/google.golang.org/grpc/benchmark/benchmark.go b/vendor/google.golang.org/grpc/benchmark/benchmark.go new file mode 100644 index 000000000..f09fa4543 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/benchmark.go @@ -0,0 +1,364 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc -I grpc_testing --go_out=plugins=grpc:grpc_testing grpc_testing/control.proto grpc_testing/messages.proto grpc_testing/payloads.proto grpc_testing/services.proto grpc_testing/stats.proto + +/* +Package benchmark implements the building blocks to setup end-to-end gRPC benchmarks. 
+*/ +package benchmark + +import ( + "fmt" + "io" + "net" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/latency" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" +) + +// AddOne add 1 to the features slice +func AddOne(features []int, featuresMaxPosition []int) { + for i := len(features) - 1; i >= 0; i-- { + features[i] = (features[i] + 1) + if features[i]/featuresMaxPosition[i] == 0 { + break + } + features[i] = features[i] % featuresMaxPosition[i] + } +} + +// Allows reuse of the same testpb.Payload object. +func setPayload(p *testpb.Payload, t testpb.PayloadType, size int) { + if size < 0 { + grpclog.Fatalf("Requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") + default: + grpclog.Fatalf("Unsupported payload type: %d", t) + } + p.Type = t + p.Body = body + return +} + +func newPayload(t testpb.PayloadType, size int) *testpb.Payload { + p := new(testpb.Payload) + setPayload(p, t, size) + return p +} + +type testServer struct { +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{ + Payload: newPayload(in.ResponseType, int(in.ResponseSize)), + }, nil +} + +func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { + response := &testpb.SimpleResponse{ + Payload: new(testpb.Payload), + } + in := new(testpb.SimpleRequest) + for { + // use ServerStream directly to reuse the same testpb.SimpleRequest object + err := stream.(grpc.ServerStream).RecvMsg(in) + if err == io.EOF { + // read done. 
+ return nil + } + if err != nil { + return err + } + setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) + if err := stream.Send(response); err != nil { + return err + } + } +} + +// byteBufServer is a gRPC server that sends and receives byte buffer. +// The purpose is to benchmark the gRPC performance without protobuf serialization/deserialization overhead. +type byteBufServer struct { + respSize int32 +} + +// UnaryCall is an empty function and is not used for benchmark. +// If bytebuf UnaryCall benchmark is needed later, the function body needs to be updated. +func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil +} + +func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { + for { + var in []byte + err := stream.(grpc.ServerStream).RecvMsg(&in) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + out := make([]byte, s.respSize) + if err := stream.(grpc.ServerStream).SendMsg(&out); err != nil { + return err + } + } +} + +// ServerInfo contains the information to create a gRPC benchmark server. +type ServerInfo struct { + // Addr is the address of the server. + Addr string + + // Type is the type of the server. + // It should be "protobuf" or "bytebuf". + Type string + + // Metadata is an optional configuration. + // For "protobuf", it's ignored. + // For "bytebuf", it should be an int representing response size. + Metadata interface{} + + // Network can simulate latency + Network *latency.Network +} + +// StartServer starts a gRPC server serving a benchmark service according to info. +// It returns its listen address and a function to stop the server. 
+func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) { + lis, err := net.Listen("tcp", info.Addr) + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + nw := info.Network + if nw != nil { + lis = nw.Listener(lis) + } + opts = append(opts, grpc.WriteBufferSize(128*1024)) + opts = append(opts, grpc.ReadBufferSize(128*1024)) + s := grpc.NewServer(opts...) + switch info.Type { + case "protobuf": + testpb.RegisterBenchmarkServiceServer(s, &testServer{}) + case "bytebuf": + respSize, ok := info.Metadata.(int32) + if !ok { + grpclog.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type) + } + testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize}) + default: + grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type) + } + go s.Serve(lis) + return lis.Addr().String(), func() { + s.Stop() + } +} + +// DoUnaryCall performs an unary RPC with given stub and request and response sizes. +func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) + req := &testpb.SimpleRequest{ + ResponseType: pl.Type, + ResponseSize: int32(respSize), + Payload: pl, + } + if _, err := tc.UnaryCall(context.Background(), req); err != nil { + return fmt.Errorf("/BenchmarkService/UnaryCall(_, _) = _, %v, want _, ", err) + } + return nil +} + +// DoStreamingRoundTrip performs a round trip for a single streaming rpc. +func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { + pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) + req := &testpb.SimpleRequest{ + ResponseType: pl.Type, + ResponseSize: int32(respSize), + Payload: pl, + } + if err := stream.Send(req); err != nil { + return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want ", err) + } + if _, err := stream.Recv(); err != nil { + // EOF is a valid error here. 
+ if err == io.EOF { + return nil + } + return fmt.Errorf("/BenchmarkService/StreamingCall.Recv(_) = %v, want ", err) + } + return nil +} + +// DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using a custom codec for byte buffer. +func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { + out := make([]byte, reqSize) + if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil { + return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).SendMsg(_) = %v, want ", err) + } + var in []byte + if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { + // EOF is a valid error here. + if err == io.EOF { + return nil + } + return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).RecvMsg(_) = %v, want ", err) + } + return nil +} + +// NewClientConn creates a gRPC client connection to addr. +func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { + opts = append(opts, grpc.WithWriteBufferSize(128*1024)) + opts = append(opts, grpc.WithReadBufferSize(128*1024)) + conn, err := grpc.Dial(addr, opts...) + if err != nil { + grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) + } + return conn +} + +func runUnary(b *testing.B, benchFeatures stats.Features) { + s := stats.AddStats(b, 38) + nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} + target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) + defer stopper() + conn := NewClientConn( + target, grpc.WithInsecure(), + grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) + }), + ) + tc := testpb.NewBenchmarkServiceClient(conn) + + // Warm up connection. 
+ for i := 0; i < 10; i++ { + unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + } + ch := make(chan int, benchFeatures.MaxConcurrentCalls*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(benchFeatures.MaxConcurrentCalls) + + // Distribute the b.N calls over maxConcurrentCalls workers. + for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { + go func() { + for range ch { + start := time.Now() + unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + ch <- i + } + close(ch) + wg.Wait() + b.StopTimer() + conn.Close() +} + +func runStream(b *testing.B, benchFeatures stats.Features) { + s := stats.AddStats(b, 38) + nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} + target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) + defer stopper() + conn := NewClientConn( + target, grpc.WithInsecure(), + grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) + }), + ) + tc := testpb.NewBenchmarkServiceClient(conn) + + // Warm up connection. + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for i := 0; i < 10; i++ { + streamCaller(stream, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + } + + ch := make(chan struct{}, benchFeatures.MaxConcurrentCalls*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(benchFeatures.MaxConcurrentCalls) + + // Distribute the b.N calls over maxConcurrentCalls workers. 
+ for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + go func() { + for range ch { + start := time.Now() + streamCaller(stream, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + ch <- struct{}{} + } + close(ch) + wg.Wait() + b.StopTimer() + conn.Close() +} +func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) { + if err := DoUnaryCall(client, reqSize, respSize); err != nil { + grpclog.Fatalf("DoUnaryCall failed: %v", err) + } +} + +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { + if err := DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { + grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/benchmark_test.go b/vendor/google.golang.org/grpc/benchmark/benchmark_test.go new file mode 100644 index 000000000..8dc7d3c55 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/benchmark_test.go @@ -0,0 +1,85 @@ +// +build go1.7 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package benchmark + +import ( + "fmt" + "os" + "reflect" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark/stats" +) + +func BenchmarkClient(b *testing.B) { + enableTrace := []bool{true, false} // run both enable and disable by default + // When set the latency to 0 (no delay), the result is slower than the real result with no delay + // because latency simulation section has extra operations + latency := []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay. + kbps := []int{0, 10240} // if non-positive, infinite + mtu := []int{0} // if non-positive, infinite + maxConcurrentCalls := []int{1, 8, 64, 512} + reqSizeBytes := []int{1, 1024 * 1024} + respSizeBytes := []int{1, 1024 * 1024} + featuresCurPos := make([]int, 7) + + // 0:enableTracing 1:md 2:ltc 3:kbps 4:mtu 5:maxC 6:connCount 7:reqSize 8:respSize + featuresMaxPosition := []int{len(enableTrace), len(latency), len(kbps), len(mtu), len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes)} + initalPos := make([]int, len(featuresCurPos)) + + // run benchmarks + start := true + for !reflect.DeepEqual(featuresCurPos, initalPos) || start { + start = false + tracing := "Trace" + if !enableTrace[featuresCurPos[0]] { + tracing = "noTrace" + } + + benchFeature := stats.Features{ + EnableTrace: enableTrace[featuresCurPos[0]], + Latency: latency[featuresCurPos[1]], + Kbps: kbps[featuresCurPos[2]], + Mtu: mtu[featuresCurPos[3]], + MaxConcurrentCalls: maxConcurrentCalls[featuresCurPos[4]], + ReqSizeBytes: reqSizeBytes[featuresCurPos[5]], + RespSizeBytes: respSizeBytes[featuresCurPos[6]], + } + + grpc.EnableTracing = enableTrace[featuresCurPos[0]] + b.Run(fmt.Sprintf("Unary-%s-%s", + tracing, benchFeature.String()), func(b *testing.B) { + runUnary(b, benchFeature) + }) + + b.Run(fmt.Sprintf("Stream-%s-%s", + tracing, benchFeature.String()), func(b *testing.B) { + runStream(b, benchFeature) + }) + AddOne(featuresCurPos, featuresMaxPosition) + } +} + +func 
TestMain(m *testing.M) { + os.Exit(stats.RunTestMain(m)) +} diff --git a/vendor/google.golang.org/grpc/benchmark/benchresult/main.go b/vendor/google.golang.org/grpc/benchmark/benchresult/main.go new file mode 100644 index 000000000..40226cff1 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/benchresult/main.go @@ -0,0 +1,133 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +To format the benchmark result: + go run benchmark/benchresult/main.go resultfile + +To see the performance change based on a old result: + go run benchmark/benchresult/main.go resultfile_old resultfile +It will print the comparison result of intersection benchmarks between two files. 
+ +*/ +package main + +import ( + "encoding/gob" + "fmt" + "log" + "os" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/benchmark/stats" +) + +func createMap(fileName string, m map[string]stats.BenchResults) { + f, err := os.Open(fileName) + if err != nil { + log.Fatalf("Read file %s error: %s\n", fileName, err) + } + defer f.Close() + var data []stats.BenchResults + decoder := gob.NewDecoder(f) + if err = decoder.Decode(&data); err != nil { + log.Fatalf("Decode file %s error: %s\n", fileName, err) + } + for _, d := range data { + m[d.RunMode+"-"+d.Features.String()] = d + } +} + +func intChange(title string, val1, val2 int64) string { + return fmt.Sprintf("%10s %12s %12s %8.2f%%\n", title, strconv.FormatInt(val1, 10), + strconv.FormatInt(val2, 10), float64(val2-val1)*100/float64(val1)) +} + +func timeChange(title int, val1, val2 time.Duration) string { + return fmt.Sprintf("%10s %12s %12s %8.2f%%\n", strconv.Itoa(title)+" latency", val1.String(), + val2.String(), float64(val2-val1)*100/float64(val1)) +} + +func compareTwoMap(m1, m2 map[string]stats.BenchResults) { + for k2, v2 := range m2 { + if v1, ok := m1[k2]; ok { + changes := k2 + "\n" + changes += fmt.Sprintf("%10s %12s %12s %8s\n", "Title", "Before", "After", "Percentage") + changes += intChange("Bytes/op", v1.AllocedBytesPerOp, v2.AllocedBytesPerOp) + changes += intChange("Allocs/op", v1.AllocsPerOp, v2.AllocsPerOp) + changes += timeChange(v1.Latency[1].Percent, v1.Latency[1].Value, v2.Latency[1].Value) + changes += timeChange(v1.Latency[2].Percent, v1.Latency[2].Value, v2.Latency[2].Value) + fmt.Printf("%s\n", changes) + } + } +} + +func compareBenchmark(file1, file2 string) { + var BenchValueFile1 map[string]stats.BenchResults + var BenchValueFile2 map[string]stats.BenchResults + BenchValueFile1 = make(map[string]stats.BenchResults) + BenchValueFile2 = make(map[string]stats.BenchResults) + + createMap(file1, BenchValueFile1) + createMap(file2, BenchValueFile2) + + 
compareTwoMap(BenchValueFile1, BenchValueFile2) +} + +func printline(benchName, ltc50, ltc90, allocByte, allocsOp interface{}) { + fmt.Printf("%-80v%12v%12v%12v%12v\n", benchName, ltc50, ltc90, allocByte, allocsOp) +} + +func formatBenchmark(fileName string) { + f, err := os.Open(fileName) + if err != nil { + log.Fatalf("Read file %s error: %s\n", fileName, err) + } + defer f.Close() + var data []stats.BenchResults + decoder := gob.NewDecoder(f) + if err = decoder.Decode(&data); err != nil { + log.Fatalf("Decode file %s error: %s\n", fileName, err) + } + if len(data) == 0 { + log.Fatalf("No data in file %s\n", fileName) + } + printPos := data[0].SharedPosion + fmt.Println("\nShared features:\n" + strings.Repeat("-", 20)) + fmt.Print(stats.PartialPrintString(printPos, data[0].Features, true)) + fmt.Println(strings.Repeat("-", 35)) + for i := 0; i < len(data[0].SharedPosion); i++ { + printPos[i] = !printPos[i] + } + printline("Name", "latency-50", "latency-90", "Alloc (B)", "Alloc (#)") + for _, d := range data { + name := d.RunMode + stats.PartialPrintString(printPos, d.Features, false) + printline(name, d.Latency[1].Value.String(), d.Latency[2].Value.String(), + d.AllocedBytesPerOp, d.AllocsPerOp) + } +} + +func main() { + if len(os.Args) == 2 { + formatBenchmark(os.Args[1]) + } else { + compareBenchmark(os.Args[1], os.Args[2]) + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/client/main.go b/vendor/google.golang.org/grpc/benchmark/client/main.go new file mode 100644 index 000000000..9aa587e3a --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/client/main.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "flag" + "math" + "net" + "net/http" + _ "net/http/pprof" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" +) + +var ( + server = flag.String("server", "", "The server address") + maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs") + duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client") + trace = flag.Bool("trace", true, "Whether tracing is on") + rpcType = flag.Int("rpc_type", 0, + `Configure different client rpc type. 
Valid options are: + 0 : unary call; + 1 : streaming call.`) +) + +func unaryCaller(client testpb.BenchmarkServiceClient) { + benchmark.DoUnaryCall(client, 1, 1) +} + +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) { + benchmark.DoStreamingRoundTrip(stream, 1, 1) +} + +func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.BenchmarkServiceClient) { + s = stats.NewStats(256) + conn = benchmark.NewClientConn(*server) + tc = testpb.NewBenchmarkServiceClient(conn) + return s, conn, tc +} + +func closeLoopUnary() { + s, conn, tc := buildConnection() + + for i := 0; i < 100; i++ { + unaryCaller(tc) + } + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + for range ch { + start := time.Now() + unaryCaller(tc) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) + +} + +func closeLoopStream() { + s, conn, tc := buildConnection() + ch := make(chan int, *maxConcurrentRPCs*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(*maxConcurrentRPCs) + // Distribute RPCs over maxConcurrentCalls workers. + for i := 0; i < *maxConcurrentRPCs; i++ { + go func() { + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + // Do some warm up. 
+ for i := 0; i < 100; i++ { + streamCaller(stream) + } + for range ch { + start := time.Now() + streamCaller(stream) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + // Stop the client when time is up. + done := make(chan struct{}) + go func() { + <-time.After(time.Duration(*duration) * time.Second) + close(done) + }() + ok := true + for ok { + select { + case ch <- 0: + case <-done: + ok = false + } + } + close(ch) + wg.Wait() + conn.Close() + grpclog.Println(s.String()) +} + +func main() { + flag.Parse() + grpc.EnableTracing = *trace + go func() { + lis, err := net.Listen("tcp", ":0") + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + grpclog.Println("Client profiling address: ", lis.Addr().String()) + if err := http.Serve(lis, nil); err != nil { + grpclog.Fatalf("Failed to serve: %v", err) + } + }() + switch *rpcType { + case 0: + closeLoopUnary() + case 1: + closeLoopStream() + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go new file mode 100644 index 000000000..b88832f75 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go @@ -0,0 +1,1194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: control.proto + +/* +Package grpc_testing is a generated protocol buffer package. 
+ +It is generated from these files: + control.proto + messages.proto + payloads.proto + services.proto + stats.proto + +It has these top-level messages: + PoissonParams + UniformParams + DeterministicParams + ParetoParams + ClosedLoopParams + LoadParams + SecurityParams + ClientConfig + ClientStatus + Mark + ClientArgs + ServerConfig + ServerArgs + ServerStatus + CoreRequest + CoreResponse + Void + Scenario + Scenarios + Payload + EchoStatus + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse + ReconnectParams + ReconnectInfo + ByteBufferParams + SimpleProtoParams + ComplexProtoParams + PayloadConfig + ServerStats + HistogramParams + HistogramData + ClientStats +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ClientType int32 + +const ( + ClientType_SYNC_CLIENT ClientType = 0 + ClientType_ASYNC_CLIENT ClientType = 1 +) + +var ClientType_name = map[int32]string{ + 0: "SYNC_CLIENT", + 1: "ASYNC_CLIENT", +} +var ClientType_value = map[string]int32{ + "SYNC_CLIENT": 0, + "ASYNC_CLIENT": 1, +} + +func (x ClientType) String() string { + return proto.EnumName(ClientType_name, int32(x)) +} +func (ClientType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type ServerType int32 + +const ( + ServerType_SYNC_SERVER ServerType = 0 + ServerType_ASYNC_SERVER ServerType = 1 + ServerType_ASYNC_GENERIC_SERVER ServerType = 2 +) + +var ServerType_name = map[int32]string{ + 0: "SYNC_SERVER", + 1: "ASYNC_SERVER", + 2: "ASYNC_GENERIC_SERVER", +} +var ServerType_value = map[string]int32{ + "SYNC_SERVER": 0, + "ASYNC_SERVER": 1, + "ASYNC_GENERIC_SERVER": 2, +} + +func (x ServerType) String() string { + return proto.EnumName(ServerType_name, int32(x)) +} +func (ServerType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type RpcType int32 + +const ( + RpcType_UNARY RpcType = 0 + RpcType_STREAMING RpcType = 1 +) + +var RpcType_name = map[int32]string{ + 0: "UNARY", + 1: "STREAMING", +} +var RpcType_value = map[string]int32{ + "UNARY": 0, + "STREAMING": 1, +} + +func (x RpcType) String() string { + return proto.EnumName(RpcType_name, int32(x)) +} +func (RpcType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// Parameters of poisson process distribution, which is a good representation +// of activity coming in from independent identical stationary sources. +type PoissonParams struct { + // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). 
+ OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` +} + +func (m *PoissonParams) Reset() { *m = PoissonParams{} } +func (m *PoissonParams) String() string { return proto.CompactTextString(m) } +func (*PoissonParams) ProtoMessage() {} +func (*PoissonParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *PoissonParams) GetOfferedLoad() float64 { + if m != nil { + return m.OfferedLoad + } + return 0 +} + +type UniformParams struct { + InterarrivalLo float64 `protobuf:"fixed64,1,opt,name=interarrival_lo,json=interarrivalLo" json:"interarrival_lo,omitempty"` + InterarrivalHi float64 `protobuf:"fixed64,2,opt,name=interarrival_hi,json=interarrivalHi" json:"interarrival_hi,omitempty"` +} + +func (m *UniformParams) Reset() { *m = UniformParams{} } +func (m *UniformParams) String() string { return proto.CompactTextString(m) } +func (*UniformParams) ProtoMessage() {} +func (*UniformParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *UniformParams) GetInterarrivalLo() float64 { + if m != nil { + return m.InterarrivalLo + } + return 0 +} + +func (m *UniformParams) GetInterarrivalHi() float64 { + if m != nil { + return m.InterarrivalHi + } + return 0 +} + +type DeterministicParams struct { + OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` +} + +func (m *DeterministicParams) Reset() { *m = DeterministicParams{} } +func (m *DeterministicParams) String() string { return proto.CompactTextString(m) } +func (*DeterministicParams) ProtoMessage() {} +func (*DeterministicParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DeterministicParams) GetOfferedLoad() float64 { + if m != nil { + return m.OfferedLoad + } + return 0 +} + +type ParetoParams struct { + InterarrivalBase float64 `protobuf:"fixed64,1,opt,name=interarrival_base,json=interarrivalBase" 
json:"interarrival_base,omitempty"` + Alpha float64 `protobuf:"fixed64,2,opt,name=alpha" json:"alpha,omitempty"` +} + +func (m *ParetoParams) Reset() { *m = ParetoParams{} } +func (m *ParetoParams) String() string { return proto.CompactTextString(m) } +func (*ParetoParams) ProtoMessage() {} +func (*ParetoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ParetoParams) GetInterarrivalBase() float64 { + if m != nil { + return m.InterarrivalBase + } + return 0 +} + +func (m *ParetoParams) GetAlpha() float64 { + if m != nil { + return m.Alpha + } + return 0 +} + +// Once an RPC finishes, immediately start a new one. +// No configuration parameters needed. +type ClosedLoopParams struct { +} + +func (m *ClosedLoopParams) Reset() { *m = ClosedLoopParams{} } +func (m *ClosedLoopParams) String() string { return proto.CompactTextString(m) } +func (*ClosedLoopParams) ProtoMessage() {} +func (*ClosedLoopParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type LoadParams struct { + // Types that are valid to be assigned to Load: + // *LoadParams_ClosedLoop + // *LoadParams_Poisson + // *LoadParams_Uniform + // *LoadParams_Determ + // *LoadParams_Pareto + Load isLoadParams_Load `protobuf_oneof:"load"` +} + +func (m *LoadParams) Reset() { *m = LoadParams{} } +func (m *LoadParams) String() string { return proto.CompactTextString(m) } +func (*LoadParams) ProtoMessage() {} +func (*LoadParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type isLoadParams_Load interface { + isLoadParams_Load() +} + +type LoadParams_ClosedLoop struct { + ClosedLoop *ClosedLoopParams `protobuf:"bytes,1,opt,name=closed_loop,json=closedLoop,oneof"` +} +type LoadParams_Poisson struct { + Poisson *PoissonParams `protobuf:"bytes,2,opt,name=poisson,oneof"` +} +type LoadParams_Uniform struct { + Uniform *UniformParams `protobuf:"bytes,3,opt,name=uniform,oneof"` +} +type LoadParams_Determ struct { + Determ *DeterministicParams 
`protobuf:"bytes,4,opt,name=determ,oneof"` +} +type LoadParams_Pareto struct { + Pareto *ParetoParams `protobuf:"bytes,5,opt,name=pareto,oneof"` +} + +func (*LoadParams_ClosedLoop) isLoadParams_Load() {} +func (*LoadParams_Poisson) isLoadParams_Load() {} +func (*LoadParams_Uniform) isLoadParams_Load() {} +func (*LoadParams_Determ) isLoadParams_Load() {} +func (*LoadParams_Pareto) isLoadParams_Load() {} + +func (m *LoadParams) GetLoad() isLoadParams_Load { + if m != nil { + return m.Load + } + return nil +} + +func (m *LoadParams) GetClosedLoop() *ClosedLoopParams { + if x, ok := m.GetLoad().(*LoadParams_ClosedLoop); ok { + return x.ClosedLoop + } + return nil +} + +func (m *LoadParams) GetPoisson() *PoissonParams { + if x, ok := m.GetLoad().(*LoadParams_Poisson); ok { + return x.Poisson + } + return nil +} + +func (m *LoadParams) GetUniform() *UniformParams { + if x, ok := m.GetLoad().(*LoadParams_Uniform); ok { + return x.Uniform + } + return nil +} + +func (m *LoadParams) GetDeterm() *DeterministicParams { + if x, ok := m.GetLoad().(*LoadParams_Determ); ok { + return x.Determ + } + return nil +} + +func (m *LoadParams) GetPareto() *ParetoParams { + if x, ok := m.GetLoad().(*LoadParams_Pareto); ok { + return x.Pareto + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LoadParams) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadParams_OneofMarshaler, _LoadParams_OneofUnmarshaler, _LoadParams_OneofSizer, []interface{}{ + (*LoadParams_ClosedLoop)(nil), + (*LoadParams_Poisson)(nil), + (*LoadParams_Uniform)(nil), + (*LoadParams_Determ)(nil), + (*LoadParams_Pareto)(nil), + } +} + +func _LoadParams_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadParams) + // load + switch x := m.Load.(type) { + case *LoadParams_ClosedLoop: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClosedLoop); err != nil { + return err + } + case *LoadParams_Poisson: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Poisson); err != nil { + return err + } + case *LoadParams_Uniform: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Uniform); err != nil { + return err + } + case *LoadParams_Determ: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Determ); err != nil { + return err + } + case *LoadParams_Pareto: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Pareto); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadParams.Load has unexpected type %T", x) + } + return nil +} + +func _LoadParams_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadParams) + switch tag { + case 1: // load.closed_loop + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClosedLoopParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_ClosedLoop{msg} + return true, err + case 2: // load.poisson + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PoissonParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Poisson{msg} + return 
true, err + case 3: // load.uniform + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UniformParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Uniform{msg} + return true, err + case 4: // load.determ + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeterministicParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Determ{msg} + return true, err + case 5: // load.pareto + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ParetoParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Pareto{msg} + return true, err + default: + return false, nil + } +} + +func _LoadParams_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadParams) + // load + switch x := m.Load.(type) { + case *LoadParams_ClosedLoop: + s := proto.Size(x.ClosedLoop) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Poisson: + s := proto.Size(x.Poisson) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Uniform: + s := proto.Size(x.Uniform) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Determ: + s := proto.Size(x.Determ) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Pareto: + s := proto.Size(x.Pareto) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// presence of SecurityParams implies use of TLS +type SecurityParams struct { + UseTestCa bool `protobuf:"varint,1,opt,name=use_test_ca,json=useTestCa" json:"use_test_ca,omitempty"` + ServerHostOverride string `protobuf:"bytes,2,opt,name=server_host_override,json=serverHostOverride" 
json:"server_host_override,omitempty"` +} + +func (m *SecurityParams) Reset() { *m = SecurityParams{} } +func (m *SecurityParams) String() string { return proto.CompactTextString(m) } +func (*SecurityParams) ProtoMessage() {} +func (*SecurityParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *SecurityParams) GetUseTestCa() bool { + if m != nil { + return m.UseTestCa + } + return false +} + +func (m *SecurityParams) GetServerHostOverride() string { + if m != nil { + return m.ServerHostOverride + } + return "" +} + +type ClientConfig struct { + // List of targets to connect to. At least one target needs to be specified. + ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets,json=serverTargets" json:"server_targets,omitempty"` + ClientType ClientType `protobuf:"varint,2,opt,name=client_type,json=clientType,enum=grpc.testing.ClientType" json:"client_type,omitempty"` + SecurityParams *SecurityParams `protobuf:"bytes,3,opt,name=security_params,json=securityParams" json:"security_params,omitempty"` + // How many concurrent RPCs to start for each channel. + // For synchronous client, use a separate thread for each outstanding RPC. + OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel,json=outstandingRpcsPerChannel" json:"outstanding_rpcs_per_channel,omitempty"` + // Number of independent client channels to create. + // i-th channel will connect to server_target[i % server_targets.size()] + ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels,json=clientChannels" json:"client_channels,omitempty"` + // Only for async client. Number of threads to use to start/manage RPCs. 
+ AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads,json=asyncClientThreads" json:"async_client_threads,omitempty"` + RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,json=rpcType,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` + // The requested load for the entire client (aggregated over all the threads). + LoadParams *LoadParams `protobuf:"bytes,10,opt,name=load_params,json=loadParams" json:"load_params,omitempty"` + PayloadConfig *PayloadConfig `protobuf:"bytes,11,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` + HistogramParams *HistogramParams `protobuf:"bytes,12,opt,name=histogram_params,json=histogramParams" json:"histogram_params,omitempty"` + // Specify the cores we should run the client on, if desired + CoreList []int32 `protobuf:"varint,13,rep,packed,name=core_list,json=coreList" json:"core_list,omitempty"` + CoreLimit int32 `protobuf:"varint,14,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` +} + +func (m *ClientConfig) Reset() { *m = ClientConfig{} } +func (m *ClientConfig) String() string { return proto.CompactTextString(m) } +func (*ClientConfig) ProtoMessage() {} +func (*ClientConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ClientConfig) GetServerTargets() []string { + if m != nil { + return m.ServerTargets + } + return nil +} + +func (m *ClientConfig) GetClientType() ClientType { + if m != nil { + return m.ClientType + } + return ClientType_SYNC_CLIENT +} + +func (m *ClientConfig) GetSecurityParams() *SecurityParams { + if m != nil { + return m.SecurityParams + } + return nil +} + +func (m *ClientConfig) GetOutstandingRpcsPerChannel() int32 { + if m != nil { + return m.OutstandingRpcsPerChannel + } + return 0 +} + +func (m *ClientConfig) GetClientChannels() int32 { + if m != nil { + return m.ClientChannels + } + return 0 +} + +func (m *ClientConfig) GetAsyncClientThreads() int32 { + if m != nil { + return m.AsyncClientThreads + } 
+ return 0 +} + +func (m *ClientConfig) GetRpcType() RpcType { + if m != nil { + return m.RpcType + } + return RpcType_UNARY +} + +func (m *ClientConfig) GetLoadParams() *LoadParams { + if m != nil { + return m.LoadParams + } + return nil +} + +func (m *ClientConfig) GetPayloadConfig() *PayloadConfig { + if m != nil { + return m.PayloadConfig + } + return nil +} + +func (m *ClientConfig) GetHistogramParams() *HistogramParams { + if m != nil { + return m.HistogramParams + } + return nil +} + +func (m *ClientConfig) GetCoreList() []int32 { + if m != nil { + return m.CoreList + } + return nil +} + +func (m *ClientConfig) GetCoreLimit() int32 { + if m != nil { + return m.CoreLimit + } + return 0 +} + +type ClientStatus struct { + Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` +} + +func (m *ClientStatus) Reset() { *m = ClientStatus{} } +func (m *ClientStatus) String() string { return proto.CompactTextString(m) } +func (*ClientStatus) ProtoMessage() {} +func (*ClientStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ClientStatus) GetStats() *ClientStats { + if m != nil { + return m.Stats + } + return nil +} + +// Request current stats +type Mark struct { + // if true, the stats will be reset after taking their snapshot. 
+ Reset_ bool `protobuf:"varint,1,opt,name=reset" json:"reset,omitempty"` +} + +func (m *Mark) Reset() { *m = Mark{} } +func (m *Mark) String() string { return proto.CompactTextString(m) } +func (*Mark) ProtoMessage() {} +func (*Mark) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Mark) GetReset_() bool { + if m != nil { + return m.Reset_ + } + return false +} + +type ClientArgs struct { + // Types that are valid to be assigned to Argtype: + // *ClientArgs_Setup + // *ClientArgs_Mark + Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"` +} + +func (m *ClientArgs) Reset() { *m = ClientArgs{} } +func (m *ClientArgs) String() string { return proto.CompactTextString(m) } +func (*ClientArgs) ProtoMessage() {} +func (*ClientArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type isClientArgs_Argtype interface { + isClientArgs_Argtype() +} + +type ClientArgs_Setup struct { + Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup,oneof"` +} +type ClientArgs_Mark struct { + Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` +} + +func (*ClientArgs_Setup) isClientArgs_Argtype() {} +func (*ClientArgs_Mark) isClientArgs_Argtype() {} + +func (m *ClientArgs) GetArgtype() isClientArgs_Argtype { + if m != nil { + return m.Argtype + } + return nil +} + +func (m *ClientArgs) GetSetup() *ClientConfig { + if x, ok := m.GetArgtype().(*ClientArgs_Setup); ok { + return x.Setup + } + return nil +} + +func (m *ClientArgs) GetMark() *Mark { + if x, ok := m.GetArgtype().(*ClientArgs_Mark); ok { + return x.Mark + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ClientArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ClientArgs_OneofMarshaler, _ClientArgs_OneofUnmarshaler, _ClientArgs_OneofSizer, []interface{}{ + (*ClientArgs_Setup)(nil), + (*ClientArgs_Mark)(nil), + } +} + +func _ClientArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ClientArgs) + // argtype + switch x := m.Argtype.(type) { + case *ClientArgs_Setup: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Setup); err != nil { + return err + } + case *ClientArgs_Mark: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mark); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ClientArgs.Argtype has unexpected type %T", x) + } + return nil +} + +func _ClientArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ClientArgs) + switch tag { + case 1: // argtype.setup + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientConfig) + err := b.DecodeMessage(msg) + m.Argtype = &ClientArgs_Setup{msg} + return true, err + case 2: // argtype.mark + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mark) + err := b.DecodeMessage(msg) + m.Argtype = &ClientArgs_Mark{msg} + return true, err + default: + return false, nil + } +} + +func _ClientArgs_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ClientArgs) + // argtype + switch x := m.Argtype.(type) { + case *ClientArgs_Setup: + s := proto.Size(x.Setup) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ClientArgs_Mark: + s := proto.Size(x.Mark) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type 
%T in oneof", x)) + } + return n +} + +type ServerConfig struct { + ServerType ServerType `protobuf:"varint,1,opt,name=server_type,json=serverType,enum=grpc.testing.ServerType" json:"server_type,omitempty"` + SecurityParams *SecurityParams `protobuf:"bytes,2,opt,name=security_params,json=securityParams" json:"security_params,omitempty"` + // Port on which to listen. Zero means pick unused port. + Port int32 `protobuf:"varint,4,opt,name=port" json:"port,omitempty"` + // Only for async server. Number of threads used to serve the requests. + AsyncServerThreads int32 `protobuf:"varint,7,opt,name=async_server_threads,json=asyncServerThreads" json:"async_server_threads,omitempty"` + // Specify the number of cores to limit server to, if desired + CoreLimit int32 `protobuf:"varint,8,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` + // payload config, used in generic server + PayloadConfig *PayloadConfig `protobuf:"bytes,9,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` + // Specify the cores we should run the server on, if desired + CoreList []int32 `protobuf:"varint,10,rep,packed,name=core_list,json=coreList" json:"core_list,omitempty"` +} + +func (m *ServerConfig) Reset() { *m = ServerConfig{} } +func (m *ServerConfig) String() string { return proto.CompactTextString(m) } +func (*ServerConfig) ProtoMessage() {} +func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ServerConfig) GetServerType() ServerType { + if m != nil { + return m.ServerType + } + return ServerType_SYNC_SERVER +} + +func (m *ServerConfig) GetSecurityParams() *SecurityParams { + if m != nil { + return m.SecurityParams + } + return nil +} + +func (m *ServerConfig) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ServerConfig) GetAsyncServerThreads() int32 { + if m != nil { + return m.AsyncServerThreads + } + return 0 +} + +func (m *ServerConfig) GetCoreLimit() int32 { + if m != nil 
{ + return m.CoreLimit + } + return 0 +} + +func (m *ServerConfig) GetPayloadConfig() *PayloadConfig { + if m != nil { + return m.PayloadConfig + } + return nil +} + +func (m *ServerConfig) GetCoreList() []int32 { + if m != nil { + return m.CoreList + } + return nil +} + +type ServerArgs struct { + // Types that are valid to be assigned to Argtype: + // *ServerArgs_Setup + // *ServerArgs_Mark + Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"` +} + +func (m *ServerArgs) Reset() { *m = ServerArgs{} } +func (m *ServerArgs) String() string { return proto.CompactTextString(m) } +func (*ServerArgs) ProtoMessage() {} +func (*ServerArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +type isServerArgs_Argtype interface { + isServerArgs_Argtype() +} + +type ServerArgs_Setup struct { + Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup,oneof"` +} +type ServerArgs_Mark struct { + Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` +} + +func (*ServerArgs_Setup) isServerArgs_Argtype() {} +func (*ServerArgs_Mark) isServerArgs_Argtype() {} + +func (m *ServerArgs) GetArgtype() isServerArgs_Argtype { + if m != nil { + return m.Argtype + } + return nil +} + +func (m *ServerArgs) GetSetup() *ServerConfig { + if x, ok := m.GetArgtype().(*ServerArgs_Setup); ok { + return x.Setup + } + return nil +} + +func (m *ServerArgs) GetMark() *Mark { + if x, ok := m.GetArgtype().(*ServerArgs_Mark); ok { + return x.Mark + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServerArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerArgs_OneofMarshaler, _ServerArgs_OneofUnmarshaler, _ServerArgs_OneofSizer, []interface{}{ + (*ServerArgs_Setup)(nil), + (*ServerArgs_Mark)(nil), + } +} + +func _ServerArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerArgs) + // argtype + switch x := m.Argtype.(type) { + case *ServerArgs_Setup: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Setup); err != nil { + return err + } + case *ServerArgs_Mark: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mark); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServerArgs.Argtype has unexpected type %T", x) + } + return nil +} + +func _ServerArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerArgs) + switch tag { + case 1: // argtype.setup + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerConfig) + err := b.DecodeMessage(msg) + m.Argtype = &ServerArgs_Setup{msg} + return true, err + case 2: // argtype.mark + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mark) + err := b.DecodeMessage(msg) + m.Argtype = &ServerArgs_Mark{msg} + return true, err + default: + return false, nil + } +} + +func _ServerArgs_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerArgs) + // argtype + switch x := m.Argtype.(type) { + case *ServerArgs_Setup: + s := proto.Size(x.Setup) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerArgs_Mark: + s := proto.Size(x.Mark) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type 
%T in oneof", x)) + } + return n +} + +type ServerStatus struct { + Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + // the port bound by the server + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + // Number of cores available to the server + Cores int32 `protobuf:"varint,3,opt,name=cores" json:"cores,omitempty"` +} + +func (m *ServerStatus) Reset() { *m = ServerStatus{} } +func (m *ServerStatus) String() string { return proto.CompactTextString(m) } +func (*ServerStatus) ProtoMessage() {} +func (*ServerStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *ServerStatus) GetStats() *ServerStats { + if m != nil { + return m.Stats + } + return nil +} + +func (m *ServerStatus) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ServerStatus) GetCores() int32 { + if m != nil { + return m.Cores + } + return 0 +} + +type CoreRequest struct { +} + +func (m *CoreRequest) Reset() { *m = CoreRequest{} } +func (m *CoreRequest) String() string { return proto.CompactTextString(m) } +func (*CoreRequest) ProtoMessage() {} +func (*CoreRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +type CoreResponse struct { + // Number of cores available on the server + Cores int32 `protobuf:"varint,1,opt,name=cores" json:"cores,omitempty"` +} + +func (m *CoreResponse) Reset() { *m = CoreResponse{} } +func (m *CoreResponse) String() string { return proto.CompactTextString(m) } +func (*CoreResponse) ProtoMessage() {} +func (*CoreResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *CoreResponse) GetCores() int32 { + if m != nil { + return m.Cores + } + return 0 +} + +type Void struct { +} + +func (m *Void) Reset() { *m = Void{} } +func (m *Void) String() string { return proto.CompactTextString(m) } +func (*Void) ProtoMessage() {} +func (*Void) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +// A 
single performance scenario: input to qps_json_driver +type Scenario struct { + // Human readable name for this scenario + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Client configuration + ClientConfig *ClientConfig `protobuf:"bytes,2,opt,name=client_config,json=clientConfig" json:"client_config,omitempty"` + // Number of clients to start for the test + NumClients int32 `protobuf:"varint,3,opt,name=num_clients,json=numClients" json:"num_clients,omitempty"` + // Server configuration + ServerConfig *ServerConfig `protobuf:"bytes,4,opt,name=server_config,json=serverConfig" json:"server_config,omitempty"` + // Number of servers to start for the test + NumServers int32 `protobuf:"varint,5,opt,name=num_servers,json=numServers" json:"num_servers,omitempty"` + // Warmup period, in seconds + WarmupSeconds int32 `protobuf:"varint,6,opt,name=warmup_seconds,json=warmupSeconds" json:"warmup_seconds,omitempty"` + // Benchmark time, in seconds + BenchmarkSeconds int32 `protobuf:"varint,7,opt,name=benchmark_seconds,json=benchmarkSeconds" json:"benchmark_seconds,omitempty"` + // Number of workers to spawn locally (usually zero) + SpawnLocalWorkerCount int32 `protobuf:"varint,8,opt,name=spawn_local_worker_count,json=spawnLocalWorkerCount" json:"spawn_local_worker_count,omitempty"` +} + +func (m *Scenario) Reset() { *m = Scenario{} } +func (m *Scenario) String() string { return proto.CompactTextString(m) } +func (*Scenario) ProtoMessage() {} +func (*Scenario) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *Scenario) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Scenario) GetClientConfig() *ClientConfig { + if m != nil { + return m.ClientConfig + } + return nil +} + +func (m *Scenario) GetNumClients() int32 { + if m != nil { + return m.NumClients + } + return 0 +} + +func (m *Scenario) GetServerConfig() *ServerConfig { + if m != nil { + return m.ServerConfig + } + return nil +} + +func (m 
*Scenario) GetNumServers() int32 { + if m != nil { + return m.NumServers + } + return 0 +} + +func (m *Scenario) GetWarmupSeconds() int32 { + if m != nil { + return m.WarmupSeconds + } + return 0 +} + +func (m *Scenario) GetBenchmarkSeconds() int32 { + if m != nil { + return m.BenchmarkSeconds + } + return 0 +} + +func (m *Scenario) GetSpawnLocalWorkerCount() int32 { + if m != nil { + return m.SpawnLocalWorkerCount + } + return 0 +} + +// A set of scenarios to be run with qps_json_driver +type Scenarios struct { + Scenarios []*Scenario `protobuf:"bytes,1,rep,name=scenarios" json:"scenarios,omitempty"` +} + +func (m *Scenarios) Reset() { *m = Scenarios{} } +func (m *Scenarios) String() string { return proto.CompactTextString(m) } +func (*Scenarios) ProtoMessage() {} +func (*Scenarios) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *Scenarios) GetScenarios() []*Scenario { + if m != nil { + return m.Scenarios + } + return nil +} + +func init() { + proto.RegisterType((*PoissonParams)(nil), "grpc.testing.PoissonParams") + proto.RegisterType((*UniformParams)(nil), "grpc.testing.UniformParams") + proto.RegisterType((*DeterministicParams)(nil), "grpc.testing.DeterministicParams") + proto.RegisterType((*ParetoParams)(nil), "grpc.testing.ParetoParams") + proto.RegisterType((*ClosedLoopParams)(nil), "grpc.testing.ClosedLoopParams") + proto.RegisterType((*LoadParams)(nil), "grpc.testing.LoadParams") + proto.RegisterType((*SecurityParams)(nil), "grpc.testing.SecurityParams") + proto.RegisterType((*ClientConfig)(nil), "grpc.testing.ClientConfig") + proto.RegisterType((*ClientStatus)(nil), "grpc.testing.ClientStatus") + proto.RegisterType((*Mark)(nil), "grpc.testing.Mark") + proto.RegisterType((*ClientArgs)(nil), "grpc.testing.ClientArgs") + proto.RegisterType((*ServerConfig)(nil), "grpc.testing.ServerConfig") + proto.RegisterType((*ServerArgs)(nil), "grpc.testing.ServerArgs") + proto.RegisterType((*ServerStatus)(nil), "grpc.testing.ServerStatus") + 
proto.RegisterType((*CoreRequest)(nil), "grpc.testing.CoreRequest") + proto.RegisterType((*CoreResponse)(nil), "grpc.testing.CoreResponse") + proto.RegisterType((*Void)(nil), "grpc.testing.Void") + proto.RegisterType((*Scenario)(nil), "grpc.testing.Scenario") + proto.RegisterType((*Scenarios)(nil), "grpc.testing.Scenarios") + proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) + proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) + proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) +} + +func init() { proto.RegisterFile("control.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1179 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x6f, 0x6f, 0xdb, 0xb6, + 0x13, 0xb6, 0x1d, 0xdb, 0xb1, 0x4e, 0xb6, 0xe3, 0x1f, 0x7f, 0xe9, 0xa0, 0xa6, 0x69, 0x97, 0x6a, + 0x1b, 0x16, 0x64, 0x40, 0x5a, 0x78, 0x05, 0xba, 0x62, 0x2f, 0x02, 0xc7, 0x33, 0xea, 0x00, 0x69, + 0x96, 0xd1, 0x69, 0x87, 0xbe, 0x12, 0x18, 0x99, 0xb1, 0x85, 0xc8, 0xa2, 0x46, 0x52, 0x09, 0xf2, + 0x15, 0xf6, 0x99, 0xf6, 0x39, 0xf6, 0x35, 0xf6, 0x15, 0x06, 0xfe, 0x91, 0x23, 0xb9, 0x06, 0x9a, + 0x6d, 0xef, 0xc4, 0xbb, 0xe7, 0xe1, 0x91, 0xf7, 0xdc, 0x1d, 0x05, 0x9d, 0x90, 0x25, 0x92, 0xb3, + 0xf8, 0x30, 0xe5, 0x4c, 0x32, 0xd4, 0x9e, 0xf1, 0x34, 0x3c, 0x94, 0x54, 0xc8, 0x28, 0x99, 0xed, + 0x74, 0x53, 0x72, 0x17, 0x33, 0x32, 0x15, 0xc6, 0xbb, 0xe3, 0x0a, 0x49, 0xa4, 0x5d, 0xf8, 0x7d, + 0xe8, 0x9c, 0xb3, 0x48, 0x08, 0x96, 0x9c, 0x13, 0x4e, 0x16, 0x02, 0x3d, 0x87, 0x36, 0xbb, 0xba, + 0xa2, 0x9c, 0x4e, 0x03, 0x45, 0xf2, 0xaa, 0x7b, 0xd5, 0xfd, 0x2a, 0x76, 0xad, 0xed, 0x94, 0x91, + 0xa9, 0x4f, 0xa0, 0xf3, 0x3e, 0x89, 0xae, 0x18, 0x5f, 0x58, 0xce, 0xb7, 0xb0, 0x15, 0x25, 0x92, + 0x72, 0xc2, 0x79, 0x74, 0x43, 0xe2, 0x20, 0x66, 0x96, 0xd6, 0x2d, 0x9a, 0x4f, 0xd9, 0x27, 0xc0, + 0x79, 0xe4, 0xd5, 0x3e, 0x05, 0x8e, 0x23, 0xff, 0x07, 0xf8, 0xff, 0x4f, 0x54, 0x52, 0xbe, 0x88, + 
0x92, 0x48, 0xc8, 0x28, 0x7c, 0xf8, 0xe1, 0x7e, 0x81, 0xf6, 0x39, 0xe1, 0x54, 0x32, 0x4b, 0xf9, + 0x0e, 0xfe, 0x57, 0x0a, 0x79, 0x49, 0x04, 0xb5, 0xbc, 0x5e, 0xd1, 0x71, 0x4c, 0x04, 0x45, 0xdb, + 0xd0, 0x20, 0x71, 0x3a, 0x27, 0xf6, 0x54, 0x66, 0xe1, 0x23, 0xe8, 0x0d, 0x63, 0x26, 0x54, 0x00, + 0x96, 0x9a, 0x6d, 0xfd, 0x3f, 0x6a, 0x00, 0x2a, 0x9e, 0x8d, 0x32, 0x00, 0x37, 0xd4, 0x90, 0x20, + 0x66, 0x2c, 0xd5, 0xfb, 0xbb, 0xfd, 0x67, 0x87, 0x45, 0x1d, 0x0e, 0x57, 0xf7, 0x18, 0x57, 0x30, + 0x84, 0x4b, 0x1b, 0x7a, 0x0d, 0x9b, 0xa9, 0x51, 0x42, 0x47, 0x77, 0xfb, 0x4f, 0xca, 0xf4, 0x92, + 0x4c, 0xe3, 0x0a, 0xce, 0xd1, 0x8a, 0x98, 0x19, 0x39, 0xbc, 0x8d, 0x75, 0xc4, 0x92, 0x56, 0x8a, + 0x68, 0xd1, 0xe8, 0x47, 0x68, 0x4e, 0x75, 0x92, 0xbd, 0xba, 0xe6, 0x3d, 0x2f, 0xf3, 0xd6, 0x08, + 0x30, 0xae, 0x60, 0x4b, 0x41, 0xaf, 0xa0, 0x99, 0xea, 0x3c, 0x7b, 0x0d, 0x4d, 0xde, 0x59, 0x39, + 0x6d, 0x41, 0x03, 0xc5, 0x32, 0xd8, 0xe3, 0x26, 0xd4, 0x95, 0x70, 0xfe, 0x25, 0x74, 0x27, 0x34, + 0xcc, 0x78, 0x24, 0xef, 0x6c, 0x06, 0x9f, 0x81, 0x9b, 0x09, 0x1a, 0x28, 0x7e, 0x10, 0x12, 0x9d, + 0xc1, 0x16, 0x76, 0x32, 0x41, 0x2f, 0xa8, 0x90, 0x43, 0x82, 0x5e, 0xc2, 0xb6, 0xa0, 0xfc, 0x86, + 0xf2, 0x60, 0xce, 0x84, 0x0c, 0xd8, 0x0d, 0xe5, 0x3c, 0x9a, 0x52, 0x9d, 0x2b, 0x07, 0x23, 0xe3, + 0x1b, 0x33, 0x21, 0x7f, 0xb6, 0x1e, 0xff, 0xf7, 0x06, 0xb4, 0x87, 0x71, 0x44, 0x13, 0x39, 0x64, + 0xc9, 0x55, 0x34, 0x43, 0xdf, 0x40, 0xd7, 0x6e, 0x21, 0x09, 0x9f, 0x51, 0x29, 0xbc, 0xea, 0xde, + 0xc6, 0xbe, 0x83, 0x3b, 0xc6, 0x7a, 0x61, 0x8c, 0xe8, 0x8d, 0xd2, 0x52, 0xd1, 0x02, 0x79, 0x97, + 0x9a, 0x00, 0xdd, 0xbe, 0xb7, 0xaa, 0xa5, 0x02, 0x5c, 0xdc, 0xa5, 0x54, 0x69, 0x98, 0x7f, 0xa3, + 0x11, 0x6c, 0x09, 0x7b, 0xad, 0x20, 0xd5, 0xf7, 0xb2, 0x92, 0xec, 0x96, 0xe9, 0xe5, 0xbb, 0xe3, + 0xae, 0x28, 0xe7, 0xe2, 0x08, 0x76, 0x59, 0x26, 0x85, 0x24, 0xc9, 0x34, 0x4a, 0x66, 0x01, 0x4f, + 0x43, 0x11, 0xa4, 0x94, 0x07, 0xe1, 0x9c, 0x24, 0x09, 0x8d, 0xb5, 0x5c, 0x0d, 0xfc, 0xb8, 0x80, + 0xc1, 0x69, 0x28, 0xce, 0x29, 0x1f, 
0x1a, 0x80, 0xea, 0x33, 0x7b, 0x05, 0x4b, 0x11, 0x5a, 0xa5, + 0x06, 0xee, 0x1a, 0xb3, 0xc5, 0x09, 0x95, 0x55, 0x22, 0xee, 0x92, 0x30, 0xc8, 0x6f, 0x3c, 0xe7, + 0x94, 0x4c, 0x85, 0xb7, 0xa9, 0xd1, 0x48, 0xfb, 0xec, 0x5d, 0x8d, 0x07, 0xbd, 0x84, 0x16, 0x4f, + 0x43, 0x93, 0x9a, 0x96, 0x4e, 0xcd, 0xa3, 0xf2, 0xdd, 0x70, 0x1a, 0xea, 0xbc, 0x6c, 0x72, 0xf3, + 0xa1, 0xf2, 0xa9, 0x34, 0xcf, 0x13, 0x02, 0x3a, 0x21, 0x2b, 0xf9, 0xbc, 0x6f, 0x25, 0x0c, 0xf1, + 0x7d, 0x5b, 0x1d, 0x43, 0x3e, 0xbc, 0x82, 0x50, 0x6b, 0xe8, 0xb9, 0x6b, 0x5b, 0xc3, 0x60, 0x8c, + 0xcc, 0xb8, 0x93, 0x16, 0x97, 0x68, 0x0c, 0xbd, 0x79, 0x24, 0x24, 0x9b, 0x71, 0xb2, 0xc8, 0xcf, + 0xd0, 0xd6, 0xbb, 0x3c, 0x2d, 0xef, 0x32, 0xce, 0x51, 0xf6, 0x20, 0x5b, 0xf3, 0xb2, 0x01, 0x3d, + 0x01, 0x27, 0x64, 0x9c, 0x06, 0x71, 0x24, 0xa4, 0xd7, 0xd9, 0xdb, 0xd8, 0x6f, 0xe0, 0x96, 0x32, + 0x9c, 0x46, 0x42, 0xa2, 0xa7, 0x00, 0xd6, 0xb9, 0x88, 0xa4, 0xd7, 0xd5, 0xf9, 0x73, 0x8c, 0x77, + 0x11, 0x49, 0xff, 0x28, 0xaf, 0xc5, 0x89, 0x24, 0x32, 0x13, 0xe8, 0x05, 0x34, 0xf4, 0x18, 0xb6, + 0xa3, 0xe2, 0xf1, 0xba, 0xf2, 0x52, 0x50, 0x81, 0x0d, 0xce, 0xdf, 0x85, 0xfa, 0x3b, 0xc2, 0xaf, + 0xd5, 0x88, 0xe2, 0x54, 0x50, 0x69, 0x3b, 0xc4, 0x2c, 0xfc, 0x0c, 0xc0, 0x70, 0x06, 0x7c, 0x26, + 0x50, 0x1f, 0x1a, 0x82, 0xca, 0x2c, 0x9f, 0x43, 0x3b, 0xeb, 0x36, 0x37, 0xd9, 0x19, 0x57, 0xb0, + 0x81, 0xa2, 0x7d, 0xa8, 0x2f, 0x08, 0xbf, 0xb6, 0xb3, 0x07, 0x95, 0x29, 0x2a, 0xf2, 0xb8, 0x82, + 0x35, 0xe2, 0xd8, 0x81, 0x4d, 0xc2, 0x67, 0xaa, 0x00, 0xfc, 0x3f, 0x6b, 0xd0, 0x9e, 0xe8, 0xe6, + 0xb1, 0xc9, 0x7e, 0x03, 0x6e, 0xde, 0x62, 0xaa, 0x40, 0xaa, 0xeb, 0x7a, 0xc7, 0x10, 0x4c, 0xef, + 0x88, 0xe5, 0xf7, 0xba, 0xde, 0xa9, 0xfd, 0x8b, 0xde, 0x41, 0x50, 0x4f, 0x19, 0x97, 0xb6, 0x47, + 0xf4, 0xf7, 0x7d, 0x95, 0xe7, 0x67, 0x5b, 0x53, 0xe5, 0xf6, 0x54, 0xb6, 0xca, 0xcb, 0x6a, 0xb6, + 0x56, 0xd4, 0x5c, 0x53, 0x97, 0xce, 0x3f, 0xae, 0xcb, 0x52, 0x35, 0x41, 0xb9, 0x9a, 0x94, 0x9e, + 0xe6, 0x40, 0x0f, 0xd0, 0xb3, 0x28, 0xc0, 0x7f, 0xd4, 0x33, 0xca, 0xe5, 
0x7c, 0x50, 0x95, 0xde, + 0x43, 0xf3, 0x2a, 0x5d, 0x66, 0xbf, 0x56, 0xc8, 0xfe, 0x36, 0x34, 0xd4, 0xbd, 0xcc, 0x28, 0x6c, + 0x60, 0xb3, 0xf0, 0x3b, 0xe0, 0x0e, 0x19, 0xa7, 0x98, 0xfe, 0x96, 0x51, 0x21, 0xfd, 0xaf, 0xa1, + 0x6d, 0x96, 0x22, 0x65, 0x89, 0x79, 0x89, 0x0d, 0xa9, 0x5a, 0x24, 0x35, 0xa1, 0xfe, 0x81, 0x45, + 0x53, 0xff, 0xaf, 0x1a, 0xb4, 0x26, 0x21, 0x4d, 0x08, 0x8f, 0x98, 0x8a, 0x99, 0x90, 0x85, 0x29, + 0x36, 0x07, 0xeb, 0x6f, 0x74, 0x04, 0x9d, 0x7c, 0x00, 0x1a, 0x7d, 0x6a, 0x9f, 0xeb, 0x04, 0xdc, + 0x0e, 0x8b, 0x6f, 0xc5, 0x97, 0xe0, 0x26, 0xd9, 0xc2, 0x8e, 0xc5, 0xfc, 0xe8, 0x90, 0x64, 0x0b, + 0xc3, 0x51, 0x33, 0xda, 0x3e, 0x1b, 0x79, 0x84, 0xfa, 0xe7, 0xb4, 0xc1, 0x6d, 0x51, 0x6c, 0x15, + 0x1b, 0xc1, 0xd8, 0xf2, 0xf9, 0xac, 0x22, 0x18, 0x8e, 0x50, 0xcf, 0xd5, 0x2d, 0xe1, 0x8b, 0x2c, + 0x0d, 0x04, 0x0d, 0x59, 0x32, 0x15, 0x5e, 0x53, 0x63, 0x3a, 0xc6, 0x3a, 0x31, 0x46, 0xf5, 0x83, + 0x73, 0x49, 0x93, 0x70, 0xae, 0xb4, 0x5c, 0x22, 0x4d, 0x65, 0xf7, 0x96, 0x8e, 0x1c, 0xfc, 0x1a, + 0x3c, 0x91, 0x92, 0xdb, 0x24, 0x88, 0x59, 0x48, 0xe2, 0xe0, 0x96, 0xf1, 0x6b, 0x7d, 0x83, 0x2c, + 0xc9, 0xab, 0xfc, 0x91, 0xf6, 0x9f, 0x2a, 0xf7, 0xaf, 0xda, 0x3b, 0x54, 0x4e, 0x7f, 0x00, 0x4e, + 0x9e, 0x70, 0x81, 0x5e, 0x81, 0x23, 0xf2, 0x85, 0x7e, 0x43, 0xdd, 0xfe, 0x17, 0x2b, 0xf7, 0xb6, + 0x6e, 0x7c, 0x0f, 0x3c, 0x78, 0x91, 0xcf, 0x28, 0xdd, 0xee, 0x5b, 0xe0, 0x4e, 0x3e, 0x9e, 0x0d, + 0x83, 0xe1, 0xe9, 0xc9, 0xe8, 0xec, 0xa2, 0x57, 0x41, 0x3d, 0x68, 0x0f, 0x8a, 0x96, 0xea, 0xc1, + 0x49, 0xde, 0x04, 0x25, 0xc2, 0x64, 0x84, 0x3f, 0x8c, 0x70, 0x91, 0x60, 0x2d, 0x55, 0xe4, 0xc1, + 0xb6, 0xb1, 0xbc, 0x1d, 0x9d, 0x8d, 0xf0, 0xc9, 0xd2, 0x53, 0x3b, 0xf8, 0x0a, 0x36, 0xed, 0xbb, + 0x84, 0x1c, 0x68, 0xbc, 0x3f, 0x1b, 0xe0, 0x8f, 0xbd, 0x0a, 0xea, 0x80, 0x33, 0xb9, 0xc0, 0xa3, + 0xc1, 0xbb, 0x93, 0xb3, 0xb7, 0xbd, 0xea, 0x65, 0x53, 0xff, 0x12, 0x7f, 0xff, 0x77, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x75, 0x59, 0xf4, 0x03, 0x4e, 0x0b, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto new file mode 100644 index 000000000..9379ef49a --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto @@ -0,0 +1,186 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "payloads.proto"; +import "stats.proto"; + +package grpc.testing; + +enum ClientType { + SYNC_CLIENT = 0; + ASYNC_CLIENT = 1; +} + +enum ServerType { + SYNC_SERVER = 0; + ASYNC_SERVER = 1; + ASYNC_GENERIC_SERVER = 2; +} + +enum RpcType { + UNARY = 0; + STREAMING = 1; +} + +// Parameters of poisson process distribution, which is a good representation +// of activity coming in from independent identical stationary sources. +message PoissonParams { + // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). + double offered_load = 1; +} + +message UniformParams { + double interarrival_lo = 1; + double interarrival_hi = 2; +} + +message DeterministicParams { + double offered_load = 1; +} + +message ParetoParams { + double interarrival_base = 1; + double alpha = 2; +} + +// Once an RPC finishes, immediately start a new one. +// No configuration parameters needed. 
+message ClosedLoopParams { +} + +message LoadParams { + oneof load { + ClosedLoopParams closed_loop = 1; + PoissonParams poisson = 2; + UniformParams uniform = 3; + DeterministicParams determ = 4; + ParetoParams pareto = 5; + }; +} + +// presence of SecurityParams implies use of TLS +message SecurityParams { + bool use_test_ca = 1; + string server_host_override = 2; +} + +message ClientConfig { + // List of targets to connect to. At least one target needs to be specified. + repeated string server_targets = 1; + ClientType client_type = 2; + SecurityParams security_params = 3; + // How many concurrent RPCs to start for each channel. + // For synchronous client, use a separate thread for each outstanding RPC. + int32 outstanding_rpcs_per_channel = 4; + // Number of independent client channels to create. + // i-th channel will connect to server_target[i % server_targets.size()] + int32 client_channels = 5; + // Only for async client. Number of threads to use to start/manage RPCs. + int32 async_client_threads = 7; + RpcType rpc_type = 8; + // The requested load for the entire client (aggregated over all the threads). + LoadParams load_params = 10; + PayloadConfig payload_config = 11; + HistogramParams histogram_params = 12; + + // Specify the cores we should run the client on, if desired + repeated int32 core_list = 13; + int32 core_limit = 14; +} + +message ClientStatus { + ClientStats stats = 1; +} + +// Request current stats +message Mark { + // if true, the stats will be reset after taking their snapshot. + bool reset = 1; +} + +message ClientArgs { + oneof argtype { + ClientConfig setup = 1; + Mark mark = 2; + } +} + +message ServerConfig { + ServerType server_type = 1; + SecurityParams security_params = 2; + // Port on which to listen. Zero means pick unused port. + int32 port = 4; + // Only for async server. Number of threads used to serve the requests. 
+ int32 async_server_threads = 7; + // Specify the number of cores to limit server to, if desired + int32 core_limit = 8; + // payload config, used in generic server + PayloadConfig payload_config = 9; + + // Specify the cores we should run the server on, if desired + repeated int32 core_list = 10; +} + +message ServerArgs { + oneof argtype { + ServerConfig setup = 1; + Mark mark = 2; + } +} + +message ServerStatus { + ServerStats stats = 1; + // the port bound by the server + int32 port = 2; + // Number of cores available to the server + int32 cores = 3; +} + +message CoreRequest { +} + +message CoreResponse { + // Number of cores available on the server + int32 cores = 1; +} + +message Void { +} + +// A single performance scenario: input to qps_json_driver +message Scenario { + // Human readable name for this scenario + string name = 1; + // Client configuration + ClientConfig client_config = 2; + // Number of clients to start for the test + int32 num_clients = 3; + // Server configuration + ServerConfig server_config = 4; + // Number of servers to start for the test + int32 num_servers = 5; + // Warmup period, in seconds + int32 warmup_seconds = 6; + // Benchmark time, in seconds + int32 benchmark_seconds = 7; + // Number of workers to spawn locally (usually zero) + int32 spawn_local_worker_count = 8; +} + +// A set of scenarios to be run with qps_json_driver +message Scenarios { + repeated Scenario scenarios = 1; +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go new file mode 100644 index 000000000..b34c5d59a --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go @@ -0,0 +1,479 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: messages.proto + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The type of payload that should be returned. +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. + PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Compression algorithms +type CompressionType int32 + +const ( + // No compression + CompressionType_NONE CompressionType = 0 + CompressionType_GZIP CompressionType = 1 + CompressionType_DEFLATE CompressionType = 2 +) + +var CompressionType_name = map[int32]string{ + 0: "NONE", + 1: "GZIP", + 2: "DEFLATE", +} +var CompressionType_value = map[string]int32{ + "NONE": 0, + "GZIP": 1, + "DEFLATE": 2, +} + +func (x CompressionType) String() string { + return proto.EnumName(CompressionType_name, int32(x)) +} +func (CompressionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. + Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. 
+ Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Payload) GetType() PayloadType { + if m != nil { + return m.Type + } + return PayloadType_COMPRESSABLE +} + +func (m *Payload) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +// A protobuf representation for grpc status. This is used by test +// clients to specify a status that the server should attempt to return. +type EchoStatus struct { + Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *EchoStatus) Reset() { *m = EchoStatus{} } +func (m *EchoStatus) String() string { return proto.CompactTextString(m) } +func (*EchoStatus) ProtoMessage() {} +func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *EchoStatus) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *EchoStatus) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // Optional input payload sent along with the request. 
+ Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. + FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` + // Compression algorithm to be used by the server for the response (stream) + ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` + // Whether server should return a given status + ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *SimpleRequest) GetResponseType() PayloadType { + if m != nil { + return m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *SimpleRequest) GetResponseSize() int32 { + if m != nil { + return m.ResponseSize + } + return 0 +} + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetFillUsername() bool { + if m != nil { + return m.FillUsername + } + return false +} + +func (m *SimpleRequest) GetFillOauthScope() bool { + if m != nil { + return m.FillOauthScope + } + return false +} + +func (m *SimpleRequest) GetResponseCompression() CompressionType { + if m != nil { + return m.ResponseCompression + } + return CompressionType_NONE +} + +func (m *SimpleRequest) GetResponseStatus() *EchoStatus { + if m != nil { + return m.ResponseStatus + } + 
return nil +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. + OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleResponse) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *SimpleResponse) GetOauthScope() string { + if m != nil { + return m.OauthScope + } + return "" +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} +func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. 
+ AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} +func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { + if m != nil { + return m.AggregatedPayloadSize + } + return 0 +} + +// Configuration for a particular response. +type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} +func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *ResponseParameters) GetSize() int32 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *ResponseParameters) GetIntervalUs() int32 { + if m != nil { + return m.IntervalUs + } + return 0 +} + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. 
+ ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Compression algorithm to be used by the server for the response (stream) + ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` + // Whether server should return a given status + ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} +func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { + if m != nil { + return m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *StreamingOutputCallRequest) GetResponseCompression() CompressionType { + if m != nil { + return m.ResponseCompression + } + return CompressionType_NONE +} + +func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { + if m != nil { + return m.ResponseStatus + } + return 
nil +} + +// Server-streaming response, as configured by the request and parameters. +type StreamingOutputCallResponse struct { + // Payload to increase response size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} +func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// For reconnect interop test only. +// Client tells server what reconnection parameters it used. +type ReconnectParams struct { + MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs" json:"max_reconnect_backoff_ms,omitempty"` +} + +func (m *ReconnectParams) Reset() { *m = ReconnectParams{} } +func (m *ReconnectParams) String() string { return proto.CompactTextString(m) } +func (*ReconnectParams) ProtoMessage() {} +func (*ReconnectParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *ReconnectParams) GetMaxReconnectBackoffMs() int32 { + if m != nil { + return m.MaxReconnectBackoffMs + } + return 0 +} + +// For reconnect interop test only. +// Server tells client whether its reconnects are following the spec and the +// reconnect backoffs it saw. 
+type ReconnectInfo struct { + Passed bool `protobuf:"varint,1,opt,name=passed" json:"passed,omitempty"` + BackoffMs []int32 `protobuf:"varint,2,rep,packed,name=backoff_ms,json=backoffMs" json:"backoff_ms,omitempty"` +} + +func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} } +func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) } +func (*ReconnectInfo) ProtoMessage() {} +func (*ReconnectInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *ReconnectInfo) GetPassed() bool { + if m != nil { + return m.Passed + } + return false +} + +func (m *ReconnectInfo) GetBackoffMs() []int32 { + if m != nil { + return m.BackoffMs + } + return nil +} + +func init() { + proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") + proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus") + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") + proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") + proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") + proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") + proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") + proto.RegisterType((*ReconnectParams)(nil), "grpc.testing.ReconnectParams") + proto.RegisterType((*ReconnectInfo)(nil), "grpc.testing.ReconnectInfo") + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) + proto.RegisterEnum("grpc.testing.CompressionType", CompressionType_name, CompressionType_value) +} + +func init() { proto.RegisterFile("messages.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 652 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xc5, 0xf9, 0xee, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x11, 0x99, 0x4b, 0x54, 0x89, + 0x20, 0x05, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9a, 0x84, 0x75, 0x7b, 0xe1, 0x62, + 0x6d, 0x9c, 0x8d, 0x6b, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x9a, 0x1e, 0xb8, 0xf3, 0x83, 0xb9, 0xa3, + 0x5d, 0x7f, 0xc4, 0x69, 0x7b, 0x68, 0xe1, 0xc2, 0x6d, 0xf7, 0xed, 0x9b, 0x97, 0x79, 0x33, 0xcf, + 0x0a, 0x34, 0x3d, 0xca, 0x39, 0x71, 0x28, 0xef, 0x06, 0x21, 0x13, 0x0c, 0x35, 0x9c, 0x30, 0xb0, + 0xbb, 0x82, 0x72, 0xe1, 0xfa, 0x8e, 0x31, 0x82, 0xea, 0x94, 0xac, 0x96, 0x8c, 0xcc, 0xd1, 0x2b, + 0x28, 0x89, 0x55, 0x40, 0x75, 0xad, 0xad, 0x75, 0x9a, 0xbd, 0xbd, 0x6e, 0x9e, 0xd7, 0x4d, 0x48, + 0xe7, 0xab, 0x80, 0x62, 0x45, 0x43, 0x08, 0x4a, 0x33, 0x36, 0x5f, 0xe9, 0x85, 0xb6, 0xd6, 0x69, + 0x60, 0x75, 0x36, 0xde, 0x03, 0x0c, 0xec, 0x4b, 0x66, 0x0a, 0x22, 0x22, 0x2e, 0x19, 0x36, 0x9b, + 0xc7, 0x82, 0x65, 0xac, 0xce, 0x48, 0x87, 0x6a, 0xd2, 0x8f, 0x2a, 0xdc, 0xc2, 0xe9, 0xd5, 0xf8, + 0x55, 0x84, 0x6d, 0xd3, 0xf5, 0x82, 0x25, 0xc5, 0xf4, 0x7b, 0x44, 0xb9, 0x40, 0x1f, 0x60, 0x3b, + 0xa4, 0x3c, 0x60, 0x3e, 0xa7, 0xd6, 0xfd, 0x3a, 0x6b, 0xa4, 0x7c, 0x79, 0x43, 0x2f, 0x73, 0xf5, + 0xdc, 0xbd, 0x8e, 0x7f, 0xb1, 0xbc, 0x26, 0x99, 0xee, 0x35, 0x45, 0xaf, 0xa1, 0x1a, 0xc4, 0x0a, + 0x7a, 0xb1, 0xad, 0x75, 0xea, 0xbd, 0xdd, 0x3b, 0xe5, 0x71, 0xca, 0x92, 0xaa, 0x0b, 0x77, 0xb9, + 0xb4, 0x22, 0x4e, 0x43, 0x9f, 0x78, 0x54, 0x2f, 0xb5, 0xb5, 0x4e, 0x0d, 0x37, 0x24, 0x78, 0x91, + 0x60, 0xa8, 0x03, 0x2d, 0x45, 0x62, 0x24, 0x12, 0x97, 0x16, 0xb7, 0x59, 0x40, 0xf5, 0xb2, 0xe2, + 0x35, 0x25, 0x3e, 0x91, 0xb0, 0x29, 0x51, 0x34, 0x85, 0x27, 0x59, 0x93, 0x36, 0xf3, 0x82, 0x90, + 0x72, 0xee, 0x32, 0x5f, 0xaf, 0x28, 0xaf, 0x07, 0x9b, 0xcd, 0x1c, 0xaf, 0x09, 0xca, 0xef, 0xe3, + 0xb4, 0x34, 0xf7, 0x80, 0xfa, 0xb0, 0xb3, 0xb6, 0xad, 0x36, 0xa1, 0x57, 0x95, 0x33, 0x7d, 0x53, + 0x6c, 0xbd, 0x29, 0xdc, 0xcc, 0x46, 0xa2, 0xee, 0xc6, 
0x4f, 0x68, 0xa6, 0xab, 0x88, 0xf1, 0xfc, + 0x98, 0xb4, 0x7b, 0x8d, 0x69, 0x1f, 0x6a, 0xd9, 0x84, 0xe2, 0x4d, 0x67, 0x77, 0xf4, 0x02, 0xea, + 0xf9, 0xc1, 0x14, 0xd5, 0x33, 0xb0, 0x6c, 0x28, 0xc6, 0x08, 0xf6, 0x4c, 0x11, 0x52, 0xe2, 0xb9, + 0xbe, 0x33, 0xf4, 0x83, 0x48, 0x1c, 0x93, 0xe5, 0x32, 0x8d, 0xc5, 0x43, 0x5b, 0x31, 0xce, 0x61, + 0xff, 0x2e, 0xb5, 0xc4, 0xd9, 0x5b, 0x78, 0x46, 0x1c, 0x27, 0xa4, 0x0e, 0x11, 0x74, 0x6e, 0x25, + 0x35, 0x71, 0x5e, 0xe2, 0xe0, 0xee, 0xae, 0x9f, 0x13, 0x69, 0x19, 0x1c, 0x63, 0x08, 0x28, 0xd5, + 0x98, 0x92, 0x90, 0x78, 0x54, 0xd0, 0x50, 0x65, 0x3e, 0x57, 0xaa, 0xce, 0xd2, 0xae, 0xeb, 0x0b, + 0x1a, 0xfe, 0x20, 0x32, 0x35, 0x49, 0x0a, 0x21, 0x85, 0x2e, 0xb8, 0xf1, 0xbb, 0x90, 0xeb, 0x70, + 0x12, 0x89, 0x1b, 0x86, 0xff, 0xf5, 0x3b, 0xf8, 0x02, 0x59, 0x4e, 0xac, 0x20, 0x6b, 0x55, 0x2f, + 0xb4, 0x8b, 0x9d, 0x7a, 0xaf, 0xbd, 0xa9, 0x72, 0xdb, 0x12, 0x46, 0xe1, 0x6d, 0x9b, 0x0f, 0xfe, + 0x6a, 0xfe, 0xcb, 0x98, 0x8f, 0xe1, 0xf9, 0x9d, 0x63, 0xff, 0xcb, 0xcc, 0x1b, 0x9f, 0x61, 0x07, + 0x53, 0x9b, 0xf9, 0x3e, 0xb5, 0x85, 0x1a, 0x16, 0x47, 0xef, 0x40, 0xf7, 0xc8, 0x95, 0x15, 0xa6, + 0xb0, 0x35, 0x23, 0xf6, 0x37, 0xb6, 0x58, 0x58, 0x1e, 0x4f, 0xe3, 0xe5, 0x91, 0xab, 0xac, 0xea, + 0x28, 0x7e, 0x3d, 0xe3, 0xc6, 0x29, 0x6c, 0x67, 0xe8, 0xd0, 0x5f, 0x30, 0xf4, 0x14, 0x2a, 0x01, + 0xe1, 0x9c, 0xc6, 0xcd, 0xd4, 0x70, 0x72, 0x43, 0x07, 0x00, 0x39, 0x4d, 0xb9, 0xd4, 0x32, 0xde, + 0x9a, 0xa5, 0x3a, 0x87, 0x1f, 0xa1, 0x9e, 0x4b, 0x06, 0x6a, 0x41, 0xe3, 0x78, 0x72, 0x36, 0xc5, + 0x03, 0xd3, 0xec, 0x1f, 0x8d, 0x06, 0xad, 0x47, 0x08, 0x41, 0xf3, 0x62, 0xbc, 0x81, 0x69, 0x08, + 0xa0, 0x82, 0xfb, 0xe3, 0x93, 0xc9, 0x59, 0xab, 0x70, 0xd8, 0x83, 0x9d, 0x1b, 0xfb, 0x40, 0x35, + 0x28, 0x8d, 0x27, 0x63, 0x59, 0x5c, 0x83, 0xd2, 0xa7, 0xaf, 0xc3, 0x69, 0x4b, 0x43, 0x75, 0xa8, + 0x9e, 0x0c, 0x4e, 0x47, 0xfd, 0xf3, 0x41, 0xab, 0x30, 0xab, 0xa8, 0xbf, 0x9a, 0x37, 0x7f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce, 0x1e, 0x7c, 0x06, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto new file mode 100644 index 000000000..bd83f095f --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto @@ -0,0 +1,157 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Message definitions to be used by integration test service definitions. + +syntax = "proto3"; + +package grpc.testing; + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// Compression algorithms +enum CompressionType { + // No compression + NONE = 0; + GZIP = 1; + DEFLATE = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + PayloadType type = 1; + // Primary contents of payload. + bytes body = 2; +} + +// A protobuf representation for grpc status. This is used by test +// clients to specify a status that the server should attempt to return. +message EchoStatus { + int32 code = 1; + string message = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. 
+ PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + int32 response_size = 2; + + // Optional input payload sent along with the request. + Payload payload = 3; + + // Whether SimpleResponse should include username. + bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. + bool fill_oauth_scope = 5; + + // Compression algorithm to be used by the server for the response (stream) + CompressionType response_compression = 6; + + // Whether server should return a given status + EchoStatus response_status = 7; +} + +// Unary response, as configured by the request. +message SimpleResponse { + // Payload to increase message size. + Payload payload = 1; + // The user the request came from, for verifying authentication was + // successful when the client expected it. + string username = 2; + // OAuth scope. + string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. + Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. +message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. 
This is to simulate a mixed type of payload + // stream. + PayloadType response_type = 1; + + // Configuration for each expected response message. + repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + Payload payload = 3; + + // Compression algorithm to be used by the server for the response (stream) + CompressionType response_compression = 6; + + // Whether server should return a given status + EchoStatus response_status = 7; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + Payload payload = 1; +} + +// For reconnect interop test only. +// Client tells server what reconnection parameters it used. +message ReconnectParams { + int32 max_reconnect_backoff_ms = 1; +} + +// For reconnect interop test only. +// Server tells client whether its reconnects are following the spec and the +// reconnect backoffs it saw. +message ReconnectInfo { + bool passed = 1; + repeated int32 backoff_ms = 2; +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go new file mode 100644 index 000000000..d70d1f745 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go @@ -0,0 +1,250 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: payloads.proto + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ByteBufferParams struct { + ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` + RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` +} + +func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} } +func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) } +func (*ByteBufferParams) ProtoMessage() {} +func (*ByteBufferParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *ByteBufferParams) GetReqSize() int32 { + if m != nil { + return m.ReqSize + } + return 0 +} + +func (m *ByteBufferParams) GetRespSize() int32 { + if m != nil { + return m.RespSize + } + return 0 +} + +type SimpleProtoParams struct { + ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` + RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` +} + +func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} } +func (m *SimpleProtoParams) String() string { return proto.CompactTextString(m) } +func (*SimpleProtoParams) ProtoMessage() {} +func (*SimpleProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *SimpleProtoParams) GetReqSize() int32 { + if m != nil { + return m.ReqSize + } + return 0 +} + +func (m *SimpleProtoParams) GetRespSize() int32 { + if m != nil { + return m.RespSize + } + return 0 +} + +type ComplexProtoParams struct { +} + +func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} } +func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) } +func (*ComplexProtoParams) ProtoMessage() {} +func (*ComplexProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +type PayloadConfig struct { + // Types that are valid to be assigned to Payload: + // *PayloadConfig_BytebufParams + // 
*PayloadConfig_SimpleParams + // *PayloadConfig_ComplexParams + Payload isPayloadConfig_Payload `protobuf_oneof:"payload"` +} + +func (m *PayloadConfig) Reset() { *m = PayloadConfig{} } +func (m *PayloadConfig) String() string { return proto.CompactTextString(m) } +func (*PayloadConfig) ProtoMessage() {} +func (*PayloadConfig) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +type isPayloadConfig_Payload interface { + isPayloadConfig_Payload() +} + +type PayloadConfig_BytebufParams struct { + BytebufParams *ByteBufferParams `protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,oneof"` +} +type PayloadConfig_SimpleParams struct { + SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,oneof"` +} +type PayloadConfig_ComplexParams struct { + ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,oneof"` +} + +func (*PayloadConfig_BytebufParams) isPayloadConfig_Payload() {} +func (*PayloadConfig_SimpleParams) isPayloadConfig_Payload() {} +func (*PayloadConfig_ComplexParams) isPayloadConfig_Payload() {} + +func (m *PayloadConfig) GetPayload() isPayloadConfig_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *PayloadConfig) GetBytebufParams() *ByteBufferParams { + if x, ok := m.GetPayload().(*PayloadConfig_BytebufParams); ok { + return x.BytebufParams + } + return nil +} + +func (m *PayloadConfig) GetSimpleParams() *SimpleProtoParams { + if x, ok := m.GetPayload().(*PayloadConfig_SimpleParams); ok { + return x.SimpleParams + } + return nil +} + +func (m *PayloadConfig) GetComplexParams() *ComplexProtoParams { + if x, ok := m.GetPayload().(*PayloadConfig_ComplexParams); ok { + return x.ComplexParams + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PayloadConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PayloadConfig_OneofMarshaler, _PayloadConfig_OneofUnmarshaler, _PayloadConfig_OneofSizer, []interface{}{ + (*PayloadConfig_BytebufParams)(nil), + (*PayloadConfig_SimpleParams)(nil), + (*PayloadConfig_ComplexParams)(nil), + } +} + +func _PayloadConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PayloadConfig) + // payload + switch x := m.Payload.(type) { + case *PayloadConfig_BytebufParams: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BytebufParams); err != nil { + return err + } + case *PayloadConfig_SimpleParams: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SimpleParams); err != nil { + return err + } + case *PayloadConfig_ComplexParams: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ComplexParams); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PayloadConfig.Payload has unexpected type %T", x) + } + return nil +} + +func _PayloadConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PayloadConfig) + switch tag { + case 1: // payload.bytebuf_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ByteBufferParams) + err := b.DecodeMessage(msg) + m.Payload = &PayloadConfig_BytebufParams{msg} + return true, err + case 2: // payload.simple_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SimpleProtoParams) + err := b.DecodeMessage(msg) + m.Payload = &PayloadConfig_SimpleParams{msg} + return true, err + case 3: // payload.complex_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ComplexProtoParams) + err := b.DecodeMessage(msg) + 
m.Payload = &PayloadConfig_ComplexParams{msg} + return true, err + default: + return false, nil + } +} + +func _PayloadConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PayloadConfig) + // payload + switch x := m.Payload.(type) { + case *PayloadConfig_BytebufParams: + s := proto.Size(x.BytebufParams) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PayloadConfig_SimpleParams: + s := proto.Size(x.SimpleParams) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PayloadConfig_ComplexParams: + s := proto.Size(x.ComplexParams) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*ByteBufferParams)(nil), "grpc.testing.ByteBufferParams") + proto.RegisterType((*SimpleProtoParams)(nil), "grpc.testing.SimpleProtoParams") + proto.RegisterType((*ComplexProtoParams)(nil), "grpc.testing.ComplexProtoParams") + proto.RegisterType((*PayloadConfig)(nil), "grpc.testing.PayloadConfig") +} + +func init() { proto.RegisterFile("payloads.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc, + 0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, + 0xd6, 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0xf2, 0xe2, 0x12, 0x70, 0xaa, 0x2c, 0x49, + 0x75, 0x2a, 0x4d, 0x4b, 0x4b, 0x2d, 0x0a, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x16, 0x92, 0xe4, 0xe2, + 0x28, 0x4a, 0x2d, 0x8c, 0x2f, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x62, + 0x2f, 0x4a, 0x2d, 0x0c, 0xce, 0xac, 0x4a, 0x15, 0x92, 0xe6, 0xe2, 0x2c, 0x4a, 0x2d, 0x2e, 0x80, + 0xc8, 0x31, 0x81, 0xe5, 0x38, 0x40, 0x02, 0x20, 0x49, 0x25, 0x6f, 0x2e, 0xc1, 
0xe0, 0xcc, 0xdc, + 0x82, 0x9c, 0xd4, 0x00, 0x90, 0x45, 0x14, 0x1a, 0x26, 0xc2, 0x25, 0xe4, 0x9c, 0x0f, 0x32, 0xac, + 0x02, 0xc9, 0x34, 0xa5, 0x6f, 0x8c, 0x5c, 0xbc, 0x01, 0x10, 0xff, 0x38, 0xe7, 0xe7, 0xa5, 0x65, + 0xa6, 0x0b, 0xb9, 0x73, 0xf1, 0x25, 0x55, 0x96, 0xa4, 0x26, 0x95, 0xa6, 0xc5, 0x17, 0x80, 0xd5, + 0x80, 0x6d, 0xe1, 0x36, 0x92, 0xd3, 0x43, 0xf6, 0xa7, 0x1e, 0xba, 0x27, 0x3d, 0x18, 0x82, 0x78, + 0xa1, 0xfa, 0xa0, 0x0e, 0x75, 0xe3, 0xe2, 0x2d, 0x06, 0xbb, 0x1e, 0x66, 0x0e, 0x13, 0xd8, 0x1c, + 0x79, 0x54, 0x73, 0x30, 0x3c, 0xe8, 0xc1, 0x10, 0xc4, 0x03, 0xd1, 0x07, 0x35, 0xc7, 0x93, 0x8b, + 0x2f, 0x19, 0xe2, 0x70, 0x98, 0x41, 0xcc, 0x60, 0x83, 0x14, 0x50, 0x0d, 0xc2, 0xf4, 0x1c, 0xc8, + 0x49, 0x50, 0x9d, 0x10, 0x01, 0x27, 0x4e, 0x2e, 0x76, 0x68, 0xe4, 0x25, 0xb1, 0x81, 0x23, 0xcf, + 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto new file mode 100644 index 000000000..5d4871f5f --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto @@ -0,0 +1,40 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package grpc.testing; + +message ByteBufferParams { + int32 req_size = 1; + int32 resp_size = 2; +} + +message SimpleProtoParams { + int32 req_size = 1; + int32 resp_size = 2; +} + +message ComplexProtoParams { + // TODO (vpai): Fill this in once the details of complex, representative + // protos are decided +} + +message PayloadConfig { + oneof payload { + ByteBufferParams bytebuf_params = 1; + SimpleProtoParams simple_params = 2; + ComplexProtoParams complex_params = 3; + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go new file mode 100644 index 000000000..50e350595 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go @@ -0,0 +1,442 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: services.proto + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for BenchmarkService service + +type BenchmarkServiceClient interface { + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by one response. + // The server returns the client payload as-is. 
+ StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) +} + +type benchmarkServiceClient struct { + cc *grpc.ClientConn +} + +func NewBenchmarkServiceClient(cc *grpc.ClientConn) BenchmarkServiceClient { + return &benchmarkServiceClient{cc} +} + +func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], c.cc, "/grpc.testing.BenchmarkService/StreamingCall", opts...) + if err != nil { + return nil, err + } + x := &benchmarkServiceStreamingCallClient{stream} + return x, nil +} + +type BenchmarkService_StreamingCallClient interface { + Send(*SimpleRequest) error + Recv() (*SimpleResponse, error) + grpc.ClientStream +} + +type benchmarkServiceStreamingCallClient struct { + grpc.ClientStream +} + +func (x *benchmarkServiceStreamingCallClient) Send(m *SimpleRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for BenchmarkService service + +type BenchmarkServiceServer interface { + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by one response. + // The server returns the client payload as-is. 
+ StreamingCall(BenchmarkService_StreamingCallServer) error +} + +func RegisterBenchmarkServiceServer(s *grpc.Server, srv BenchmarkServiceServer) { + s.RegisterService(&_BenchmarkService_serviceDesc, srv) +} + +func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BenchmarkServiceServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.BenchmarkService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BenchmarkService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(BenchmarkServiceServer).StreamingCall(&benchmarkServiceStreamingCallServer{stream}) +} + +type BenchmarkService_StreamingCallServer interface { + Send(*SimpleResponse) error + Recv() (*SimpleRequest, error) + grpc.ServerStream +} + +type benchmarkServiceStreamingCallServer struct { + grpc.ServerStream +} + +func (x *benchmarkServiceStreamingCallServer) Send(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *benchmarkServiceStreamingCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _BenchmarkService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.BenchmarkService", + HandlerType: (*BenchmarkServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UnaryCall", + Handler: _BenchmarkService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingCall", + Handler: _BenchmarkService_StreamingCall_Handler, + 
ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "services.proto", +} + +// Client API for WorkerService service + +type WorkerServiceClient interface { + // Start server with specified workload. + // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) + // Just return the core count - unary call + CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) + // Quit this worker + QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) +} + +type workerServiceClient struct { + cc *grpc.ClientConn +} + +func NewWorkerServiceClient(cc *grpc.ClientConn) WorkerServiceClient { + return &workerServiceClient{cc} +} + +func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) { + stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[0], c.cc, "/grpc.testing.WorkerService/RunServer", opts...) 
+ if err != nil { + return nil, err + } + x := &workerServiceRunServerClient{stream} + return x, nil +} + +type WorkerService_RunServerClient interface { + Send(*ServerArgs) error + Recv() (*ServerStatus, error) + grpc.ClientStream +} + +type workerServiceRunServerClient struct { + grpc.ClientStream +} + +func (x *workerServiceRunServerClient) Send(m *ServerArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) { + m := new(ServerStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) { + stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[1], c.cc, "/grpc.testing.WorkerService/RunClient", opts...) + if err != nil { + return nil, err + } + x := &workerServiceRunClientClient{stream} + return x, nil +} + +type WorkerService_RunClientClient interface { + Send(*ClientArgs) error + Recv() (*ClientStatus, error) + grpc.ClientStream +} + +type workerServiceRunClientClient struct { + grpc.ClientStream +} + +func (x *workerServiceRunClientClient) Send(m *ClientArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) { + m := new(ClientStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) { + out := new(CoreResponse) + err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) { + out := new(Void) + err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for WorkerService service + +type WorkerServiceServer interface { + // Start server with specified workload. + // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunServer(WorkerService_RunServerServer) error + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. 
+ RunClient(WorkerService_RunClientServer) error + // Just return the core count - unary call + CoreCount(context.Context, *CoreRequest) (*CoreResponse, error) + // Quit this worker + QuitWorker(context.Context, *Void) (*Void, error) +} + +func RegisterWorkerServiceServer(s *grpc.Server, srv WorkerServiceServer) { + s.RegisterService(&_WorkerService_serviceDesc, srv) +} + +func _WorkerService_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServiceServer).RunServer(&workerServiceRunServerServer{stream}) +} + +type WorkerService_RunServerServer interface { + Send(*ServerStatus) error + Recv() (*ServerArgs, error) + grpc.ServerStream +} + +type workerServiceRunServerServer struct { + grpc.ServerStream +} + +func (x *workerServiceRunServerServer) Send(m *ServerStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerServiceRunServerServer) Recv() (*ServerArgs, error) { + m := new(ServerArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _WorkerService_RunClient_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServiceServer).RunClient(&workerServiceRunClientServer{stream}) +} + +type WorkerService_RunClientServer interface { + Send(*ClientStatus) error + Recv() (*ClientArgs, error) + grpc.ServerStream +} + +type workerServiceRunClientServer struct { + grpc.ServerStream +} + +func (x *workerServiceRunClientServer) Send(m *ClientStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerServiceRunClientServer) Recv() (*ClientArgs, error) { + m := new(ClientArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoreRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == 
nil { + return srv.(WorkerServiceServer).CoreCount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.WorkerService/CoreCount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Void) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkerServiceServer).QuitWorker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.WorkerService/QuitWorker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void)) + } + return interceptor(ctx, in, info, handler) +} + +var _WorkerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.WorkerService", + HandlerType: (*WorkerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CoreCount", + Handler: _WorkerService_CoreCount_Handler, + }, + { + MethodName: "QuitWorker", + Handler: _WorkerService_QuitWorker_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "RunServer", + Handler: _WorkerService_RunServer_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "RunClient", + Handler: _WorkerService_RunClient_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "services.proto", +} + +func init() { proto.RegisterFile("services.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 255 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0xa9, 0x07, 0xa1, 0xc1, 0x2e, 0x92, 0x93, 0x46, 0x1f, 0xc0, 
0x53, 0x91, 0xd5, 0x17, + 0x70, 0x8b, 0x1e, 0x05, 0xb7, 0xa8, 0xe7, 0x58, 0x87, 0x1a, 0x36, 0xcd, 0xd4, 0x99, 0x89, 0xe0, + 0x93, 0xf8, 0x0e, 0x3e, 0xa5, 0xec, 0x66, 0x57, 0xd6, 0x92, 0x9b, 0xc7, 0xf9, 0xbf, 0xe1, 0x23, + 0x7f, 0x46, 0xcd, 0x18, 0xe8, 0xc3, 0x75, 0xc0, 0xf5, 0x48, 0x28, 0xa8, 0x8f, 0x7a, 0x1a, 0xbb, + 0x5a, 0x80, 0xc5, 0x85, 0xde, 0xcc, 0x06, 0x60, 0xb6, 0xfd, 0x8e, 0x9a, 0xaa, 0xc3, 0x20, 0x84, + 0x3e, 0x8d, 0xf3, 0xef, 0x42, 0x1d, 0x2f, 0x20, 0x74, 0x6f, 0x83, 0xa5, 0x55, 0x9b, 0x44, 0xfa, + 0x4e, 0x95, 0x8f, 0xc1, 0xd2, 0x67, 0x63, 0xbd, 0xd7, 0x67, 0xf5, 0xbe, 0xaf, 0x6e, 0xdd, 0x30, + 0x7a, 0x58, 0xc2, 0x7b, 0x04, 0x16, 0x73, 0x9e, 0x87, 0x3c, 0x62, 0x60, 0xd0, 0xf7, 0xaa, 0x6a, + 0x85, 0xc0, 0x0e, 0x2e, 0xf4, 0xff, 0x74, 0x5d, 0x14, 0x97, 0xc5, 0xfc, 0xeb, 0x40, 0x55, 0xcf, + 0x48, 0x2b, 0xa0, 0xdd, 0x4b, 0x6f, 0x55, 0xb9, 0x8c, 0x61, 0x3d, 0x01, 0xe9, 0x93, 0x89, 0x60, + 0x93, 0xde, 0x50, 0xcf, 0xc6, 0xe4, 0x48, 0x2b, 0x56, 0x22, 0xaf, 0xc5, 0x5b, 0x4d, 0xe3, 0x1d, + 0x04, 0x99, 0x6a, 0x52, 0x9a, 0xd3, 0x24, 0xb2, 0xa7, 0x59, 0xa8, 0xb2, 0x41, 0x82, 0x06, 0x63, + 0x10, 0x7d, 0x3a, 0x59, 0x46, 0xfa, 0x6d, 0x6a, 0x72, 0x68, 0xfb, 0x67, 0xd7, 0x4a, 0x3d, 0x44, + 0x27, 0xa9, 0xa6, 0xd6, 0x7f, 0x37, 0x9f, 0xd0, 0xbd, 0x9a, 0x4c, 0xf6, 0x72, 0xb8, 0xb9, 0xe6, + 0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x84, 0x02, 0xe3, 0x0c, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto new file mode 100644 index 000000000..f4e790782 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto @@ -0,0 +1,56 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto3"; + +import "messages.proto"; +import "control.proto"; + +package grpc.testing; + +service BenchmarkService { + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); +} + +service WorkerService { + // Start server with specified workload. + // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + rpc RunServer(stream ServerArgs) returns (stream ServerStatus); + + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. 
+ rpc RunClient(stream ClientArgs) returns (stream ClientStatus); + + // Just return the core count - unary call + rpc CoreCount(CoreRequest) returns (CoreResponse); + + // Quit this worker + rpc QuitWorker(Void) returns (Void); +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go new file mode 100644 index 000000000..d69cb7410 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go @@ -0,0 +1,208 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: stats.proto + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ServerStats struct { + // wall clock time change in seconds since last reset + TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"` + // change in user time (in seconds) used by the server since last reset + TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser" json:"time_user,omitempty"` + // change in server time (in seconds) used by the server process and all + // threads since last reset + TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"` +} + +func (m *ServerStats) Reset() { *m = ServerStats{} } +func (m *ServerStats) String() string { return proto.CompactTextString(m) } +func (*ServerStats) ProtoMessage() {} +func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *ServerStats) GetTimeElapsed() float64 { + if m != nil { + return m.TimeElapsed + } + return 0 +} + +func (m *ServerStats) GetTimeUser() float64 { + if m != nil { + return m.TimeUser + } + return 0 +} + +func (m *ServerStats) GetTimeSystem() float64 { + if m != nil { + return 
m.TimeSystem + } + return 0 +} + +// Histogram params based on grpc/support/histogram.c +type HistogramParams struct { + Resolution float64 `protobuf:"fixed64,1,opt,name=resolution" json:"resolution,omitempty"` + MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible" json:"max_possible,omitempty"` +} + +func (m *HistogramParams) Reset() { *m = HistogramParams{} } +func (m *HistogramParams) String() string { return proto.CompactTextString(m) } +func (*HistogramParams) ProtoMessage() {} +func (*HistogramParams) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +func (m *HistogramParams) GetResolution() float64 { + if m != nil { + return m.Resolution + } + return 0 +} + +func (m *HistogramParams) GetMaxPossible() float64 { + if m != nil { + return m.MaxPossible + } + return 0 +} + +// Histogram data based on grpc/support/histogram.c +type HistogramData struct { + Bucket []uint32 `protobuf:"varint,1,rep,packed,name=bucket" json:"bucket,omitempty"` + MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen" json:"min_seen,omitempty"` + MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen" json:"max_seen,omitempty"` + Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` + SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares" json:"sum_of_squares,omitempty"` + Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` +} + +func (m *HistogramData) Reset() { *m = HistogramData{} } +func (m *HistogramData) String() string { return proto.CompactTextString(m) } +func (*HistogramData) ProtoMessage() {} +func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } + +func (m *HistogramData) GetBucket() []uint32 { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *HistogramData) GetMinSeen() float64 { + if m != nil { + return m.MinSeen + } + return 0 +} + +func (m *HistogramData) GetMaxSeen() float64 { + if 
m != nil { + return m.MaxSeen + } + return 0 +} + +func (m *HistogramData) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *HistogramData) GetSumOfSquares() float64 { + if m != nil { + return m.SumOfSquares + } + return 0 +} + +func (m *HistogramData) GetCount() float64 { + if m != nil { + return m.Count + } + return 0 +} + +type ClientStats struct { + // Latency histogram. Data points are in nanoseconds. + Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` + // See ServerStats for details. + TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"` + TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser" json:"time_user,omitempty"` + TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } + +func (m *ClientStats) GetLatencies() *HistogramData { + if m != nil { + return m.Latencies + } + return nil +} + +func (m *ClientStats) GetTimeElapsed() float64 { + if m != nil { + return m.TimeElapsed + } + return 0 +} + +func (m *ClientStats) GetTimeUser() float64 { + if m != nil { + return m.TimeUser + } + return 0 +} + +func (m *ClientStats) GetTimeSystem() float64 { + if m != nil { + return m.TimeSystem + } + return 0 +} + +func init() { + proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats") + proto.RegisterType((*HistogramParams)(nil), "grpc.testing.HistogramParams") + proto.RegisterType((*HistogramData)(nil), "grpc.testing.HistogramData") + proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats") +} + +func init() { proto.RegisterFile("stats.proto", fileDescriptor4) } + +var fileDescriptor4 = 
[]byte{ + // 341 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0x86, 0x49, 0xd3, 0xf6, 0xb6, 0x27, 0xed, 0xbd, 0x97, 0x41, 0x24, 0x52, 0xd0, 0x1a, 0x5c, + 0x74, 0x95, 0x85, 0xae, 0x5c, 0xab, 0xe0, 0xce, 0xd2, 0xe8, 0x3a, 0x4c, 0xe3, 0x69, 0x19, 0xcc, + 0xcc, 0xc4, 0x39, 0x33, 0x12, 0x1f, 0x49, 0x7c, 0x49, 0xc9, 0x24, 0x68, 0x55, 0xd0, 0x5d, 0xe6, + 0xfb, 0x7e, 0xe6, 0xe4, 0xe4, 0x0f, 0x44, 0x64, 0xb9, 0xa5, 0xb4, 0x32, 0xda, 0x6a, 0x36, 0xd9, + 0x9a, 0xaa, 0x48, 0x2d, 0x92, 0x15, 0x6a, 0x9b, 0x28, 0x88, 0x32, 0x34, 0x4f, 0x68, 0xb2, 0x26, + 0xc2, 0x8e, 0x61, 0x62, 0x85, 0xc4, 0x1c, 0x4b, 0x5e, 0x11, 0xde, 0xc7, 0xc1, 0x3c, 0x58, 0x04, + 0xab, 0xa8, 0x61, 0x57, 0x2d, 0x62, 0x33, 0x18, 0xfb, 0x88, 0x23, 0x34, 0x71, 0xcf, 0xfb, 0x51, + 0x03, 0xee, 0x08, 0x0d, 0x3b, 0x02, 0x9f, 0xcd, 0xe9, 0x99, 0x2c, 0xca, 0x38, 0xf4, 0x1a, 0x1a, + 0x94, 0x79, 0x92, 0xdc, 0xc2, 0xbf, 0x6b, 0x41, 0x56, 0x6f, 0x0d, 0x97, 0x4b, 0x6e, 0xb8, 0x24, + 0x76, 0x08, 0x60, 0x90, 0x74, 0xe9, 0xac, 0xd0, 0xaa, 0x9b, 0xb8, 0x43, 0x9a, 0x77, 0x92, 0xbc, + 0xce, 0x2b, 0x4d, 0x24, 0xd6, 0x25, 0x76, 0x33, 0x23, 0xc9, 0xeb, 0x65, 0x87, 0x92, 0xd7, 0x00, + 0xa6, 0xef, 0xd7, 0x5e, 0x72, 0xcb, 0xd9, 0x3e, 0x0c, 0xd7, 0xae, 0x78, 0x40, 0x1b, 0x07, 0xf3, + 0x70, 0x31, 0x5d, 0x75, 0x27, 0x76, 0x00, 0x23, 0x29, 0x54, 0x4e, 0x88, 0xaa, 0xbb, 0xe8, 0x8f, + 0x14, 0x2a, 0x43, 0x54, 0x5e, 0xf1, 0xba, 0x55, 0x61, 0xa7, 0x78, 0xed, 0xd5, 0x7f, 0x08, 0xc9, + 0xc9, 0xb8, 0xef, 0x69, 0xf3, 0xc8, 0x4e, 0xe0, 0x2f, 0x39, 0x99, 0xeb, 0x4d, 0x4e, 0x8f, 0x8e, + 0x1b, 0xa4, 0x78, 0xe0, 0xe5, 0x84, 0x9c, 0xbc, 0xd9, 0x64, 0x2d, 0x63, 0x7b, 0x30, 0x28, 0xb4, + 0x53, 0x36, 0x1e, 0x7a, 0xd9, 0x1e, 0x92, 0x97, 0x00, 0xa2, 0x8b, 0x52, 0xa0, 0xb2, 0xed, 0x47, + 0x3f, 0x87, 0x71, 0xc9, 0x2d, 0xaa, 0x42, 0x20, 0xf9, 0xfd, 0xa3, 0xd3, 0x59, 0xba, 0xdb, 0x52, + 0xfa, 0x69, 0xb7, 0xd5, 0x47, 0xfa, 0x5b, 0x5f, 0xbd, 0x5f, 0xfa, 0x0a, 0x7f, 
0xee, 0xab, 0xff, + 0xb5, 0xaf, 0xf5, 0xd0, 0xff, 0x34, 0x67, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75, 0x34, + 0x90, 0x43, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto new file mode 100644 index 000000000..baf3610f3 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto @@ -0,0 +1,55 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.testing; + +message ServerStats { + // wall clock time change in seconds since last reset + double time_elapsed = 1; + + // change in user time (in seconds) used by the server since last reset + double time_user = 2; + + // change in server time (in seconds) used by the server process and all + // threads since last reset + double time_system = 3; +} + +// Histogram params based on grpc/support/histogram.c +message HistogramParams { + double resolution = 1; // first bucket is [0, 1 + resolution) + double max_possible = 2; // use enough buckets to allow this value +} + +// Histogram data based on grpc/support/histogram.c +message HistogramData { + repeated uint32 bucket = 1; + double min_seen = 2; + double max_seen = 3; + double sum = 4; + double sum_of_squares = 5; + double count = 6; +} + +message ClientStats { + // Latency histogram. Data points are in nanoseconds. 
+ HistogramData latencies = 1; + + // See ServerStats for details. + double time_elapsed = 2; + double time_user = 3; + double time_system = 4; +} diff --git a/vendor/google.golang.org/grpc/benchmark/latency/latency.go b/vendor/google.golang.org/grpc/benchmark/latency/latency.go new file mode 100644 index 000000000..5839a5c44 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/latency/latency.go @@ -0,0 +1,316 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package latency provides wrappers for net.Conn, net.Listener, and +// net.Dialers, designed to interoperate to inject real-world latency into +// network connections. +package latency + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "time" + + "golang.org/x/net/context" +) + +// Dialer is a function matching the signature of net.Dial. +type Dialer func(network, address string) (net.Conn, error) + +// TimeoutDialer is a function matching the signature of net.DialTimeout. +type TimeoutDialer func(network, address string, timeout time.Duration) (net.Conn, error) + +// ContextDialer is a function matching the signature of +// net.Dialer.DialContext. 
+type ContextDialer func(ctx context.Context, network, address string) (net.Conn, error) + +// Network represents a network with the given bandwidth, latency, and MTU +// (Maximum Transmission Unit) configuration, and can produce wrappers of +// net.Listeners, net.Conn, and various forms of dialing functions. The +// Listeners and Dialers/Conns on both sides of connections must come from this +// package, but need not be created from the same Network. Latency is computed +// when sending (in Write), and is injected when receiving (in Read). This +// allows senders' Write calls to be non-blocking, as in real-world +// applications. +// +// Note: Latency is injected by the sender specifying the absolute time data +// should be available, and the reader delaying until that time arrives to +// provide the data. This package attempts to counter-act the effects of clock +// drift and existing network latency by measuring the delay between the +// sender's transmission time and the receiver's reception time during startup. +// No attempt is made to measure the existing bandwidth of the connection. +type Network struct { + Kbps int // Kilobits per second; if non-positive, infinite + Latency time.Duration // One-way latency (sending); if non-positive, no delay + MTU int // Bytes per packet; if non-positive, infinite +} + +var ( + //Local simulates local network. + Local = Network{0, 0, 0} + //LAN simulates local area network network. + LAN = Network{100 * 1024, 2 * time.Millisecond, 1500} + //WAN simulates wide area network. + WAN = Network{20 * 1024, 30 * time.Millisecond, 1500} + //Longhaul simulates bad network. + Longhaul = Network{1000 * 1024, 200 * time.Millisecond, 9000} +) + +// Conn returns a net.Conn that wraps c and injects n's latency into that +// connection. This function also imposes latency for connection creation. +// If n's Latency is lower than the measured latency in c, an error is +// returned. 
+func (n *Network) Conn(c net.Conn) (net.Conn, error) { + start := now() + nc := &conn{Conn: c, network: n, readBuf: new(bytes.Buffer)} + if err := nc.sync(); err != nil { + return nil, err + } + sleep(start.Add(nc.delay).Sub(now())) + return nc, nil +} + +type conn struct { + net.Conn + network *Network + + readBuf *bytes.Buffer // one packet worth of data received + lastSendEnd time.Time // time the previous Write should be fully on the wire + delay time.Duration // desired latency - measured latency +} + +// header is sent before all data transmitted by the application. +type header struct { + ReadTime int64 // Time the reader is allowed to read this packet (UnixNano) + Sz int32 // Size of the data in the packet +} + +func (c *conn) Write(p []byte) (n int, err error) { + tNow := now() + if c.lastSendEnd.Before(tNow) { + c.lastSendEnd = tNow + } + for len(p) > 0 { + pkt := p + if c.network.MTU > 0 && len(pkt) > c.network.MTU { + pkt = pkt[:c.network.MTU] + p = p[c.network.MTU:] + } else { + p = nil + } + if c.network.Kbps > 0 { + if congestion := c.lastSendEnd.Sub(tNow) - c.delay; congestion > 0 { + // The network is full; sleep until this packet can be sent. + sleep(congestion) + tNow = tNow.Add(congestion) + } + } + c.lastSendEnd = c.lastSendEnd.Add(c.network.pktTime(len(pkt))) + hdr := header{ReadTime: c.lastSendEnd.Add(c.delay).UnixNano(), Sz: int32(len(pkt))} + if err := binary.Write(c.Conn, binary.BigEndian, hdr); err != nil { + return n, err + } + x, err := c.Conn.Write(pkt) + n += x + if err != nil { + return n, err + } + } + return n, nil +} + +func (c *conn) Read(p []byte) (n int, err error) { + if c.readBuf.Len() == 0 { + var hdr header + if err := binary.Read(c.Conn, binary.BigEndian, &hdr); err != nil { + return 0, err + } + defer func() { sleep(time.Unix(0, hdr.ReadTime).Sub(now())) }() + + if _, err := io.CopyN(c.readBuf, c.Conn, int64(hdr.Sz)); err != nil { + return 0, err + } + } + // Read from readBuf. 
+ return c.readBuf.Read(p) +} + +// sync does a handshake and then measures the latency on the network in +// coordination with the other side. +func (c *conn) sync() error { + const ( + pingMsg = "syncPing" + warmup = 10 // minimum number of iterations to measure latency + giveUp = 50 // maximum number of iterations to measure latency + accuracy = time.Millisecond // req'd accuracy to stop early + goodRun = 3 // stop early if latency within accuracy this many times + ) + + type syncMsg struct { + SendT int64 // Time sent. If zero, stop. + RecvT int64 // Time received. If zero, fill in and respond. + } + + // A trivial handshake + if err := binary.Write(c.Conn, binary.BigEndian, []byte(pingMsg)); err != nil { + return err + } + var ping [8]byte + if err := binary.Read(c.Conn, binary.BigEndian, &ping); err != nil { + return err + } else if string(ping[:]) != pingMsg { + return fmt.Errorf("malformed handshake message: %v (want %q)", ping, pingMsg) + } + + // Both sides are alive and syncing. Calculate network delay / clock skew. + att := 0 + good := 0 + var latency time.Duration + localDone, remoteDone := false, false + send := true + for !localDone || !remoteDone { + if send { + if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{SendT: now().UnixNano()}); err != nil { + return err + } + att++ + send = false + } + + // Block until we get a syncMsg + m := syncMsg{} + if err := binary.Read(c.Conn, binary.BigEndian, &m); err != nil { + return err + } + + if m.RecvT == 0 { + // Message initiated from other side. + if m.SendT == 0 { + remoteDone = true + continue + } + // Send response. 
+ m.RecvT = now().UnixNano() + if err := binary.Write(c.Conn, binary.BigEndian, m); err != nil { + return err + } + continue + } + + lag := time.Duration(m.RecvT - m.SendT) + latency += lag + avgLatency := latency / time.Duration(att) + if e := lag - avgLatency; e > -accuracy && e < accuracy { + good++ + } else { + good = 0 + } + if att < giveUp && (att < warmup || good < goodRun) { + send = true + continue + } + localDone = true + latency = avgLatency + // Tell the other side we're done. + if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{}); err != nil { + return err + } + } + if c.network.Latency <= 0 { + return nil + } + c.delay = c.network.Latency - latency + if c.delay < 0 { + return fmt.Errorf("measured network latency (%v) higher than desired latency (%v)", latency, c.network.Latency) + } + return nil +} + +// Listener returns a net.Listener that wraps l and injects n's latency in its +// connections. +func (n *Network) Listener(l net.Listener) net.Listener { + return &listener{Listener: l, network: n} +} + +type listener struct { + net.Listener + network *Network +} + +func (l *listener) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + return l.network.Conn(c) +} + +// Dialer returns a Dialer that wraps d and injects n's latency in its +// connections. n's Latency is also injected to the connection's creation. +func (n *Network) Dialer(d Dialer) Dialer { + return func(network, address string) (net.Conn, error) { + conn, err := d(network, address) + if err != nil { + return nil, err + } + return n.Conn(conn) + } +} + +// TimeoutDialer returns a TimeoutDialer that wraps d and injects n's latency +// in its connections. n's Latency is also injected to the connection's +// creation. 
+func (n *Network) TimeoutDialer(d TimeoutDialer) TimeoutDialer { + return func(network, address string, timeout time.Duration) (net.Conn, error) { + conn, err := d(network, address, timeout) + if err != nil { + return nil, err + } + return n.Conn(conn) + } +} + +// ContextDialer returns a ContextDialer that wraps d and injects n's latency +// in its connections. n's Latency is also injected to the connection's +// creation. +func (n *Network) ContextDialer(d ContextDialer) ContextDialer { + return func(ctx context.Context, network, address string) (net.Conn, error) { + conn, err := d(ctx, network, address) + if err != nil { + return nil, err + } + return n.Conn(conn) + } +} + +// pktTime returns the time it takes to transmit one packet of data of size b +// in bytes. +func (n *Network) pktTime(b int) time.Duration { + if n.Kbps <= 0 { + return time.Duration(0) + } + return time.Duration(b) * time.Second / time.Duration(n.Kbps*(1024/8)) +} + +// Wrappers for testing + +var now = time.Now +var sleep = time.Sleep diff --git a/vendor/google.golang.org/grpc/benchmark/latency/latency_test.go b/vendor/google.golang.org/grpc/benchmark/latency/latency_test.go new file mode 100644 index 000000000..2298b1360 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/latency/latency_test.go @@ -0,0 +1,353 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package latency + +import ( + "bytes" + "fmt" + "net" + "reflect" + "sync" + "testing" + "time" +) + +// bufConn is a net.Conn implemented by a bytes.Buffer (which is a ReadWriter). +type bufConn struct { + *bytes.Buffer +} + +func (bufConn) Close() error { panic("unimplemented") } +func (bufConn) LocalAddr() net.Addr { panic("unimplemented") } +func (bufConn) RemoteAddr() net.Addr { panic("unimplemented") } +func (bufConn) SetDeadline(t time.Time) error { panic("unimplemneted") } +func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemneted") } +func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemneted") } + +func restoreHooks() func() { + s := sleep + n := now + return func() { + sleep = s + now = n + } +} + +func TestConn(t *testing.T) { + defer restoreHooks()() + + // Constant time. + now = func() time.Time { return time.Unix(123, 456) } + + // Capture sleep times for checking later. + var sleepTimes []time.Duration + sleep = func(t time.Duration) { sleepTimes = append(sleepTimes, t) } + + wantSleeps := func(want ...time.Duration) { + if !reflect.DeepEqual(want, sleepTimes) { + t.Fatalf("sleepTimes = %v; want %v", sleepTimes, want) + } + sleepTimes = nil + } + + // Use a fairly high latency to cause a large BDP and avoid sleeps while + // writing due to simulation of full buffers. + latency := 1 * time.Second + c, err := (&Network{Kbps: 1, Latency: latency, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + wantSleeps(latency) // Connection creation delay. + + // 1 kbps = 128 Bps. Divides evenly by 1 second using nanos. 
+ byteLatency := time.Duration(time.Second / 128) + + write := func(b []byte) { + n, err := c.Write(b) + if n != len(b) || err != nil { + t.Fatalf("c.Write(%v) = %v, %v; want %v, nil", b, n, err, len(b)) + } + } + + write([]byte{1, 2, 3, 4, 5}) // One full packet + pkt1Time := latency + byteLatency*5 + write([]byte{6}) // One partial packet + pkt2Time := pkt1Time + byteLatency + write([]byte{7, 8, 9, 10, 11, 12, 13}) // Two packets + pkt3Time := pkt2Time + byteLatency*5 + pkt4Time := pkt3Time + byteLatency*2 + + // No reads, so no sleeps yet. + wantSleeps() + + read := func(n int, want []byte) { + b := make([]byte, n) + if rd, err := c.Read(b); err != nil || rd != len(want) { + t.Fatalf("c.Read(<%v bytes>) = %v, %v; want %v, nil", n, rd, err, len(want)) + } + if !reflect.DeepEqual(b[:len(want)], want) { + t.Fatalf("read %v; want %v", b, want) + } + } + + read(1, []byte{1}) + wantSleeps(pkt1Time) + read(1, []byte{2}) + wantSleeps() + read(3, []byte{3, 4, 5}) + wantSleeps() + read(2, []byte{6}) + wantSleeps(pkt2Time) + read(2, []byte{7, 8}) + wantSleeps(pkt3Time) + read(10, []byte{9, 10, 11}) + wantSleeps() + read(10, []byte{12, 13}) + wantSleeps(pkt4Time) +} + +func TestSync(t *testing.T) { + defer restoreHooks()() + + // Infinitely fast CPU: time doesn't pass unless sleep is called. + tn := time.Unix(123, 0) + now = func() time.Time { return tn } + sleep = func(d time.Duration) { tn = tn.Add(d) } + + // Simulate a 20ms latency network, then run sync across that and expect to + // measure 20ms latency, or 10ms additional delay for a 30ms network. 
+ slowConn, err := (&Network{Kbps: 0, Latency: 20 * time.Millisecond, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + c, err := (&Network{Latency: 30 * time.Millisecond}).Conn(slowConn) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + if c.(*conn).delay != 10*time.Millisecond { + t.Fatalf("c.delay = %v; want 10ms", c.(*conn).delay) + } +} + +func TestSyncTooSlow(t *testing.T) { + defer restoreHooks()() + + // Infinitely fast CPU: time doesn't pass unless sleep is called. + tn := time.Unix(123, 0) + now = func() time.Time { return tn } + sleep = func(d time.Duration) { tn = tn.Add(d) } + + // Simulate a 10ms latency network, then attempt to simulate a 5ms latency + // network and expect an error. + slowConn, err := (&Network{Kbps: 0, Latency: 10 * time.Millisecond, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + + errWant := "measured network latency (10ms) higher than desired latency (5ms)" + if _, err := (&Network{Latency: 5 * time.Millisecond}).Conn(slowConn); err == nil || err.Error() != errWant { + t.Fatalf("Conn() = _, %q; want _, %q", err, errWant) + } +} + +func TestListenerAndDialer(t *testing.T) { + defer restoreHooks()() + + tn := time.Unix(123, 0) + startTime := tn + mu := &sync.Mutex{} + now = func() time.Time { + mu.Lock() + defer mu.Unlock() + return tn + } + + // Use a fairly high latency to cause a large BDP and avoid sleeps while + // writing due to simulation of full buffers. + n := &Network{Kbps: 2, Latency: 1 * time.Second, MTU: 10} + // 2 kbps = .25 kBps = 256 Bps + byteLatency := func(n int) time.Duration { + return time.Duration(n) * time.Second / 256 + } + + // Create a real listener and wrap it. 
+ l, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Unexpected error creating listener: %v", err) + } + defer l.Close() + l = n.Listener(l) + + var serverConn net.Conn + var scErr error + scDone := make(chan struct{}) + go func() { + serverConn, scErr = l.Accept() + close(scDone) + }() + + // Create a dialer and use it. + clientConn, err := n.TimeoutDialer(net.DialTimeout)("tcp", l.Addr().String(), 2*time.Second) + if err != nil { + t.Fatalf("Unexpected error dialing: %v", err) + } + defer clientConn.Close() + + // Block until server's Conn is available. + <-scDone + if scErr != nil { + t.Fatalf("Unexpected error listening: %v", scErr) + } + defer serverConn.Close() + + // sleep (only) advances tn. Done after connections established so sync detects zero delay. + sleep = func(d time.Duration) { + mu.Lock() + defer mu.Unlock() + if d > 0 { + tn = tn.Add(d) + } + } + + seq := func(a, b int) []byte { + buf := make([]byte, b-a) + for i := 0; i < b-a; i++ { + buf[i] = byte(i + a) + } + return buf + } + + pkt1 := seq(0, 10) + pkt2 := seq(10, 30) + pkt3 := seq(30, 35) + + write := func(c net.Conn, b []byte) { + n, err := c.Write(b) + if n != len(b) || err != nil { + t.Fatalf("c.Write(%v) = %v, %v; want %v, nil", b, n, err, len(b)) + } + } + + write(serverConn, pkt1) + write(serverConn, pkt2) + write(serverConn, pkt3) + write(clientConn, pkt3) + write(clientConn, pkt1) + write(clientConn, pkt2) + + if tn != startTime { + t.Fatalf("unexpected sleep in write; tn = %v; want %v", tn, startTime) + } + + read := func(c net.Conn, n int, want []byte, timeWant time.Time) { + b := make([]byte, n) + if rd, err := c.Read(b); err != nil || rd != len(want) { + t.Fatalf("c.Read(<%v bytes>) = %v, %v; want %v, nil (read: %v)", n, rd, err, len(want), b[:rd]) + } + if !reflect.DeepEqual(b[:len(want)], want) { + t.Fatalf("read %v; want %v", b, want) + } + if !tn.Equal(timeWant) { + t.Errorf("tn after read(%v) = %v; want %v", want, tn, timeWant) + } + } + + read(clientConn, 
len(pkt1)+1, pkt1, startTime.Add(n.Latency+byteLatency(len(pkt1)))) + read(serverConn, len(pkt3)+1, pkt3, tn) // tn was advanced by the above read; pkt3 is shorter than pkt1 + + read(clientConn, len(pkt2), pkt2[:10], startTime.Add(n.Latency+byteLatency(len(pkt1)+10))) + read(clientConn, len(pkt2), pkt2[10:], startTime.Add(n.Latency+byteLatency(len(pkt1)+len(pkt2)))) + read(clientConn, len(pkt3), pkt3, startTime.Add(n.Latency+byteLatency(len(pkt1)+len(pkt2)+len(pkt3)))) + + read(serverConn, len(pkt1), pkt1, tn) // tn already past the arrival time due to prior reads + read(serverConn, len(pkt2), pkt2[:10], tn) + read(serverConn, len(pkt2), pkt2[10:], tn) + + // Sleep awhile and make sure the read happens disregarding previous writes + // (lastSendEnd handling). + sleep(10 * time.Second) + write(clientConn, pkt1) + read(serverConn, len(pkt1), pkt1, tn.Add(n.Latency+byteLatency(len(pkt1)))) + + // Send, sleep longer than the network delay, then make sure the read happens + // instantly. + write(serverConn, pkt1) + sleep(10 * time.Second) + read(clientConn, len(pkt1), pkt1, tn) +} + +func TestBufferBloat(t *testing.T) { + defer restoreHooks()() + + // Infinitely fast CPU: time doesn't pass unless sleep is called. + tn := time.Unix(123, 0) + now = func() time.Time { return tn } + // Capture sleep times for checking later. + var sleepTimes []time.Duration + sleep = func(d time.Duration) { + sleepTimes = append(sleepTimes, d) + tn = tn.Add(d) + } + + wantSleeps := func(want ...time.Duration) error { + if !reflect.DeepEqual(want, sleepTimes) { + return fmt.Errorf("sleepTimes = %v; want %v", sleepTimes, want) + } + sleepTimes = nil + return nil + } + + n := &Network{Kbps: 8 /* 1KBps */, Latency: time.Second, MTU: 8} + bdpBytes := (n.Kbps * 1024 / 8) * int(n.Latency/time.Second) // 1024 + c, err := n.Conn(bufConn{&bytes.Buffer{}}) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + wantSleeps(n.Latency) // Connection creation delay. 
+ + write := func(n int, sleeps ...time.Duration) { + if wt, err := c.Write(make([]byte, n)); err != nil || wt != n { + t.Fatalf("c.Write(<%v bytes>) = %v, %v; want %v, nil", n, wt, err, n) + } + if err := wantSleeps(sleeps...); err != nil { + t.Fatalf("After writing %v bytes: %v", n, err) + } + } + + read := func(n int, sleeps ...time.Duration) { + if rd, err := c.Read(make([]byte, n)); err != nil || rd != n { + t.Fatalf("c.Read(_) = %v, %v; want %v, nil", rd, err, n) + } + if err := wantSleeps(sleeps...); err != nil { + t.Fatalf("After reading %v bytes: %v", n, err) + } + } + + write(8) // No reads and buffer not full, so no sleeps yet. + read(8, time.Second+n.pktTime(8)) + + write(bdpBytes) // Fill the buffer. + write(1) // We can send one extra packet even when the buffer is full. + write(n.MTU, n.pktTime(1)) // Make sure we sleep to clear the previous write. + write(1, n.pktTime(n.MTU)) + write(n.MTU+1, n.pktTime(1), n.pktTime(n.MTU)) + + tn = tn.Add(10 * time.Second) // Wait long enough for the buffer to clear. + write(bdpBytes) // No sleeps required. +} diff --git a/vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go b/vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go new file mode 100644 index 000000000..59355590d --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go @@ -0,0 +1,235 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package primitives_test contains benchmarks for various synchronization primitives +// available in Go. +package primitives_test + +import ( + "sync" + "sync/atomic" + "testing" +) + +func BenchmarkSelectClosed(b *testing.B) { + c := make(chan struct{}) + close(c) + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + select { + case <-c: + x++ + default: + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkSelectOpen(b *testing.B) { + c := make(chan struct{}) + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + select { + case <-c: + default: + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkAtomicBool(b *testing.B) { + c := int32(0) + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if atomic.LoadInt32(&c) == 0 { + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkAtomicValue(b *testing.B) { + c := atomic.Value{} + c.Store(0) + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if c.Load().(int) == 0 { + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkMutex(b *testing.B) { + c := sync.Mutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.Lock() + x++ + c.Unlock() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkRWMutex(b *testing.B) { + c := sync.RWMutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.RLock() + x++ + c.RUnlock() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkRWMutexW(b *testing.B) { + c := sync.RWMutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.Lock() + x++ + c.Unlock() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkMutexWithDefer(b *testing.B) { + c := sync.Mutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + func() { + c.Lock() + defer c.Unlock() + x++ + }() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + 
+func BenchmarkMutexWithClosureDefer(b *testing.B) { + c := sync.Mutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + func() { + c.Lock() + defer func() { c.Unlock() }() + x++ + }() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkMutexWithoutDefer(b *testing.B) { + c := sync.Mutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + func() { + c.Lock() + x++ + c.Unlock() + }() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +type myFooer struct{} + +func (myFooer) Foo() {} + +type fooer interface { + Foo() +} + +func BenchmarkInterfaceTypeAssertion(b *testing.B) { + // Call a separate function to avoid compiler optimizations. + runInterfaceTypeAssertion(b, myFooer{}) +} + +func runInterfaceTypeAssertion(b *testing.B, fer interface{}) { + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, ok := fer.(fooer); ok { + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkStructTypeAssertion(b *testing.B) { + // Call a separate function to avoid compiler optimizations. + runStructTypeAssertion(b, myFooer{}) +} + +func runStructTypeAssertion(b *testing.B, fer interface{}) { + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, ok := fer.(myFooer); ok { + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/server/main.go b/vendor/google.golang.org/grpc/benchmark/server/main.go new file mode 100644 index 000000000..dcce130e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/server/main.go @@ -0,0 +1,53 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "flag" + "math" + "net" + "net/http" + _ "net/http/pprof" + "time" + + "google.golang.org/grpc/benchmark" + "google.golang.org/grpc/grpclog" +) + +var ( + duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark server") +) + +func main() { + flag.Parse() + go func() { + lis, err := net.Listen("tcp", ":0") + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + grpclog.Println("Server profiling address: ", lis.Addr().String()) + if err := http.Serve(lis, nil); err != nil { + grpclog.Fatalf("Failed to serve: %v", err) + } + }() + addr, stopper := benchmark.StartServer(benchmark.ServerInfo{Addr: ":0", Type: "protobuf"}) // listen on all interfaces + grpclog.Println("Server Address: ", addr) + <-time.After(time.Duration(*duration) * time.Second) + stopper() +} diff --git a/vendor/google.golang.org/grpc/benchmark/stats/histogram.go b/vendor/google.golang.org/grpc/benchmark/stats/histogram.go new file mode 100644 index 000000000..f038d26ed --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/stats/histogram.go @@ -0,0 +1,222 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "bytes" + "fmt" + "io" + "log" + "math" + "strconv" + "strings" +) + +// Histogram accumulates values in the form of a histogram with +// exponentially increased bucket sizes. +type Histogram struct { + // Count is the total number of values added to the histogram. + Count int64 + // Sum is the sum of all the values added to the histogram. + Sum int64 + // SumOfSquares is the sum of squares of all values. + SumOfSquares int64 + // Min is the minimum of all the values added to the histogram. + Min int64 + // Max is the maximum of all the values added to the histogram. + Max int64 + // Buckets contains all the buckets of the histogram. + Buckets []HistogramBucket + + opts HistogramOptions + logBaseBucketSize float64 + oneOverLogOnePlusGrowthFactor float64 +} + +// HistogramOptions contains the parameters that define the histogram's buckets. +// The first bucket of the created histogram (with index 0) contains [min, min+n) +// where n = BaseBucketSize, min = MinValue. +// Bucket i (i>=1) contains [min + n * m^(i-1), min + n * m^i), where m = 1+GrowthFactor. +// The type of the values is int64. +type HistogramOptions struct { + // NumBuckets is the number of buckets. + NumBuckets int + // GrowthFactor is the growth factor of the buckets. A value of 0.1 + // indicates that bucket N+1 will be 10% larger than bucket N. + GrowthFactor float64 + // BaseBucketSize is the size of the first bucket. + BaseBucketSize float64 + // MinValue is the lower bound of the first bucket. 
+ MinValue int64 +} + +// HistogramBucket represents one histogram bucket. +type HistogramBucket struct { + // LowBound is the lower bound of the bucket. + LowBound float64 + // Count is the number of values in the bucket. + Count int64 +} + +// NewHistogram returns a pointer to a new Histogram object that was created +// with the provided options. +func NewHistogram(opts HistogramOptions) *Histogram { + if opts.NumBuckets == 0 { + opts.NumBuckets = 32 + } + if opts.BaseBucketSize == 0.0 { + opts.BaseBucketSize = 1.0 + } + h := Histogram{ + Buckets: make([]HistogramBucket, opts.NumBuckets), + Min: math.MaxInt64, + Max: math.MinInt64, + + opts: opts, + logBaseBucketSize: math.Log(opts.BaseBucketSize), + oneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor), + } + m := 1.0 + opts.GrowthFactor + delta := opts.BaseBucketSize + h.Buckets[0].LowBound = float64(opts.MinValue) + for i := 1; i < opts.NumBuckets; i++ { + h.Buckets[i].LowBound = float64(opts.MinValue) + delta + delta = delta * m + } + return &h +} + +// Print writes textual output of the histogram values. +func (h *Histogram) Print(w io.Writer) { + h.PrintWithUnit(w, 1) +} + +// PrintWithUnit writes textual output of the histogram values . +// Data in histogram is divided by a Unit before print. +func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) { + avg := float64(h.Sum) / float64(h.Count) + fmt.Fprintf(w, "Count: %d Min: %5.1f Max: %5.1f Avg: %.2f\n", h.Count, float64(h.Min)/unit, float64(h.Max)/unit, avg/unit) + fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60)) + if h.Count <= 0 { + return + } + + maxBucketDigitLen := len(strconv.FormatFloat(h.Buckets[len(h.Buckets)-1].LowBound, 'f', 6, 64)) + if maxBucketDigitLen < 3 { + // For "inf". 
+ maxBucketDigitLen = 3 + } + maxCountDigitLen := len(strconv.FormatInt(h.Count, 10)) + percentMulti := 100 / float64(h.Count) + + accCount := int64(0) + for i, b := range h.Buckets { + fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound/unit) + if i+1 < len(h.Buckets) { + fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound/unit) + } else { + fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf") + } + + accCount += b.Count + fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti) + + const barScale = 0.1 + barLength := int(float64(b.Count)*percentMulti*barScale + 0.5) + fmt.Fprintf(w, " %s\n", strings.Repeat("#", barLength)) + } +} + +// String returns the textual output of the histogram values as string. +func (h *Histogram) String() string { + var b bytes.Buffer + h.Print(&b) + return b.String() +} + +// Clear resets all the content of histogram. +func (h *Histogram) Clear() { + h.Count = 0 + h.Sum = 0 + h.SumOfSquares = 0 + h.Min = math.MaxInt64 + h.Max = math.MinInt64 + for i := range h.Buckets { + h.Buckets[i].Count = 0 + } +} + +// Opts returns a copy of the options used to create the Histogram. +func (h *Histogram) Opts() HistogramOptions { + return h.opts +} + +// Add adds a value to the histogram. 
+func (h *Histogram) Add(value int64) error { + bucket, err := h.findBucket(value) + if err != nil { + return err + } + h.Buckets[bucket].Count++ + h.Count++ + h.Sum += value + h.SumOfSquares += value * value + if value < h.Min { + h.Min = value + } + if value > h.Max { + h.Max = value + } + return nil +} + +func (h *Histogram) findBucket(value int64) (int, error) { + delta := float64(value - h.opts.MinValue) + var b int + if delta >= h.opts.BaseBucketSize { + // b = log_{1+growthFactor} (delta / baseBucketSize) + 1 + // = log(delta / baseBucketSize) / log(1+growthFactor) + 1 + // = (log(delta) - log(baseBucketSize)) * (1 / log(1+growthFactor)) + 1 + b = int((math.Log(delta)-h.logBaseBucketSize)*h.oneOverLogOnePlusGrowthFactor + 1) + } + if b >= len(h.Buckets) { + return 0, fmt.Errorf("no bucket for value: %d", value) + } + return b, nil +} + +// Merge takes another histogram h2, and merges its content into h. +// The two histograms must be created by equivalent HistogramOptions. +func (h *Histogram) Merge(h2 *Histogram) { + if h.opts != h2.opts { + log.Fatalf("failed to merge histograms, created by inequivalent options") + } + h.Count += h2.Count + h.Sum += h2.Sum + h.SumOfSquares += h2.SumOfSquares + if h2.Min < h.Min { + h.Min = h2.Min + } + if h2.Max > h.Max { + h.Max = h2.Max + } + for i, b := range h2.Buckets { + h.Buckets[i].Count += b.Count + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/stats/stats.go b/vendor/google.golang.org/grpc/benchmark/stats/stats.go new file mode 100644 index 000000000..412daead0 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/stats/stats.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "bytes" + "fmt" + "io" + "math" + "sort" + "strconv" + "time" +) + +// Features contains most fields for a benchmark +type Features struct { + NetworkMode string + EnableTrace bool + Latency time.Duration + Kbps int + Mtu int + MaxConcurrentCalls int + ReqSizeBytes int + RespSizeBytes int + EnableCompressor bool +} + +// String returns the textual output of the Features as string. +func (f Features) String() string { + return fmt.Sprintf("traceMode_%t-latency_%s-kbps_%#v-MTU_%#v-maxConcurrentCalls_"+ + "%#v-reqSize_%#vB-respSize_%#vB-Compressor_%t", f.EnableTrace, + f.Latency.String(), f.Kbps, f.Mtu, f.MaxConcurrentCalls, f.ReqSizeBytes, f.RespSizeBytes, f.EnableCompressor) +} + +// PartialPrintString can print certain features with different format. 
+func PartialPrintString(noneEmptyPos []bool, f Features, shared bool) string { + s := "" + var ( + prefix, suffix, linker string + isNetwork bool + ) + if shared { + suffix = "\n" + linker = ": " + } else { + prefix = "-" + linker = "_" + } + if noneEmptyPos[0] { + s += fmt.Sprintf("%sTrace%s%t%s", prefix, linker, f.EnableCompressor, suffix) + } + if shared && f.NetworkMode != "" { + s += fmt.Sprintf("Network: %s \n", f.NetworkMode) + isNetwork = true + } + if !isNetwork { + if noneEmptyPos[1] { + s += fmt.Sprintf("%slatency%s%s%s", prefix, linker, f.Latency.String(), suffix) + } + if noneEmptyPos[2] { + s += fmt.Sprintf("%skbps%s%#v%s", prefix, linker, f.Kbps, suffix) + } + if noneEmptyPos[3] { + s += fmt.Sprintf("%sMTU%s%#v%s", prefix, linker, f.Mtu, suffix) + } + } + if noneEmptyPos[4] { + s += fmt.Sprintf("%sCallers%s%#v%s", prefix, linker, f.MaxConcurrentCalls, suffix) + } + if noneEmptyPos[5] { + s += fmt.Sprintf("%sreqSize%s%#vB%s", prefix, linker, f.ReqSizeBytes, suffix) + } + if noneEmptyPos[6] { + s += fmt.Sprintf("%srespSize%s%#vB%s", prefix, linker, f.RespSizeBytes, suffix) + } + if noneEmptyPos[7] { + s += fmt.Sprintf("%sCompressor%s%t%s", prefix, linker, f.EnableCompressor, suffix) + } + return s +} + +type percentLatency struct { + Percent int + Value time.Duration +} + +// BenchResults records features and result of a benchmark. +type BenchResults struct { + RunMode string + Features Features + Latency []percentLatency + Operations int + NsPerOp int64 + AllocedBytesPerOp int64 + AllocsPerOp int64 + SharedPosion []bool +} + +// SetBenchmarkResult sets features of benchmark and basic results. 
+func (stats *Stats) SetBenchmarkResult(mode string, features Features, o int, allocdBytes, allocs int64, sharedPos []bool) { + stats.result.RunMode = mode + stats.result.Features = features + stats.result.Operations = o + stats.result.AllocedBytesPerOp = allocdBytes + stats.result.AllocsPerOp = allocs + stats.result.SharedPosion = sharedPos +} + +// GetBenchmarkResults returns the result of the benchmark including features and result. +func (stats *Stats) GetBenchmarkResults() BenchResults { + return stats.result +} + +// BenchString output latency stats as the format as time + unit. +func (stats *Stats) BenchString() string { + stats.maybeUpdate() + s := stats.result + res := s.RunMode + "-" + s.Features.String() + ": \n" + if len(s.Latency) != 0 { + var statsUnit = s.Latency[0].Value + var timeUnit = fmt.Sprintf("%v", statsUnit)[1:] + for i := 1; i < len(s.Latency)-1; i++ { + res += fmt.Sprintf("%d_Latency: %s %s \t", s.Latency[i].Percent, + strconv.FormatFloat(float64(s.Latency[i].Value)/float64(statsUnit), 'f', 4, 64), timeUnit) + } + res += fmt.Sprintf("Avg latency: %s %s \t", + strconv.FormatFloat(float64(s.Latency[len(s.Latency)-1].Value)/float64(statsUnit), 'f', 4, 64), timeUnit) + } + res += fmt.Sprintf("Count: %v \t", s.Operations) + res += fmt.Sprintf("%v Bytes/op\t", s.AllocedBytesPerOp) + res += fmt.Sprintf("%v Allocs/op\t", s.AllocsPerOp) + + return res +} + +// Stats is a simple helper for gathering additional statistics like histogram +// during benchmarks. This is not thread safe. +type Stats struct { + numBuckets int + unit time.Duration + min, max int64 + histogram *Histogram + + durations durationSlice + dirty bool + + sortLatency bool + result BenchResults +} + +type durationSlice []time.Duration + +// NewStats creates a new Stats instance. If numBuckets is not positive, +// the default value (16) will be used. 
+func NewStats(numBuckets int) *Stats { + if numBuckets <= 0 { + numBuckets = 16 + } + return &Stats{ + // Use one more bucket for the last unbounded bucket. + numBuckets: numBuckets + 1, + durations: make(durationSlice, 0, 100000), + } +} + +// Add adds an elapsed time per operation to the stats. +func (stats *Stats) Add(d time.Duration) { + stats.durations = append(stats.durations, d) + stats.dirty = true +} + +// Clear resets the stats, removing all values. +func (stats *Stats) Clear() { + stats.durations = stats.durations[:0] + stats.histogram = nil + stats.dirty = false + stats.result = BenchResults{} +} + +//Sort method for durations +func (a durationSlice) Len() int { return len(a) } +func (a durationSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a durationSlice) Less(i, j int) bool { return a[i] < a[j] } +func max(a, b int64) int64 { + if a > b { + return a + } + return b +} + +// maybeUpdate updates internal stat data if there was any newly added +// stats since this was updated. +func (stats *Stats) maybeUpdate() { + if !stats.dirty { + return + } + + if stats.sortLatency { + sort.Sort(stats.durations) + stats.min = int64(stats.durations[0]) + stats.max = int64(stats.durations[len(stats.durations)-1]) + } + + stats.min = math.MaxInt64 + stats.max = 0 + for _, d := range stats.durations { + if stats.min > int64(d) { + stats.min = int64(d) + } + if stats.max < int64(d) { + stats.max = int64(d) + } + } + + // Use the largest unit that can represent the minimum time duration. + stats.unit = time.Nanosecond + for _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} { + if stats.min <= int64(u) { + break + } + stats.unit = u + } + + numBuckets := stats.numBuckets + if n := int(stats.max - stats.min + 1); n < numBuckets { + numBuckets = n + } + stats.histogram = NewHistogram(HistogramOptions{ + NumBuckets: numBuckets, + // max-min(lower bound of last bucket) = (1 + growthFactor)^(numBuckets-2) * baseBucketSize. 
+ GrowthFactor: math.Pow(float64(stats.max-stats.min), 1/float64(numBuckets-2)) - 1, + BaseBucketSize: 1.0, + MinValue: stats.min}) + + for _, d := range stats.durations { + stats.histogram.Add(int64(d)) + } + + stats.dirty = false + + if stats.durations.Len() != 0 { + var percentToObserve = []int{50, 90} + // First data record min unit from the latency result. + stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: stats.unit}) + for _, position := range percentToObserve { + stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: position, Value: stats.durations[max(stats.histogram.Count*int64(position)/100-1, 0)]}) + } + // Last data record the average latency. + avg := float64(stats.histogram.Sum) / float64(stats.histogram.Count) + stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: time.Duration(avg)}) + } +} + +// SortLatency blocks the output +func (stats *Stats) SortLatency() { + stats.sortLatency = true +} + +// Print writes textual output of the Stats. +func (stats *Stats) Print(w io.Writer) { + stats.maybeUpdate() + if stats.histogram == nil { + fmt.Fprint(w, "Histogram (empty)\n") + } else { + fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:]) + stats.histogram.PrintWithUnit(w, float64(stats.unit)) + } +} + +// String returns the textual output of the Stats as string. +func (stats *Stats) String() string { + var b bytes.Buffer + stats.Print(&b) + return b.String() +} diff --git a/vendor/google.golang.org/grpc/benchmark/stats/util.go b/vendor/google.golang.org/grpc/benchmark/stats/util.go new file mode 100644 index 000000000..f3bb3a364 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/stats/util.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "bufio" + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strings" + "sync" + "testing" +) + +var ( + curB *testing.B + curBenchName string + curStats map[string]*Stats + + orgStdout *os.File + nextOutPos int + + injectCond *sync.Cond + injectDone chan struct{} +) + +// AddStats adds a new unnamed Stats instance to the current benchmark. You need +// to run benchmarks by calling RunTestMain() to inject the stats to the +// benchmark results. If numBuckets is not positive, the default value (16) will +// be used. Please note that this calls b.ResetTimer() since it may be blocked +// until the previous benchmark stats is printed out. So AddStats() should +// typically be called at the very beginning of each benchmark function. +func AddStats(b *testing.B, numBuckets int) *Stats { + return AddStatsWithName(b, "", numBuckets) +} + +// AddStatsWithName adds a new named Stats instance to the current benchmark. +// With this, you can add multiple stats in a single benchmark. You need +// to run benchmarks by calling RunTestMain() to inject the stats to the +// benchmark results. If numBuckets is not positive, the default value (16) will +// be used. Please note that this calls b.ResetTimer() since it may be blocked +// until the previous benchmark stats is printed out. So AddStatsWithName() +// should typically be called at the very beginning of each benchmark function. 
+func AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats { + var benchName string + for i := 1; ; i++ { + pc, _, _, ok := runtime.Caller(i) + if !ok { + panic("benchmark function not found") + } + p := strings.Split(runtime.FuncForPC(pc).Name(), ".") + benchName = p[len(p)-1] + if strings.HasPrefix(benchName, "run") { + break + } + } + procs := runtime.GOMAXPROCS(-1) + if procs != 1 { + benchName = fmt.Sprintf("%s-%d", benchName, procs) + } + + stats := NewStats(numBuckets) + + if injectCond != nil { + // We need to wait until the previous benchmark stats is printed out. + injectCond.L.Lock() + for curB != nil && curBenchName != benchName { + injectCond.Wait() + } + + curB = b + curBenchName = benchName + curStats[name] = stats + + injectCond.L.Unlock() + } + + b.ResetTimer() + return stats +} + +// RunTestMain runs the tests with enabling injection of benchmark stats. It +// returns an exit code to pass to os.Exit. +func RunTestMain(m *testing.M) int { + startStatsInjector() + defer stopStatsInjector() + return m.Run() +} + +// startStatsInjector starts stats injection to benchmark results. +func startStatsInjector() { + orgStdout = os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + nextOutPos = 0 + + resetCurBenchStats() + + injectCond = sync.NewCond(&sync.Mutex{}) + injectDone = make(chan struct{}) + go func() { + defer close(injectDone) + + scanner := bufio.NewScanner(r) + scanner.Split(splitLines) + for scanner.Scan() { + injectStatsIfFinished(scanner.Text()) + } + if err := scanner.Err(); err != nil { + panic(err) + } + }() +} + +// stopStatsInjector stops stats injection and restores os.Stdout. +func stopStatsInjector() { + os.Stdout.Close() + <-injectDone + injectCond = nil + os.Stdout = orgStdout +} + +// splitLines is a split function for a bufio.Scanner that returns each line +// of text, teeing texts to the original stdout even before each line ends. 
+func splitLines(data []byte, eof bool) (advance int, token []byte, err error) { + if eof && len(data) == 0 { + return 0, nil, nil + } + + if i := bytes.IndexByte(data, '\n'); i >= 0 { + orgStdout.Write(data[nextOutPos : i+1]) + nextOutPos = 0 + return i + 1, data[0:i], nil + } + + orgStdout.Write(data[nextOutPos:]) + nextOutPos = len(data) + + if eof { + // This is a final, non-terminated line. Return it. + return len(data), data, nil + } + + return 0, nil, nil +} + +// injectStatsIfFinished prints out the stats if the current benchmark finishes. +func injectStatsIfFinished(line string) { + injectCond.L.Lock() + defer injectCond.L.Unlock() + // We assume that the benchmark results start with "Benchmark". + if curB == nil || !strings.HasPrefix(line, "Benchmark") { + return + } + + if !curB.Failed() { + // Output all stats in alphabetical order. + names := make([]string, 0, len(curStats)) + for name := range curStats { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + stats := curStats[name] + // The output of stats starts with a header like "Histogram (unit: ms)" + // followed by statistical properties and the buckets. Add the stats name + // if it is a named stats and indent them as Go testing outputs. + lines := strings.Split(stats.String(), "\n") + if n := len(lines); n > 0 { + if name != "" { + name = ": " + name + } + fmt.Fprintf(orgStdout, "--- %s%s\n", lines[0], name) + for _, line := range lines[1 : n-1] { + fmt.Fprintf(orgStdout, "\t%s\n", line) + } + } + } + } + + resetCurBenchStats() + injectCond.Signal() +} + +// resetCurBenchStats resets the current benchmark stats. 
+func resetCurBenchStats() { + curB = nil + curBenchName = "" + curStats = make(map[string]*Stats) +} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go new file mode 100644 index 000000000..9db1d8504 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go @@ -0,0 +1,392 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "flag" + "math" + "runtime" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/testdata" +) + +var ( + caFile = flag.String("ca_file", "", "The file containing the CA root cert file") +) + +type lockingHistogram struct { + mu sync.Mutex + histogram *stats.Histogram +} + +func (h *lockingHistogram) add(value int64) { + h.mu.Lock() + defer h.mu.Unlock() + h.histogram.Add(value) +} + +// swap sets h.histogram to new, and returns its old value. 
+func (h *lockingHistogram) swap(new *stats.Histogram) *stats.Histogram { + h.mu.Lock() + defer h.mu.Unlock() + old := h.histogram + h.histogram = new + return old +} + +func (h *lockingHistogram) mergeInto(merged *stats.Histogram) { + h.mu.Lock() + defer h.mu.Unlock() + merged.Merge(h.histogram) +} + +type benchmarkClient struct { + closeConns func() + stop chan bool + lastResetTime time.Time + histogramOptions stats.HistogramOptions + lockingHistograms []lockingHistogram + rusageLastReset *syscall.Rusage +} + +func printClientConfig(config *testpb.ClientConfig) { + // Some config options are ignored: + // - client type: + // will always create sync client + // - async client threads. + // - core list + grpclog.Printf(" * client type: %v (ignored, always creates sync client)", config.ClientType) + grpclog.Printf(" * async client threads: %v (ignored)", config.AsyncClientThreads) + // TODO: use cores specified by CoreList when setting list of cores is supported in go. + grpclog.Printf(" * core list: %v (ignored)", config.CoreList) + + grpclog.Printf(" - security params: %v", config.SecurityParams) + grpclog.Printf(" - core limit: %v", config.CoreLimit) + grpclog.Printf(" - payload config: %v", config.PayloadConfig) + grpclog.Printf(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel) + grpclog.Printf(" - channel number: %v", config.ClientChannels) + grpclog.Printf(" - load params: %v", config.LoadParams) + grpclog.Printf(" - rpc type: %v", config.RpcType) + grpclog.Printf(" - histogram params: %v", config.HistogramParams) + grpclog.Printf(" - server targets: %v", config.ServerTargets) +} + +func setupClientEnv(config *testpb.ClientConfig) { + // Use all cpu cores available on machine by default. + // TODO: Revisit this for the optimal default setup. + if config.CoreLimit > 0 { + runtime.GOMAXPROCS(int(config.CoreLimit)) + } else { + runtime.GOMAXPROCS(runtime.NumCPU()) + } +} + +// createConns creates connections according to given config. 
+// It returns the connections and corresponding function to close them. +// It returns non-nil error if there is anything wrong. +func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) { + var opts []grpc.DialOption + + // Sanity check for client type. + switch config.ClientType { + case testpb.ClientType_SYNC_CLIENT: + case testpb.ClientType_ASYNC_CLIENT: + default: + return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknow client type: %v", config.ClientType) + } + + // Check and set security options. + if config.SecurityParams != nil { + if *caFile == "" { + *caFile = testdata.Path("ca.pem") + } + creds, err := credentials.NewClientTLSFromFile(*caFile, config.SecurityParams.ServerHostOverride) + if err != nil { + return nil, nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + + // Use byteBufCodec if it is required. + if config.PayloadConfig != nil { + switch config.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + opts = append(opts, grpc.WithCodec(byteBufCodec{})) + case *testpb.PayloadConfig_SimpleParams: + default: + return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) + } + } + + // Create connections. + connCount := int(config.ClientChannels) + conns := make([]*grpc.ClientConn, connCount, connCount) + for connIndex := 0; connIndex < connCount; connIndex++ { + conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...) + } + + return conns, func() { + for _, conn := range conns { + conn.Close() + } + }, nil +} + +func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error { + // Read payload size and type from config. 
+ var ( + payloadReqSize, payloadRespSize int + payloadType string + ) + if config.PayloadConfig != nil { + switch c := config.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + payloadReqSize = int(c.BytebufParams.ReqSize) + payloadRespSize = int(c.BytebufParams.RespSize) + payloadType = "bytebuf" + case *testpb.PayloadConfig_SimpleParams: + payloadReqSize = int(c.SimpleParams.ReqSize) + payloadRespSize = int(c.SimpleParams.RespSize) + payloadType = "protobuf" + default: + return grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) + } + } + + // TODO add open loop distribution. + switch config.LoadParams.Load.(type) { + case *testpb.LoadParams_ClosedLoop: + case *testpb.LoadParams_Poisson: + return grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) + default: + return grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams) + } + + rpcCountPerConn := int(config.OutstandingRpcsPerChannel) + + switch config.RpcType { + case testpb.RpcType_UNARY: + bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) + // TODO open loop. + case testpb.RpcType_STREAMING: + bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) + // TODO open loop. + default: + return grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType) + } + + return nil +} + +func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) { + printClientConfig(config) + + // Set running environment like how many cores to use. 
+ setupClientEnv(config) + + conns, closeConns, err := createConns(config) + if err != nil { + return nil, err + } + + rusage := new(syscall.Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + + rpcCountPerConn := int(config.OutstandingRpcsPerChannel) + bc := &benchmarkClient{ + histogramOptions: stats.HistogramOptions{ + NumBuckets: int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1, + GrowthFactor: config.HistogramParams.Resolution, + BaseBucketSize: (1 + config.HistogramParams.Resolution), + MinValue: 0, + }, + lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns), rpcCountPerConn*len(conns)), + + stop: make(chan bool), + lastResetTime: time.Now(), + closeConns: closeConns, + rusageLastReset: rusage, + } + + if err = performRPCs(config, conns, bc); err != nil { + // Close all connections if performRPCs failed. + closeConns() + return nil, err + } + + return bc, nil +} + +func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { + for ic, conn := range conns { + client := testpb.NewBenchmarkServiceClient(conn) + // For each connection, create rpcCountPerConn goroutines to do rpc. + for j := 0; j < rpcCountPerConn; j++ { + // Create histogram for each goroutine. + idx := ic*rpcCountPerConn + j + bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions) + // Start goroutine on the created mutex and histogram. + go func(idx int) { + // TODO: do warm up if necessary. + // Now relying on worker client to reserve time to do warm up. + // The worker client needs to wait for some time after client is created, + // before starting benchmark. 
+ done := make(chan bool) + for { + go func() { + start := time.Now() + if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { + select { + case <-bc.stop: + case done <- false: + } + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + select { + case <-bc.stop: + case done <- true: + } + }() + select { + case <-bc.stop: + return + case <-done: + } + } + }(idx) + } + } +} + +func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { + var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error + if payloadType == "bytebuf" { + doRPC = benchmark.DoByteBufStreamingRoundTrip + } else { + doRPC = benchmark.DoStreamingRoundTrip + } + for ic, conn := range conns { + // For each connection, create rpcCountPerConn goroutines to do rpc. + for j := 0; j < rpcCountPerConn; j++ { + c := testpb.NewBenchmarkServiceClient(conn) + stream, err := c.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err) + } + // Create histogram for each goroutine. + idx := ic*rpcCountPerConn + j + bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions) + // Start goroutine on the created mutex and histogram. + go func(idx int) { + // TODO: do warm up if necessary. + // Now relying on worker client to reserve time to do warm up. + // The worker client needs to wait for some time after client is created, + // before starting benchmark. + for { + start := time.Now() + if err := doRPC(stream, reqSize, respSize); err != nil { + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + select { + case <-bc.stop: + return + default: + } + } + }(idx) + } + } +} + +// getStats returns the stats for benchmark client. +// It resets lastResetTime and all histograms if argument reset is true. 
+func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { + var wallTimeElapsed, uTimeElapsed, sTimeElapsed float64 + mergedHistogram := stats.NewHistogram(bc.histogramOptions) + latestRusage := new(syscall.Rusage) + + if reset { + // Merging histogram may take some time. + // Put all histograms aside and merge later. + toMerge := make([]*stats.Histogram, len(bc.lockingHistograms), len(bc.lockingHistograms)) + for i := range bc.lockingHistograms { + toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions)) + } + + for i := 0; i < len(toMerge); i++ { + mergedHistogram.Merge(toMerge[i]) + } + + wallTimeElapsed = time.Since(bc.lastResetTime).Seconds() + syscall.Getrusage(syscall.RUSAGE_SELF, latestRusage) + uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage) + + bc.rusageLastReset = latestRusage + bc.lastResetTime = time.Now() + } else { + // Merge only, not reset. + for i := range bc.lockingHistograms { + bc.lockingHistograms[i].mergeInto(mergedHistogram) + } + + wallTimeElapsed = time.Since(bc.lastResetTime).Seconds() + syscall.Getrusage(syscall.RUSAGE_SELF, latestRusage) + uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage) + } + + b := make([]uint32, len(mergedHistogram.Buckets), len(mergedHistogram.Buckets)) + for i, v := range mergedHistogram.Buckets { + b[i] = uint32(v.Count) + } + return &testpb.ClientStats{ + Latencies: &testpb.HistogramData{ + Bucket: b, + MinSeen: float64(mergedHistogram.Min), + MaxSeen: float64(mergedHistogram.Max), + Sum: float64(mergedHistogram.Sum), + SumOfSquares: float64(mergedHistogram.SumOfSquares), + Count: float64(mergedHistogram.Count), + }, + TimeElapsed: wallTimeElapsed, + TimeUser: uTimeElapsed, + TimeSystem: sTimeElapsed, + } +} + +func (bc *benchmarkClient) shutdown() { + close(bc.stop) + bc.closeConns() +} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go 
b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go new file mode 100644 index 000000000..238dfdebc --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go @@ -0,0 +1,184 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "flag" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/testdata" +) + +var ( + certFile = flag.String("tls_cert_file", "", "The TLS cert file") + keyFile = flag.String("tls_key_file", "", "The TLS key file") +) + +type benchmarkServer struct { + port int + cores int + closeFunc func() + mu sync.RWMutex + lastResetTime time.Time + rusageLastReset *syscall.Rusage +} + +func printServerConfig(config *testpb.ServerConfig) { + // Some config options are ignored: + // - server type: + // will always start sync server + // - async server threads + // - core list + grpclog.Printf(" * server type: %v (ignored, always starts sync server)", config.ServerType) + grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads) + // TODO: use cores specified by CoreList when setting list of cores is supported in go. 
+ grpclog.Printf(" * core list: %v (ignored)", config.CoreList) + + grpclog.Printf(" - security params: %v", config.SecurityParams) + grpclog.Printf(" - core limit: %v", config.CoreLimit) + grpclog.Printf(" - port: %v", config.Port) + grpclog.Printf(" - payload config: %v", config.PayloadConfig) +} + +func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { + printServerConfig(config) + + // Use all cpu cores available on machine by default. + // TODO: Revisit this for the optimal default setup. + numOfCores := runtime.NumCPU() + if config.CoreLimit > 0 { + numOfCores = int(config.CoreLimit) + } + runtime.GOMAXPROCS(numOfCores) + + var opts []grpc.ServerOption + + // Sanity check for server type. + switch config.ServerType { + case testpb.ServerType_SYNC_SERVER: + case testpb.ServerType_ASYNC_SERVER: + case testpb.ServerType_ASYNC_GENERIC_SERVER: + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow server type: %v", config.ServerType) + } + + // Set security options. + if config.SecurityParams != nil { + if *certFile == "" { + *certFile = testdata.Path("server1.pem") + } + if *keyFile == "" { + *keyFile = testdata.Path("server1.key") + } + creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) + if err != nil { + grpclog.Fatalf("failed to generate credentials %v", err) + } + opts = append(opts, grpc.Creds(creds)) + } + + // Priority: config.Port > serverPort > default (0). + port := int(config.Port) + if port == 0 { + port = serverPort + } + + // Create different benchmark server according to config. 
+ var ( + addr string + closeFunc func() + err error + ) + if config.PayloadConfig != nil { + switch payload := config.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + opts = append(opts, grpc.CustomCodec(byteBufCodec{})) + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "bytebuf", + Metadata: payload.BytebufParams.RespSize, + }, opts...) + case *testpb.PayloadConfig_SimpleParams: + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "protobuf", + }, opts...) + case *testpb.PayloadConfig_ComplexParams: + return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig) + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) + } + } else { + // Start protobuf server if payload config is nil. + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "protobuf", + }, opts...) + } + + grpclog.Printf("benchmark server listening at %v", addr) + addrSplitted := strings.Split(addr, ":") + p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1]) + if err != nil { + grpclog.Fatalf("failed to get port number from server address: %v", err) + } + + rusage := new(syscall.Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + + return &benchmarkServer{ + port: p, + cores: numOfCores, + closeFunc: closeFunc, + lastResetTime: time.Now(), + rusageLastReset: rusage, + }, nil +} + +// getStats returns the stats for benchmark server. +// It resets lastResetTime if argument reset is true. 
+func (bs *benchmarkServer) getStats(reset bool) *testpb.ServerStats { + bs.mu.RLock() + defer bs.mu.RUnlock() + wallTimeElapsed := time.Since(bs.lastResetTime).Seconds() + rusageLatest := new(syscall.Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusageLatest) + uTimeElapsed, sTimeElapsed := cpuTimeDiff(bs.rusageLastReset, rusageLatest) + + if reset { + bs.lastResetTime = time.Now() + bs.rusageLastReset = rusageLatest + } + return &testpb.ServerStats{ + TimeElapsed: wallTimeElapsed, + TimeUser: uTimeElapsed, + TimeSystem: sTimeElapsed, + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/main.go b/vendor/google.golang.org/grpc/benchmark/worker/main.go new file mode 100644 index 000000000..2b1ba985b --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/worker/main.go @@ -0,0 +1,229 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import ( + "flag" + "fmt" + "io" + "net" + "net/http" + _ "net/http/pprof" + "runtime" + "strconv" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +var ( + driverPort = flag.Int("driver_port", 10000, "port for communication with driver") + serverPort = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message") + pprofPort = flag.Int("pprof_port", -1, "Port for pprof debug server to listen on. Pprof server doesn't start if unset") + blockProfRate = flag.Int("block_prof_rate", 0, "fraction of goroutine blocking events to report in blocking profile") +) + +type byteBufCodec struct { +} + +func (byteBufCodec) Marshal(v interface{}) ([]byte, error) { + b, ok := v.(*[]byte) + if !ok { + return nil, fmt.Errorf("failed to marshal: %v is not type of *[]byte", v) + } + return *b, nil +} + +func (byteBufCodec) Unmarshal(data []byte, v interface{}) error { + b, ok := v.(*[]byte) + if !ok { + return fmt.Errorf("failed to marshal: %v is not type of *[]byte", v) + } + *b = data + return nil +} + +func (byteBufCodec) String() string { + return "bytebuffer" +} + +// workerServer implements WorkerService rpc handlers. +// It can create benchmarkServer or benchmarkClient on demand. +type workerServer struct { + stop chan<- bool + serverPort int +} + +func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { + var bs *benchmarkServer + defer func() { + // Close benchmark server when stream ends. 
+ grpclog.Printf("closing benchmark server") + if bs != nil { + bs.closeFunc() + } + }() + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var out *testpb.ServerStatus + switch argtype := in.Argtype.(type) { + case *testpb.ServerArgs_Setup: + grpclog.Printf("server setup received:") + if bs != nil { + grpclog.Printf("server setup received when server already exists, closing the existing server") + bs.closeFunc() + } + bs, err = startBenchmarkServer(argtype.Setup, s.serverPort) + if err != nil { + return err + } + out = &testpb.ServerStatus{ + Stats: bs.getStats(false), + Port: int32(bs.port), + Cores: int32(bs.cores), + } + + case *testpb.ServerArgs_Mark: + grpclog.Printf("server mark received:") + grpclog.Printf(" - %v", argtype) + if bs == nil { + return grpc.Errorf(codes.InvalidArgument, "server does not exist when mark received") + } + out = &testpb.ServerStatus{ + Stats: bs.getStats(argtype.Mark.Reset_), + Port: int32(bs.port), + Cores: int32(bs.cores), + } + } + + if err := stream.Send(out); err != nil { + return err + } + } +} + +func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { + var bc *benchmarkClient + defer func() { + // Shut down benchmark client when stream ends. 
+ grpclog.Printf("shuting down benchmark client") + if bc != nil { + bc.shutdown() + } + }() + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var out *testpb.ClientStatus + switch t := in.Argtype.(type) { + case *testpb.ClientArgs_Setup: + grpclog.Printf("client setup received:") + if bc != nil { + grpclog.Printf("client setup received when client already exists, shuting down the existing client") + bc.shutdown() + } + bc, err = startBenchmarkClient(t.Setup) + if err != nil { + return err + } + out = &testpb.ClientStatus{ + Stats: bc.getStats(false), + } + + case *testpb.ClientArgs_Mark: + grpclog.Printf("client mark received:") + grpclog.Printf(" - %v", t) + if bc == nil { + return grpc.Errorf(codes.InvalidArgument, "client does not exist when mark received") + } + out = &testpb.ClientStatus{ + Stats: bc.getStats(t.Mark.Reset_), + } + } + + if err := stream.Send(out); err != nil { + return err + } + } +} + +func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) { + grpclog.Printf("core count: %v", runtime.NumCPU()) + return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil +} + +func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { + grpclog.Printf("quiting worker") + s.stop <- true + return &testpb.Void{}, nil +} + +func main() { + grpc.EnableTracing = false + + flag.Parse() + lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort)) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + grpclog.Printf("worker listening at port %v", *driverPort) + + s := grpc.NewServer() + stop := make(chan bool) + testpb.RegisterWorkerServiceServer(s, &workerServer{ + stop: stop, + serverPort: *serverPort, + }) + + go func() { + <-stop + // Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client. 
+ // TODO revise this once server graceful stop is supported in gRPC. + time.Sleep(time.Second) + s.Stop() + }() + + runtime.SetBlockProfileRate(*blockProfRate) + + if *pprofPort >= 0 { + go func() { + grpclog.Println("Starting pprof server on port " + strconv.Itoa(*pprofPort)) + grpclog.Println(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil)) + }() + } + + s.Serve(lis) +} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/util.go b/vendor/google.golang.org/grpc/benchmark/worker/util.go new file mode 100644 index 000000000..f26993e65 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/worker/util.go @@ -0,0 +1,35 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import "syscall" + +func cpuTimeDiff(first *syscall.Rusage, latest *syscall.Rusage) (float64, float64) { + var ( + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 000000000..1ef2507c3 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,307 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "io" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// recvResponse receives and parses an RPC response. +// On error, it returns the error and indicates whether the call should be retried. +// +// TODO(zhaoq): Check whether the received message sequence is valid. +// TODO ctx is used for stats collection and processing. 
It is the context passed from the application. +func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) { + // Try to acquire header metadata from the server if there is any. + defer func() { + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + t.CloseStream(stream, err) + } + } + }() + c.headerMD, err = stream.Header() + if err != nil { + return + } + p := &parser{r: stream} + var inPayload *stats.InPayload + if dopts.copts.StatsHandler != nil { + inPayload = &stats.InPayload{ + Client: true, + } + } + for { + if c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil { + if err == io.EOF { + break + } + return + } + } + if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK { + // TODO in the current implementation, inTrailer may be handled before inPayload in some cases. + // Fix the order if necessary. + dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) + } + c.trailerMD = stream.Trailer() + return nil +} + +// sendRequest writes out various information of an RPC such as Context and Message. +func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { + defer func() { + if err != nil { + // If err is connection error, t will be closed, no need to close stream here. 
+ if _, ok := err.(transport.ConnectionError); !ok { + t.CloseStream(stream, err) + } + } + }() + var ( + cbuf *bytes.Buffer + outPayload *stats.OutPayload + ) + if compressor != nil { + cbuf = new(bytes.Buffer) + } + if dopts.copts.StatsHandler != nil { + outPayload = &stats.OutPayload{ + Client: true, + } + } + hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload) + if err != nil { + return err + } + if c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) + if err == nil && outPayload != nil { + outPayload.SentTime = time.Now() + dopts.copts.StatsHandler.HandleRPC(ctx, outPayload) + } + // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method + // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following + // recvResponse to get the final status. + if err != nil && err != io.EOF { + return err + } + // Sent successfully. + return nil +} + +// Invoke sends the RPC request on the wire and returns after response is received. +// Invoke is called by generated code. Also users can call Invoke directly when it +// is really needed in their use cases. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) 
+} + +func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + if mc.Timeout != nil && *mc.Timeout >= 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer cancel() + } + + opts = append(cc.dopts.callOptions, opts...) + for _, o := range opts { + if err := o.before(c); err != nil { + return toRPCErr(err) + } + } + defer func() { + for _, o := range opts { + o.after(c) + } + }() + + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + + if EnableTracing { + c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + defer c.traceInfo.tr.Finish() + c.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) + // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. 
+ defer func() { + if e != nil { + c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true) + c.traceInfo.tr.SetError() + } + }() + } + ctx = newContextWithRPCInfo(ctx, c.failFast) + sh := cc.dopts.copts.StatsHandler + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + begin := &stats.Begin{ + Client: true, + BeginTime: time.Now(), + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + defer func() { + end := &stats.End{ + Client: true, + EndTime: time.Now(), + Error: e, + } + sh.HandleRPC(ctx, end) + }() + } + topts := &transport.Options{ + Last: true, + Delay: false, + } + for { + var ( + err error + t transport.ClientTransport + stream *transport.Stream + // Record the done handler from Balancer.Get(...). It is called once the + // RPC has completed or failed. + done func(balancer.DoneInfo) + ) + // TODO(zhaoq): Need a formal spec of fail-fast. + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + t, done, err = cc.getTransport(ctx, c.failFast) + if err != nil { + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := status.FromError(err); ok { + return err + } + if err == errConnClosing || err == errConnUnavailable { + if c.failFast { + return Errorf(codes.Unavailable, "%v", err) + } + continue + } + // All the other errors are treated as Internal errors. + return Errorf(codes.Internal, "%v", err) + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) + } + stream, err = t.NewStream(ctx, callHdr) + if err != nil { + if done != nil { + if _, ok := err.(transport.ConnectionError); ok { + // If error is connection error, transport was sending data on wire, + // and we are not sure if anything has been sent on wire. + // If error is not connection error, we are sure nothing has been sent. 
+ updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) + } + done(balancer.DoneInfo{Err: err}) + } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return toRPCErr(err) + } + if peer, ok := peer.FromContext(stream.Context()); ok { + c.peer = peer + } + err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts) + if err != nil { + if done != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) + done(balancer.DoneInfo{Err: err}) + } + // Retry a non-failfast RPC when + // i) there is a connection error; or + // ii) the server started to drain before this RPC was initiated. + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return toRPCErr(err) + } + err = recvResponse(ctx, cc.dopts, t, c, stream, reply) + if err != nil { + if done != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) + done(balancer.DoneInfo{Err: err}) + } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return toRPCErr(err) + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) + } + t.CloseStream(stream, nil) + if done != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) + done(balancer.DoneInfo{Err: err}) + } + return stream.Status().Err() + } +} diff --git a/vendor/google.golang.org/grpc/call_test.go b/vendor/google.golang.org/grpc/call_test.go new file mode 100644 index 000000000..f48d30e87 --- /dev/null +++ b/vendor/google.golang.org/grpc/call_test.go @@ -0,0 +1,292 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/test/leakcheck" + "google.golang.org/grpc/transport" +) + +var ( + expectedRequest = "ping" + expectedResponse = "pong" + weirdError = "format verbs: %v%s" + sizeLargeErr = 1024 * 1024 + canceled = 0 +) + +type testCodec struct { +} + +func (testCodec) Marshal(v interface{}) ([]byte, error) { + return []byte(*(v.(*string))), nil +} + +func (testCodec) Unmarshal(data []byte, v interface{}) error { + *(v.(*string)) = string(data) + return nil +} + +func (testCodec) String() string { + return "test" +} + +type testStreamHandler struct { + port string + t transport.ServerTransport +} + +func (h *testStreamHandler) handleStream(t *testing.T, s *transport.Stream) { + p := &parser{r: s} + for { + pf, req, err := p.recvMsg(math.MaxInt32) + if err == io.EOF { + break + } + if err != nil { + return + } + if pf != compressionNone { + t.Errorf("Received the mistaken message format %d, want %d", pf, compressionNone) + return + } + var v string + codec := testCodec{} + if err := codec.Unmarshal(req, &v); err != nil { + t.Errorf("Failed to unmarshal the received message: %v", err) + return + } + if v == "weird error" { + h.t.WriteStatus(s, status.New(codes.Internal, weirdError)) + 
return + } + if v == "canceled" { + canceled++ + h.t.WriteStatus(s, status.New(codes.Internal, "")) + return + } + if v == "port" { + h.t.WriteStatus(s, status.New(codes.Internal, h.port)) + return + } + + if v != expectedRequest { + h.t.WriteStatus(s, status.New(codes.Internal, strings.Repeat("A", sizeLargeErr))) + return + } + } + // send a response back to end the stream. + hdr, data, err := encode(testCodec{}, &expectedResponse, nil, nil, nil) + if err != nil { + t.Errorf("Failed to encode the response: %v", err) + return + } + h.t.Write(s, hdr, data, &transport.Options{}) + h.t.WriteStatus(s, status.New(codes.OK, "")) +} + +type server struct { + lis net.Listener + port string + addr string + startedErr chan error // sent nil or an error after server starts + mu sync.Mutex + conns map[transport.ServerTransport]bool +} + +func newTestServer() *server { + return &server{startedErr: make(chan error, 1)} +} + +// start starts server. Other goroutines should block on s.startedErr for further operations. 
+func (s *server) start(t *testing.T, port int, maxStreams uint32) { + var err error + if port == 0 { + s.lis, err = net.Listen("tcp", "localhost:0") + } else { + s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port)) + } + if err != nil { + s.startedErr <- fmt.Errorf("failed to listen: %v", err) + return + } + s.addr = s.lis.Addr().String() + _, p, err := net.SplitHostPort(s.addr) + if err != nil { + s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) + return + } + s.port = p + s.conns = make(map[transport.ServerTransport]bool) + s.startedErr <- nil + for { + conn, err := s.lis.Accept() + if err != nil { + return + } + config := &transport.ServerConfig{ + MaxStreams: maxStreams, + } + st, err := transport.NewServerTransport("http2", conn, config) + if err != nil { + continue + } + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + st.Close() + return + } + s.conns[st] = true + s.mu.Unlock() + h := &testStreamHandler{ + port: s.port, + t: st, + } + go st.HandleStreams(func(s *transport.Stream) { + go h.handleStream(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + } +} + +func (s *server) wait(t *testing.T, timeout time.Duration) { + select { + case err := <-s.startedErr: + if err != nil { + t.Fatal(err) + } + case <-time.After(timeout): + t.Fatalf("Timed out after %v waiting for server to be ready", timeout) + } +} + +func (s *server) stop() { + s.lis.Close() + s.mu.Lock() + for c := range s.conns { + c.Close() + } + s.conns = nil + s.mu.Unlock() +} + +func setUp(t *testing.T, port int, maxStreams uint32) (*server, *ClientConn) { + server := newTestServer() + go server.start(t, port, maxStreams) + server.wait(t, 2*time.Second) + addr := "localhost:" + server.port + cc, err := Dial(addr, WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + return server, cc +} + +func TestInvoke(t *testing.T) { + defer leakcheck.Check(t) 
+ server, cc := setUp(t, 0, math.MaxUint32) + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) + } + cc.Close() + server.stop() +} + +func TestInvokeLargeErr(t *testing.T) { + defer leakcheck.Check(t) + server, cc := setUp(t, 0, math.MaxUint32) + var reply string + req := "hello" + err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc) + if _, ok := status.FromError(err); !ok { + t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") + } + if Code(err) != codes.Internal || len(ErrorDesc(err)) != sizeLargeErr { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want an error of code %d and desc size %d", err, codes.Internal, sizeLargeErr) + } + cc.Close() + server.stop() +} + +// TestInvokeErrorSpecialChars checks that error messages don't get mangled. +func TestInvokeErrorSpecialChars(t *testing.T) { + defer leakcheck.Check(t) + server, cc := setUp(t, 0, math.MaxUint32) + var reply string + req := "weird error" + err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc) + if _, ok := status.FromError(err); !ok { + t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") + } + if got, want := ErrorDesc(err), weirdError; got != want { + t.Fatalf("grpc.Invoke(_, _, _, _, _) error = %q, want %q", got, want) + } + cc.Close() + server.stop() +} + +// TestInvokeCancel checks that an Invoke with a canceled context is not sent. 
+func TestInvokeCancel(t *testing.T) { + defer leakcheck.Check(t) + server, cc := setUp(t, 0, math.MaxUint32) + var reply string + req := "canceled" + for i := 0; i < 100; i++ { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + Invoke(ctx, "/foo/bar", &req, &reply, cc) + } + if canceled != 0 { + t.Fatalf("received %d of 100 canceled requests", canceled) + } + cc.Close() + server.stop() +} + +// TestInvokeCancelClosedNonFail checks that a canceled non-failfast RPC +// on a closed client will terminate. +func TestInvokeCancelClosedNonFailFast(t *testing.T) { + defer leakcheck.Check(t) + server, cc := setUp(t, 0, math.MaxUint32) + var reply string + cc.Close() + req := "hello" + ctx, cancel := context.WithCancel(context.Background()) + cancel() + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc, FailFast(false)); err == nil { + t.Fatalf("canceled invoke on closed connection should fail") + } + server.stop() +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..886bead9d --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1142 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/transport" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + ErrClientConnClosing = errors.New("grpc: the client connection is closing") + // ErrClientConnTimeout indicates that the ClientConn cannot establish the + // underlying connections within the specified timeout. + // DEPRECATED: Please use context.DeadlineExceeded instead. + ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") + // errNetworkIO indicates that the connection is down due to some network I/O error. 
+ errNetworkIO = errors.New("grpc: failed with network I/O error") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errConnUnavailable indicates that the connection is unavailable. + errConnUnavailable = errors.New("grpc: the connection is unavailable") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + codec Codec + cp Compressor + dc Decompressor + bs backoffStrategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + copts transport.ConnectOptions + callOptions []CallOption + // This is to support v1 balancer. + balancerBuilder balancer.Builder +} + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 +) + +// DialOption configures how we set up the connection. +type DialOption func(*dialOptions) + +// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WithWriteBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.WriteBufferSize = s + } +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for each read syscall. 
+func WithReadBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.ReadBufferSize = s + } +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialWindowSize = s + } +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + } +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + } +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. +func WithCodec(c Codec) DialOption { + return func(o *dialOptions) { + o.codec = c + } +} + +// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message +// compressor. +func WithCompressor(cp Compressor) DialOption { + return func(o *dialOptions) { + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating +// message decompressor. 
+func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// Deprecated: use the new balancer APIs in balancer package instead. +func WithBalancer(b Balancer) DialOption { + return func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + } +} + +// WithBalancerBuilder is for testing only. Users using custom balancers should +// register their balancer and use service config to choose the balancer to use. +func WithBalancerBuilder(b balancer.Builder) DialOption { + // TODO(bar) remove this when switching balancer is done. + return func(o *dialOptions) { + o.balancerBuilder = b + } +} + +// WithServiceConfig returns a DialOption which has a channel to read the service configuration. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return func(o *dialOptions) { + o.scChan = c + } +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + // Set defaults to ensure that provided BackoffConfig is valid and + // unexported fields get default values. + setDefaults(&b) + return withBackoff(b) +} + +// withBackoff sets the backoff strategy used for retries after a +// failed connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. 
+func withBackoff(bs backoffStrategy) DialOption { + return func(o *dialOptions) { + o.bs = bs + } +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying +// connection is up. Without this, Dial returns immediately and connecting the server +// happens in background. +func WithBlock() DialOption { + return func(o *dialOptions) { + o.block = true + } +} + +// WithInsecure returns a DialOption which disables transport security for this ClientConn. +// Note that transport security is required unless WithInsecure is set. +func WithInsecure() DialOption { + return func(o *dialOptions) { + o.insecure = true + } +} + +// WithTransportCredentials returns a DialOption which configures a +// connection level security credentials (e.g., TLS/SSL). +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return func(o *dialOptions) { + o.copts.TransportCredentials = creds + } +} + +// WithPerRPCCredentials returns a DialOption which sets +// credentials and places auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + } +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn +// initially. This is valid if and only if WithBlock() is present. +// Deprecated: use DialContext and context.WithTimeout instead. +func WithTimeout(d time.Duration) DialOption { + return func(o *dialOptions) { + o.timeout = d + } +} + +// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. +// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's +// Temporary() method to decide if it should try to reconnect to the network address. 
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return func(o *dialOptions) { + o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, deadline.Sub(time.Now())) + } + return f(addr, 0) + } + } +} + +// WithStatsHandler returns a DialOption that specifies the stats handler +// for all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return func(o *dialOptions) { + o.copts.StatsHandler = h + } +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. +// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network +// address and won't try to reconnect. +// The default value of FailOnNonTempDialError is false. +// This is an EXPERIMENTAL API. +func FailOnNonTempDialError(f bool) DialOption { + return func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + } +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. +func WithUserAgent(s string) DialOption { + return func(o *dialOptions) { + o.copts.UserAgent = s + } +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + return func(o *dialOptions) { + o.copts.KeepaliveParams = kp + } +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return func(o *dialOptions) { + o.unaryInt = f + } +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs. 
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return func(o *dialOptions) { + o.streamInt = f + } +} + +// WithAuthority returns a DialOption that specifies the value to be used as +// the :authority pseudo-header. This value only works with WithInsecure and +// has no effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return func(o *dialOptions) { + o.copts.Authority = a + } +} + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. ctx can be used to +// cancel or expire the pending connection. Once this function returns, the +// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close +// to terminate all the pending operations after this function returns. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + + blockingpicker: newPickerWrapper(), + } + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt(&cc.dopts) + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil { + return nil, errNoTransportSecurity + } + } else { + if cc.dopts.copts.TransportCredentials != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != 
"" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + + if err != nil { + cc.Close() + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + scSet = true + } + default: + } + } + // Set defaults. + if cc.dopts.codec == nil { + cc.dopts.codec = protoCodec{} + } + if cc.dopts.bs == nil { + cc.dopts.bs = DefaultBackoffConfig + } + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { + cc.authority = cc.dopts.copts.Authority + } else { + cc.authority = target + } + + if cc.dopts.balancerBuilder != nil { + var credsClone credentials.TransportCredentials + if creds != nil { + credsClone = creds.Clone() + } + buildOpts := balancer.BuildOptions{ + DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, + } + // Build should not take long time. So it's ok to not have a goroutine for it. + // TODO(bar) init balancer after first resolver result to support service config balancer. + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, buildOpts) + } else { + waitC := make(chan error, 1) + go func() { + defer close(waitC) + // No balancer, or no resolver within the balancer. Connect directly. 
+ ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}}) + if err != nil { + waitC <- err + return + } + if err := ac.connect(cc.dopts.block); err != nil { + waitC <- err + return + } + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-waitC: + if err != nil { + return nil, err + } + } + } + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + // Build the resolver. + cc.resolverWrapper, err = newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + + if cc.balancerWrapper != nil && cc.resolverWrapper == nil { + // TODO(bar) there should always be a resolver (DNS as the default). + // Unblock balancer initialization with a fake resolver update if there's no resolver. + // The balancer wrapper will not read the addresses, so an empty list works. + // TODO(bar) remove this after the real resolver is started. + cc.balancerWrapper.handleResolvedAddrs([]resolver.Address{}, nil) + } + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. 
+func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConn represents a client connection to an RPC server. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerWrapper *ccBalancerWrapper + resolverWrapper *ccResolverWrapper + + blockingpicker *pickerWrapper + + mu sync.RWMutex + sc ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. 
+func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = sc + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { + ac := &addrConn{ + cc: cc, + addrs: addrs, + dopts: cc.dopts, + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +// connect starts creating transport and also starts the transport monitor +// goroutine for this ac. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +// This was part of resetAddrConn, keep it here to make the diff look clean. 
+func (ac *addrConn) connect(block bool) error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + ac.state = connectivity.Connecting + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + ac.mu.Unlock() + + if block { + if err := ac.resetTransport(); err != nil { + if err != errConnClosing { + ac.tearDown(err) + } + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + return e.Origin() + } + return err + } + // Start to monitor the error status of transport. + go ac.transportMonitor() + } else { + // Start a goroutine connecting to the server asynchronously. + go func() { + if err := ac.resetTransport(); err != nil { + grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return + } + ac.transportMonitor() + }() + } + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// It checks whether current connected address of ac is in the new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. 
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown { + ac.addrs = addrs + return true + } + + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e. /service/). If there is a default MethodConfig for +// the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m, _ = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { + if cc.balancerWrapper == nil { + // If balancer is nil, there should be only one addrConn available. + cc.mu.RLock() + if cc.conns == nil { + cc.mu.RUnlock() + // TODO this function returns toRPCErr and non-toRPCErr. Clean up + // the errors in ClientConn. + return nil, nil, toRPCErr(ErrClientConnClosing) + } + var ac *addrConn + for ac = range cc.conns { + // Break after the first iteration to get the first addrConn. 
+ break + } + cc.mu.RUnlock() + if ac == nil { + return nil, nil, errConnClosing + } + t, err := ac.wait(ctx, false /*hasBalancer*/, failfast) + if err != nil { + return nil, nil, err + } + return t, nil, nil + } + + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + cc.mu.Unlock() + cc.blockingpicker.close() + if cc.resolverWrapper != nil { + cc.resolverWrapper.close() + } + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + curAddr resolver.Address + addrs []resolver.Address + dopts dialOptions + events trace.EventLog + acbw balancer.SubConn + + mu sync.Mutex + state connectivity.State + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport + + // The reason this addrConn is torn down. + tearDownErr error +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.TooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +// printf records an event in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. 
+func (ac *addrConn) printf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Printf(format, a...) + } +} + +// errorf records an error in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) errorf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Errorf(format, a...) + } +} + +// resetTransport recreates a transport to the address for ac. The old +// transport will close itself on error or when the clientconn is closed. +// +// TODO(bar) make sure all state transitions are valid. +func (ac *addrConn) resetTransport() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + ac.transport = nil + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + for retries := 0; ; retries++ { + sleepTime := ac.dopts.bs.backoff(retries) + timeout := minConnectTimeout + ac.mu.Lock() + if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) { + timeout = time.Duration(int(sleepTime) / len(ac.addrs)) + } + connectTime := time.Now() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + ac.printf("connecting") + if ac.state != connectivity.Connecting { + ac.state = connectivity.Connecting + // TODO(bar) remove condition once we always have a balancer. + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + } + // copy ac.addrs in case of race + addrsIter := make([]resolver.Address, len(ac.addrs)) + copy(addrsIter, ac.addrs) + copts := ac.dopts.copts + ac.mu.Unlock() + for _, addr := range addrsIter { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. 
+ ac.mu.Unlock() + return errConnClosing + } + ac.mu.Unlock() + sinfo := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + } + newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout) + if err != nil { + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + return err + } + grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr) + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing + } + ac.mu.Unlock() + continue + } + ac.mu.Lock() + ac.printf("ready") + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + newTransport.Close() + return errConnClosing + } + ac.state = connectivity.Ready + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + t := ac.transport + ac.transport = newTransport + if t != nil { + t.Close() + } + ac.curAddr = addr + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + ac.mu.Unlock() + return nil + } + ac.mu.Lock() + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + ac.mu.Unlock() + timer := time.NewTimer(sleepTime - time.Since(connectTime)) + select { + case <-timer.C: + case <-ac.ctx.Done(): + timer.Stop() + return ac.ctx.Err() + } + timer.Stop() + } +} + +// Run in a goroutine to track the error in transport and create the +// new transport if an error happens. It returns when the channel is closing. +func (ac *addrConn) transportMonitor() { + for { + ac.mu.Lock() + t := ac.transport + ac.mu.Unlock() + // Block until we receive a goaway or an error occurs. 
+ select { + case <-t.GoAway(): + case <-t.Error(): + } + // If a GoAway happened, regardless of error, adjust our keepalive + // parameters as appropriate. + select { + case <-t.GoAway(): + ac.adjustParams(t.GetGoAwayReason()) + default: + } + ac.mu.Lock() + // Set connectivity state to TransientFailure before calling + // resetTransport. Transition READY->CONNECTING is not valid. + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + if err := ac.resetTransport(); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return + } + } +} + +// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or +// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. +func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { + for { + ac.mu.Lock() + switch { + case ac.state == connectivity.Shutdown: + if failfast || !hasBalancer { + // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. 
+ err := ac.tearDownErr + ac.mu.Unlock() + return nil, err + } + ac.mu.Unlock() + return nil, errConnClosing + case ac.state == connectivity.Ready: + ct := ac.transport + ac.mu.Unlock() + return ct, nil + case ac.state == connectivity.TransientFailure: + if failfast || hasBalancer { + ac.mu.Unlock() + return nil, errConnUnavailable + } + } + ready := ac.ready + if ready == nil { + ready = make(chan struct{}) + ac.ready = ready + } + ac.mu.Unlock() + select { + case <-ctx.Done(): + return nil, toRPCErr(ctx.Err()) + // Wait until the new transport is ready or failed. + case <-ready: + } + } +} + +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect(false) + } + return nil, false +} + +// tearDown starts to tear down the addrConn. +// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many addrConn's in a +// tight loop). +// tearDown doesn't remove ac from ac.cc.conns. +func (ac *addrConn) tearDown(err error) { + ac.cancel() + ac.mu.Lock() + ac.curAddr = resolver.Address{} + defer ac.mu.Unlock() + if err == errConnDrain && ac.transport != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. 
+ ac.transport.GracefulClose() + } + if ac.state == connectivity.Shutdown { + return + } + ac.state = connectivity.Shutdown + ac.tearDownErr = err + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + if ac.events != nil { + ac.events.Finish() + ac.events = nil + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + return +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} diff --git a/vendor/google.golang.org/grpc/clientconn_test.go b/vendor/google.golang.org/grpc/clientconn_test.go new file mode 100644 index 000000000..47801e962 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn_test.go @@ -0,0 +1,390 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "math" + "net" + "testing" + "time" + + "golang.org/x/net/context" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/test/leakcheck" + "google.golang.org/grpc/testdata" +) + +func assertState(wantState connectivity.State, cc *ClientConn) (connectivity.State, bool) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + var state connectivity.State + for state = cc.GetState(); state != wantState && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + return state, state == wantState +} + +func TestConnectivityStates(t *testing.T) { + defer leakcheck.Check(t) + servers, resolver, cleanup := startServers(t, 2, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(resolver)), WithInsecure()) + if err != nil { + t.Fatalf("Dial(\"foo.bar.com\", WithBalancer(_)) = _, %v, want _ ", err) + } + defer cc.Close() + wantState := connectivity.Ready + if state, ok := assertState(wantState, cc); !ok { + t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState) + } + // Send an update to delete the server connection (tearDown addrConn). 
+ update := []*naming.Update{ + { + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }, + } + resolver.w.inject(update) + wantState = connectivity.TransientFailure + if state, ok := assertState(wantState, cc); !ok { + t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState) + } + update[0] = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + } + resolver.w.inject(update) + wantState = connectivity.Ready + if state, ok := assertState(wantState, cc); !ok { + t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState) + } + +} + +func TestDialTimeout(t *testing.T) { + defer leakcheck.Check(t) + conn, err := Dial("Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure()) + if err == nil { + conn.Close() + } + if err != context.DeadlineExceeded { + t.Fatalf("Dial(_, _) = %v, %v, want %v", conn, err, context.DeadlineExceeded) + } +} + +func TestTLSDialTimeout(t *testing.T) { + defer leakcheck.Check(t) + creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + conn, err := Dial("Non-Existent.Server:80", WithTransportCredentials(creds), WithTimeout(time.Millisecond), WithBlock()) + if err == nil { + conn.Close() + } + if err != context.DeadlineExceeded { + t.Fatalf("Dial(_, _) = %v, %v, want %v", conn, err, context.DeadlineExceeded) + } +} + +func TestDefaultAuthority(t *testing.T) { + defer leakcheck.Check(t) + target := "Non-Existent.Server:8080" + conn, err := Dial(target, WithInsecure()) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v, want _, ", err) + } + conn.Close() + if conn.authority != target { + t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, target) + } +} + +func TestTLSServerNameOverwrite(t *testing.T) { + defer leakcheck.Check(t) + overwriteServerName := "over.write.server.name" + creds, err := 
credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), overwriteServerName) + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + conn, err := Dial("Non-Existent.Server:80", WithTransportCredentials(creds)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v, want _, ", err) + } + conn.Close() + if conn.authority != overwriteServerName { + t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) + } +} + +func TestWithAuthority(t *testing.T) { + defer leakcheck.Check(t) + overwriteServerName := "over.write.server.name" + conn, err := Dial("Non-Existent.Server:80", WithInsecure(), WithAuthority(overwriteServerName)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v, want _, ", err) + } + conn.Close() + if conn.authority != overwriteServerName { + t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) + } +} + +func TestWithAuthorityAndTLS(t *testing.T) { + defer leakcheck.Check(t) + overwriteServerName := "over.write.server.name" + creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), overwriteServerName) + if err != nil { + t.Fatalf("Failed to create credentials %v", err) + } + conn, err := Dial("Non-Existent.Server:80", WithTransportCredentials(creds), WithAuthority("no.effect.authority")) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v, want _, ", err) + } + conn.Close() + if conn.authority != overwriteServerName { + t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) + } +} + +func TestDialContextCancel(t *testing.T) { + defer leakcheck.Check(t) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + if _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure()); err != context.Canceled { + t.Fatalf("DialContext(%v, _) = _, %v, want _, %v", ctx, err, context.Canceled) + } +} + +// blockingBalancer mimics the behavior of balancers whose initialization takes a long time. 
+// In this test, reading from blockingBalancer.Notify() blocks forever. +type blockingBalancer struct { + ch chan []Address +} + +func newBlockingBalancer() Balancer { + return &blockingBalancer{ch: make(chan []Address)} +} +func (b *blockingBalancer) Start(target string, config BalancerConfig) error { + return nil +} +func (b *blockingBalancer) Up(addr Address) func(error) { + return nil +} +func (b *blockingBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + return Address{}, nil, nil +} +func (b *blockingBalancer) Notify() <-chan []Address { + return b.ch +} +func (b *blockingBalancer) Close() error { + close(b.ch) + return nil +} + +func TestDialWithBlockingBalancer(t *testing.T) { + defer leakcheck.Check(t) + ctx, cancel := context.WithCancel(context.Background()) + dialDone := make(chan struct{}) + go func() { + DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure(), WithBalancer(newBlockingBalancer())) + close(dialDone) + }() + cancel() + <-dialDone +} + +// securePerRPCCredentials always requires transport security. 
+type securePerRPCCredentials struct{} + +func (c securePerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return nil, nil +} + +func (c securePerRPCCredentials) RequireTransportSecurity() bool { + return true +} + +func TestCredentialsMisuse(t *testing.T) { + defer leakcheck.Check(t) + tlsCreds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") + if err != nil { + t.Fatalf("Failed to create authenticator %v", err) + } + // Two conflicting credential configurations + if _, err := Dial("Non-Existent.Server:80", WithTransportCredentials(tlsCreds), WithBlock(), WithInsecure()); err != errCredentialsConflict { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errCredentialsConflict) + } + // security info on insecure connection + if _, err := Dial("Non-Existent.Server:80", WithPerRPCCredentials(securePerRPCCredentials{}), WithBlock(), WithInsecure()); err != errTransportCredentialsMissing { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredentialsMissing) + } +} + +func TestWithBackoffConfigDefault(t *testing.T) { + defer leakcheck.Check(t) + testBackoffConfigSet(t, &DefaultBackoffConfig) +} + +func TestWithBackoffConfig(t *testing.T) { + defer leakcheck.Check(t) + b := BackoffConfig{MaxDelay: DefaultBackoffConfig.MaxDelay / 2} + expected := b + setDefaults(&expected) // defaults should be set + testBackoffConfigSet(t, &expected, WithBackoffConfig(b)) +} + +func TestWithBackoffMaxDelay(t *testing.T) { + defer leakcheck.Check(t) + md := DefaultBackoffConfig.MaxDelay / 2 + expected := BackoffConfig{MaxDelay: md} + setDefaults(&expected) + testBackoffConfigSet(t, &expected, WithBackoffMaxDelay(md)) +} + +func testBackoffConfigSet(t *testing.T, expected *BackoffConfig, opts ...DialOption) { + opts = append(opts, WithInsecure()) + conn, err := Dial("foo:80", opts...) 
+ if err != nil { + t.Fatalf("unexpected error dialing connection: %v", err) + } + + if conn.dopts.bs == nil { + t.Fatalf("backoff config not set") + } + + actual, ok := conn.dopts.bs.(BackoffConfig) + if !ok { + t.Fatalf("unexpected type of backoff config: %#v", conn.dopts.bs) + } + + if actual != *expected { + t.Fatalf("unexpected backoff config on connection: %v, want %v", actual, expected) + } + conn.Close() +} + +type testErr struct { + temp bool +} + +func (e *testErr) Error() string { + return "test error" +} + +func (e *testErr) Temporary() bool { + return e.temp +} + +var nonTemporaryError = &testErr{false} + +func nonTemporaryErrorDialer(addr string, timeout time.Duration) (net.Conn, error) { + return nil, nonTemporaryError +} + +func TestDialWithBlockErrorOnNonTemporaryErrorDialer(t *testing.T) { + defer leakcheck.Check(t) + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + if _, err := DialContext(ctx, "", WithInsecure(), WithDialer(nonTemporaryErrorDialer), WithBlock(), FailOnNonTempDialError(true)); err != nonTemporaryError { + t.Fatalf("Dial(%q) = %v, want %v", "", err, nonTemporaryError) + } + + // Without FailOnNonTempDialError, gRPC will retry to connect, and dial should exit with time out error. + if _, err := DialContext(ctx, "", WithInsecure(), WithDialer(nonTemporaryErrorDialer), WithBlock()); err != context.DeadlineExceeded { + t.Fatalf("Dial(%q) = %v, want %v", "", err, context.DeadlineExceeded) + } +} + +// emptyBalancer returns an empty set of servers. 
+type emptyBalancer struct { + ch chan []Address +} + +func newEmptyBalancer() Balancer { + return &emptyBalancer{ch: make(chan []Address, 1)} +} +func (b *emptyBalancer) Start(_ string, _ BalancerConfig) error { + b.ch <- nil + return nil +} +func (b *emptyBalancer) Up(_ Address) func(error) { + return nil +} +func (b *emptyBalancer) Get(_ context.Context, _ BalancerGetOptions) (Address, func(), error) { + return Address{}, nil, nil +} +func (b *emptyBalancer) Notify() <-chan []Address { + return b.ch +} +func (b *emptyBalancer) Close() error { + close(b.ch) + return nil +} + +func TestNonblockingDialWithEmptyBalancer(t *testing.T) { + defer leakcheck.Check(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dialDone := make(chan error) + go func() { + dialDone <- func() error { + conn, err := DialContext(ctx, "Non-Existent.Server:80", WithInsecure(), WithBalancer(newEmptyBalancer())) + if err != nil { + return err + } + return conn.Close() + }() + }() + if err := <-dialDone; err != nil { + t.Fatalf("unexpected error dialing connection: %s", err) + } +} + +func TestClientUpdatesParamsAfterGoAway(t *testing.T) { + defer leakcheck.Check(t) + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen. 
Err: %v", err) + } + defer lis.Close() + addr := lis.Addr().String() + s := NewServer() + go s.Serve(lis) + defer s.Stop() + cc, err := Dial(addr, WithBlock(), WithInsecure(), WithKeepaliveParams(keepalive.ClientParameters{ + Time: 50 * time.Millisecond, + Timeout: 1 * time.Millisecond, + PermitWithoutStream: true, + })) + if err != nil { + t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) + } + defer cc.Close() + time.Sleep(1 * time.Second) + cc.mu.RLock() + defer cc.mu.RUnlock() + v := cc.mkp.Time + if v < 100*time.Millisecond { + t.Fatalf("cc.dopts.copts.Keepalive.Time = %v , want 100ms", v) + } +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 000000000..905b048e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" +) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. 
The returned + // string will be used as part of content type in transmission. + String() string +} + +// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. +type protoCodec struct { +} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (p protoCodec) Marshal(v interface{}) ([]byte, error) { + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := p.marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (p protoCodec) Unmarshal(data []byte, v interface{}) error { + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + v.(proto.Message).Reset() + err := cb.Unmarshal(v.(proto.Message)) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (protoCodec) String() string { + return "proto" +} + +var ( + protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, + } +) diff --git a/vendor/google.golang.org/grpc/codec_benchmark_test.go b/vendor/google.golang.org/grpc/codec_benchmark_test.go new file mode 100644 index 000000000..dee617ca2 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec_benchmark_test.go @@ -0,0 +1,100 @@ +// +build go1.7 + +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/test/codec_perf" +) + +func setupBenchmarkProtoCodecInputs(b *testing.B, payloadBaseSize uint32) []proto.Message { + payloadBase := make([]byte, payloadBaseSize) + // arbitrary byte slices + payloadSuffixes := [][]byte{ + []byte("one"), + []byte("two"), + []byte("three"), + []byte("four"), + []byte("five"), + } + protoStructs := make([]proto.Message, 0) + + for _, p := range payloadSuffixes { + ps := &codec_perf.Buffer{} + ps.Body = append(payloadBase, p...) + protoStructs = append(protoStructs, ps) + } + + return protoStructs +} + +// The possible use of certain protobuf APIs like the proto.Buffer API potentially involves caching +// on our side. This can add checks around memory allocations and possible contention. 
+// Example run: go test -v -run=^$ -bench=BenchmarkProtoCodec -benchmem +func BenchmarkProtoCodec(b *testing.B) { + // range of message sizes + payloadBaseSizes := make([]uint32, 0) + for i := uint32(0); i <= 12; i += 4 { + payloadBaseSizes = append(payloadBaseSizes, 1<= Code(len(_Code_index)-1) { + return fmt.Sprintf("Code(%d)", i) + } + return _Code_name[_Code_index[i]:_Code_index[i+1]] +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..21e7733a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +//go:generate stringer -type=Code + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. 
Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. 
+ // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. 
+ OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + DataLoss Code = 15 +) diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000..568ef5dc6 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. 
+// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). + WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..2475fe832 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,219 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "strings" + + "golang.org/x/net/context" +) + +var ( + // alpnProtoStr are the specified application level protocols for gRPC. + alpnProtoStr = []string{"h2"} +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. uri is the URI of the entry point for the request. When + // supported by the underlying implementation, ctx can be used for + // timeout and cancellation. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. 
+ SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + +var ( + // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC + // and the caller should not close rawConn. + ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") +) + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + // Implementations must use the provided context to implement timely cancellation. + // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. 
+ // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + cfg.ServerName = addr[:colonPos] + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err := <-errChannel: + if err != nil { + return nil, nil, err + } + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + return conn, TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + return nil, nil, err + } + return conn, TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// 
NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = alpnProtoStr + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. 
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_test.go b/vendor/google.golang.org/grpc/credentials/credentials_test.go new file mode 100644 index 000000000..9b13db51d --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_test.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package credentials + +import ( + "crypto/tls" + "net" + "testing" + + "golang.org/x/net/context" + "google.golang.org/grpc/testdata" +) + +func TestTLSOverrideServerName(t *testing.T) { + expectedServerName := "server.name" + c := NewTLS(nil) + c.OverrideServerName(expectedServerName) + if c.Info().ServerName != expectedServerName { + t.Fatalf("c.Info().ServerName = %v, want %v", c.Info().ServerName, expectedServerName) + } +} + +func TestTLSClone(t *testing.T) { + expectedServerName := "server.name" + c := NewTLS(nil) + c.OverrideServerName(expectedServerName) + cc := c.Clone() + if cc.Info().ServerName != expectedServerName { + t.Fatalf("cc.Info().ServerName = %v, want %v", cc.Info().ServerName, expectedServerName) + } + cc.OverrideServerName("") + if c.Info().ServerName != expectedServerName { + t.Fatalf("Change in clone should not affect the original, c.Info().ServerName = %v, want %v", c.Info().ServerName, expectedServerName) + } + +} + +type serverHandshake func(net.Conn) (AuthInfo, error) + +func TestClientHandshakeReturnsAuthInfo(t *testing.T) { + done := make(chan AuthInfo, 1) + lis := launchServer(t, tlsServerHandshake, done) + defer lis.Close() + lisAddr := lis.Addr().String() + clientAuthInfo := clientHandle(t, gRPCClientHandshake, lisAddr) + // wait until server sends serverAuthInfo or fails. + serverAuthInfo, ok := <-done + if !ok { + t.Fatalf("Error at server-side") + } + if !compare(clientAuthInfo, serverAuthInfo) { + t.Fatalf("c.ClientHandshake(_, %v, _) = %v, want %v.", lisAddr, clientAuthInfo, serverAuthInfo) + } +} + +func TestServerHandshakeReturnsAuthInfo(t *testing.T) { + done := make(chan AuthInfo, 1) + lis := launchServer(t, gRPCServerHandshake, done) + defer lis.Close() + clientAuthInfo := clientHandle(t, tlsClientHandshake, lis.Addr().String()) + // wait until server sends serverAuthInfo or fails. 
+ serverAuthInfo, ok := <-done + if !ok { + t.Fatalf("Error at server-side") + } + if !compare(clientAuthInfo, serverAuthInfo) { + t.Fatalf("ServerHandshake(_) = %v, want %v.", serverAuthInfo, clientAuthInfo) + } +} + +func TestServerAndClientHandshake(t *testing.T) { + done := make(chan AuthInfo, 1) + lis := launchServer(t, gRPCServerHandshake, done) + defer lis.Close() + clientAuthInfo := clientHandle(t, gRPCClientHandshake, lis.Addr().String()) + // wait until server sends serverAuthInfo or fails. + serverAuthInfo, ok := <-done + if !ok { + t.Fatalf("Error at server-side") + } + if !compare(clientAuthInfo, serverAuthInfo) { + t.Fatalf("AuthInfo returned by server: %v and client: %v aren't same", serverAuthInfo, clientAuthInfo) + } +} + +func compare(a1, a2 AuthInfo) bool { + if a1.AuthType() != a2.AuthType() { + return false + } + switch a1.AuthType() { + case "tls": + state1 := a1.(TLSInfo).State + state2 := a2.(TLSInfo).State + if state1.Version == state2.Version && + state1.HandshakeComplete == state2.HandshakeComplete && + state1.CipherSuite == state2.CipherSuite && + state1.NegotiatedProtocol == state2.NegotiatedProtocol { + return true + } + return false + default: + return false + } +} + +func launchServer(t *testing.T, hs serverHandshake, done chan AuthInfo) net.Listener { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + go serverHandle(t, hs, done, lis) + return lis +} + +// Is run in a separate goroutine. +func serverHandle(t *testing.T, hs serverHandshake, done chan AuthInfo, lis net.Listener) { + serverRawConn, err := lis.Accept() + if err != nil { + t.Errorf("Server failed to accept connection: %v", err) + close(done) + return + } + serverAuthInfo, err := hs(serverRawConn) + if err != nil { + t.Errorf("Server failed while handshake. 
Error: %v", err) + serverRawConn.Close() + close(done) + return + } + done <- serverAuthInfo +} + +func clientHandle(t *testing.T, hs func(net.Conn, string) (AuthInfo, error), lisAddr string) AuthInfo { + conn, err := net.Dial("tcp", lisAddr) + if err != nil { + t.Fatalf("Client failed to connect to %s. Error: %v", lisAddr, err) + } + defer conn.Close() + clientAuthInfo, err := hs(conn, lisAddr) + if err != nil { + t.Fatalf("Error on client while handshake. Error: %v", err) + } + return clientAuthInfo +} + +// Server handshake implementation in gRPC. +func gRPCServerHandshake(conn net.Conn) (AuthInfo, error) { + serverTLS, err := NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) + if err != nil { + return nil, err + } + _, serverAuthInfo, err := serverTLS.ServerHandshake(conn) + if err != nil { + return nil, err + } + return serverAuthInfo, nil +} + +// Client handshake implementation in gRPC. +func gRPCClientHandshake(conn net.Conn, lisAddr string) (AuthInfo, error) { + clientTLS := NewTLS(&tls.Config{InsecureSkipVerify: true}) + _, authInfo, err := clientTLS.ClientHandshake(context.Background(), lisAddr, conn) + if err != nil { + return nil, err + } + return authInfo, nil +} + +func tlsServerHandshake(conn net.Conn) (AuthInfo, error) { + cert, err := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key")) + if err != nil { + return nil, err + } + serverTLSConfig := &tls.Config{Certificates: []tls.Certificate{cert}} + serverConn := tls.Server(conn, serverTLSConfig) + err = serverConn.Handshake() + if err != nil { + return nil, err + } + return TLSInfo{State: serverConn.ConnectionState()}, nil +} + +func tlsClientHandshake(conn net.Conn, _ string) (AuthInfo, error) { + clientTLSConfig := &tls.Config{InsecureSkipVerify: true} + clientConn := tls.Client(conn, clientTLSConfig) + if err := clientConn.Handshake(); err != nil { + return nil, err + } + return TLSInfo{State: clientConn.ConnectionState()}, nil +} diff 
--git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go new file mode 100644 index 000000000..60409aac0 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -0,0 +1,60 @@ +// +build go1.7 +// +build !go1.8 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. 
+func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go new file mode 100644 index 000000000..93f0e1d8d --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -0,0 +1,38 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go new file mode 100644 index 000000000..d6bbcc9fd --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -0,0 +1,57 @@ +// +build !go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. 
+func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 000000000..f6d597a14 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package oauth implements gRPC credentials using OAuth. 
+package oauth + +import ( + "fmt" + "io/ioutil" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. 
+type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. +func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": oa.token.Type() + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. +type serviceAccount struct { + mu sync.Mutex + config *jwt.Config + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + } + return map[string]string{ + "authorization": s.t.Type() + " " + s.t.AccessToken, + }, nil +} + +func (s *serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) 
+ if err != nil { + return nil, err + } + return &serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + t, err := google.DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return TokenSource{t}, nil +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 000000000..187adbb11 --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. 
+*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/examples/README.md b/vendor/google.golang.org/grpc/examples/README.md new file mode 100644 index 000000000..6d9175c68 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/README.md @@ -0,0 +1,60 @@ +gRPC in 3 minutes (Go) +====================== + +BACKGROUND +------------- +For this sample, we've already generated the server and client stubs from [helloworld.proto](helloworld/helloworld/helloworld.proto). + +PREREQUISITES +------------- + +- This requires Go 1.7 or later +- Requires that [GOPATH is set](https://golang.org/doc/code.html#GOPATH) + +``` +$ go help gopath +$ # ensure the PATH contains $GOPATH/bin +$ export PATH=$PATH:$GOPATH/bin +``` + +INSTALL +------- + +``` +$ go get -u google.golang.org/grpc/examples/helloworld/greeter_client +$ go get -u google.golang.org/grpc/examples/helloworld/greeter_server +``` + +TRY IT! +------- + +- Run the server + +``` +$ greeter_server & +``` + +- Run the client + +``` +$ greeter_client +``` + +OPTIONAL - Rebuilding the generated code +---------------------------------------- + +1 First [install protoc](https://github.com/google/protobuf/blob/master/README.md) + - For now, this needs to be installed from source + - This is will change once proto3 is officially released + +2 Install the protoc Go plugin. + +``` +$ go get -a github.com/golang/protobuf/protoc-gen-go +``` + +3 Rebuild the generated Go code. + +``` +$ go generate google.golang.org/grpc/examples/helloworld/... +``` diff --git a/vendor/google.golang.org/grpc/examples/gotutorial.md b/vendor/google.golang.org/grpc/examples/gotutorial.md new file mode 100644 index 000000000..a520a5a15 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/gotutorial.md @@ -0,0 +1,431 @@ +# gRPC Basics: Go + +This tutorial provides a basic Go programmer's introduction to working with gRPC. 
By walking through this example you'll learn how to: + +- Define a service in a `.proto` file. +- Generate server and client code using the protocol buffer compiler. +- Use the Go gRPC API to write a simple client and server for your service. + +It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository. + +This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon. + +## Why use gRPC? + +Our example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients. + +With gRPC we can define our service once in a `.proto` file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating. + +## Example code and setup + +The example code for our tutorial is in [grpc/grpc-go/examples/route_guide](https://github.com/grpc/grpc-go/tree/master/examples/route_guide). 
To download the example, clone the `grpc-go` repository by running the following command: +```shell +$ go get google.golang.org/grpc +``` + +Then change your current directory to `grpc-go/examples/route_guide`: +```shell +$ cd $GOPATH/src/google.golang.org/grpc/examples/route_guide +``` + +You also should have the relevant tools installed to generate the server and client interface code - if you don't already, follow the setup instructions in [the Go quick start guide](https://github.com/grpc/grpc-go/tree/master/examples/). + + +## Defining the service + +Our first step (as you'll know from the [quick start](https://grpc.io/docs/#quick-start)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). You can see the complete `.proto` file in [examples/route_guide/routeguide/route_guide.proto](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/routeguide/route_guide.proto). + +To define a service, you specify a named `service` in your `.proto` file: + +```proto +service RouteGuide { + ... +} +``` + +Then you define `rpc` methods inside your service definition, specifying their request and response types. gRPC lets you define four kinds of service method, all of which are used in the `RouteGuide` service: + +- A *simple RPC* where the client sends a request to the server using the stub and waits for a response to come back, just like a normal function call. +```proto + // Obtains the feature at a given position. + rpc GetFeature(Point) returns (Feature) {} +``` + +- A *server-side streaming RPC* where the client sends a request to the server and gets a stream to read a sequence of messages back. The client reads from the returned stream until there are no more messages. As you can see in our example, you specify a server-side streaming method by placing the `stream` keyword before the *response* type. 
+```proto + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} +``` + +- A *client-side streaming RPC* where the client writes a sequence of messages and sends them to the server, again using a provided stream. Once the client has finished writing the messages, it waits for the server to read them all and return its response. You specify a client-side streaming method by placing the `stream` keyword before the *request* type. +```proto + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} +``` + +- A *bidirectional streaming RPC* where both sides send a sequence of messages using a read-write stream. The two streams operate independently, so clients and servers can read and write in whatever order they like: for example, the server could wait to receive all the client messages before writing its responses, or it could alternately read a message then write a message, or some other combination of reads and writes. The order of messages in each stream is preserved. You specify this type of method by placing the `stream` keyword before both the request and the response. +```proto + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). 
+ rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +``` + +Our `.proto` file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type: +```proto +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} +``` + + +## Generating client and server code + +Next we need to generate the gRPC client and server interfaces from our `.proto` service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin. + +For simplicity, we've provided a [bash script](https://github.com/grpc/grpc-go/blob/master/codegen.sh) that runs `protoc` for you with the appropriate plugin, input, and output (if you want to run this by yourself, make sure you've installed protoc and followed the gRPC-Go [installation instructions](https://github.com/grpc/grpc-go/blob/master/README.md) first): + +```shell +$ codegen.sh route_guide.proto +``` + +which actually runs: + +```shell +$ protoc --go_out=plugins=grpc:. route_guide.proto +``` + +Running this command generates the following file in your current directory: +- `route_guide.pb.go` + +This contains: +- All the protocol buffer code to populate, serialize, and retrieve our request and response message types +- An interface type (or *stub*) for clients to call with the methods defined in the `RouteGuide` service. +- An interface type for servers to implement, also with the methods defined in the `RouteGuide` service. + + + +## Creating the server + +First let's look at how we create a `RouteGuide` server. 
If you're only interested in creating gRPC clients, you can skip this section and go straight to [Creating the client](#client) (though you might find it interesting anyway!). + +There are two parts to making our `RouteGuide` service do its job: +- Implementing the service interface generated from our service definition: doing the actual "work" of our service. +- Running a gRPC server to listen for requests from clients and dispatch them to the right service implementation. + +You can find our example `RouteGuide` server in [grpc-go/examples/route_guide/server/server.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/server/server.go). Let's take a closer look at how it works. + +### Implementing RouteGuide + +As you can see, our server has a `routeGuideServer` struct type that implements the generated `RouteGuideServer` interface: + +```go +type routeGuideServer struct { + ... +} +... + +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + ... +} +... + +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + ... +} +... + +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + ... +} +... + +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + ... +} +... +``` + +#### Simple RPC +`routeGuideServer` implements all our service methods. Let's look at the simplest type first, `GetFeature`, which just gets a `Point` from the client and returns the corresponding feature information from its database in a `Feature`. 
+ +```go +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + return feature, nil + } + } + // No feature was found, return an unnamed feature + return &pb.Feature{"", point}, nil +} +``` + +The method is passed a context object for the RPC and the client's `Point` protocol buffer request. It returns a `Feature` protocol buffer object with the response information and an `error`. In the method we populate the `Feature` with the appropriate information, and then `return` it along with an `nil` error to tell gRPC that we've finished dealing with the RPC and that the `Feature` can be returned to the client. + +#### Server-side streaming RPC +Now let's look at one of our streaming RPCs. `ListFeatures` is a server-side streaming RPC, so we need to send back multiple `Feature`s to our client. + +```go +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + for _, feature := range s.savedFeatures { + if inRange(feature.Location, rect) { + if err := stream.Send(feature); err != nil { + return err + } + } + } + return nil +} +``` + +As you can see, instead of getting simple request and response objects in our method parameters, this time we get a request object (the `Rectangle` in which our client wants to find `Feature`s) and a special `RouteGuide_ListFeaturesServer` object to write our responses. + +In the method, we populate as many `Feature` objects as we need to return, writing them to the `RouteGuide_ListFeaturesServer` using its `Send()` method. Finally, as in our simple RPC, we return a `nil` error to tell gRPC that we've finished writing responses. Should any error happen in this call, we return a non-`nil` error; the gRPC layer will translate it into an appropriate RPC status to be sent on the wire. 
+ +#### Client-side streaming RPC +Now let's look at something a little more complicated: the client-side streaming method `RecordRoute`, where we get a stream of `Point`s from the client and return a single `RouteSummary` with information about their trip. As you can see, this time the method doesn't have a request parameter at all. Instead, it gets a `RouteGuide_RecordRouteServer` stream, which the server can use to both read *and* write messages - it can receive client messages using its `Recv()` method and return its single response using its `SendAndClose()` method. + +```go +func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + var pointCount, featureCount, distance int32 + var lastPoint *pb.Point + startTime := time.Now() + for { + point, err := stream.Recv() + if err == io.EOF { + endTime := time.Now() + return stream.SendAndClose(&pb.RouteSummary{ + PointCount: pointCount, + FeatureCount: featureCount, + Distance: distance, + ElapsedTime: int32(endTime.Sub(startTime).Seconds()), + }) + } + if err != nil { + return err + } + pointCount++ + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + featureCount++ + } + } + if lastPoint != nil { + distance += calcDistance(lastPoint, point) + } + lastPoint = point + } +} +``` + +In the method body we use the `RouteGuide_RecordRouteServer`s `Recv()` method to repeatedly read in our client's requests to a request object (in this case a `Point`) until there are no more messages: the server needs to check the the error returned from `Recv()` after each call. If this is `nil`, the stream is still good and it can continue reading; if it's `io.EOF` the message stream has ended and the server can return its `RouteSummary`. If it has any other value, we return the error "as is" so that it'll be translated to an RPC status by the gRPC layer. + +#### Bidirectional streaming RPC +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. 
+ +```go +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + key := serialize(in.Location) + ... // look for notes to be sent to client + for _, note := range s.routeNotes[key] { + if err := stream.Send(note); err != nil { + return err + } + } + } +} +``` + +This time we get a `RouteGuide_RouteChatServer` stream that, as in our client-side streaming example, can be used to read and write messages. However, this time we return values via our method's stream while the client is still writing messages to *their* message stream. + +The syntax for reading and writing here is very similar to our client-streaming method, except the server uses the stream's `Send()` method rather than `SendAndClose()` because it's writing multiple responses. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +### Starting the server + +Once we've implemented all our methods, we also need to start up a gRPC server so that clients can actually use our service. The following snippet shows how we do this for our `RouteGuide` service: + +```go +flag.Parse() +lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) +if err != nil { + log.Fatalf("failed to listen: %v", err) +} +grpcServer := grpc.NewServer() +pb.RegisterRouteGuideServer(grpcServer, &routeGuideServer{}) +... // determine whether to use TLS +grpcServer.Serve(lis) +``` +To build and start a server, we: + +1. Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))`. +2. Create an instance of the gRPC server using `grpc.NewServer()`. +3. Register our service implementation with the gRPC server. +4. 
Call `Serve()` on the server with our port details to do a blocking wait until the process is killed or `Stop()` is called.
+
+
+## Creating the client
+
+In this section, we'll look at creating a Go client for our `RouteGuide` service. You can see our complete example client code in [grpc-go/examples/route_guide/client/client.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/client/client.go).
+
+### Creating a stub
+
+To call service methods, we first need to create a gRPC *channel* to communicate with the server. We create this by passing the server address and port number to `grpc.Dial()` as follows:
+
+```go
+conn, err := grpc.Dial(*serverAddr)
+if err != nil {
+	...
+}
+defer conn.Close()
+```
+
+You can use `DialOptions` to set the auth credentials (e.g., TLS, GCE credentials, JWT credentials) in `grpc.Dial` if the service you request requires that - however, we don't need to do this for our `RouteGuide` service.
+
+Once the gRPC *channel* is set up, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our `.proto` file.
+
+```go
+client := pb.NewRouteGuideClient(conn)
+```
+
+### Calling service methods
+
+Now let's look at how we call our service methods. Note that in gRPC-Go, RPCs operate in a blocking/synchronous mode, which means that the RPC call waits for the server to respond, and will either return a response or an error.
+
+#### Simple RPC
+
+Calling the simple RPC `GetFeature` is nearly as straightforward as calling a local method.
+
+```go
+feature, err := client.GetFeature(context.Background(), &pb.Point{409146138, -746188906})
+if err != nil {
+	...
+}
+```
+
+As you can see, we call the method on the stub we got earlier. In our method parameters we create and populate a request protocol buffer object (in our case `Point`). 
We also pass a `context.Context` object which lets us change our RPC's behaviour if necessary, such as time-out/cancel an RPC in flight. If the call doesn't return an error, then we can read the response information from the server from the first return value. + +```go +log.Println(feature) +``` + +#### Server-side streaming RPC + +Here's where we call the server-side streaming method `ListFeatures`, which returns a stream of geographical `Feature`s. If you've already read [Creating the server](#server) some of this may look very familiar - streaming RPCs are implemented in a similar way on both sides. + +```go +rect := &pb.Rectangle{ ... } // initialize a pb.Rectangle +stream, err := client.ListFeatures(context.Background(), rect) +if err != nil { + ... +} +for { + feature, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + } + log.Println(feature) +} +``` + +As in the simple RPC, we pass the method a context and a request. However, instead of getting a response object back, we get back an instance of `RouteGuide_ListFeaturesClient`. The client can use the `RouteGuide_ListFeaturesClient` stream to read the server's responses. + +We use the `RouteGuide_ListFeaturesClient`'s `Recv()` method to repeatedly read in the server's responses to a response protocol buffer object (in this case a `Feature`) until there are no more messages: the client needs to check the error `err` returned from `Recv()` after each call. If `nil`, the stream is still good and it can continue reading; if it's `io.EOF` then the message stream has ended; otherwise there must be an RPC error, which is passed over through `err`. + +#### Client-side streaming RPC + +The client-side streaming method `RecordRoute` is similar to the server-side method, except that we only pass the method a context and get a `RouteGuide_RecordRouteClient` stream back, which we can use to both write *and* read messages. 
+ +```go +// Create a random number of random points +r := rand.New(rand.NewSource(time.Now().UnixNano())) +pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points +var points []*pb.Point +for i := 0; i < pointCount; i++ { + points = append(points, randomPoint(r)) +} +log.Printf("Traversing %d points.", len(points)) +stream, err := client.RecordRoute(context.Background()) +if err != nil { + log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) +} +for _, point := range points { + if err := stream.Send(point); err != nil { + log.Fatalf("%v.Send(%v) = %v", stream, point, err) + } +} +reply, err := stream.CloseAndRecv() +if err != nil { + log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) +} +log.Printf("Route summary: %v", reply) +``` + +The `RouteGuide_RecordRouteClient` has a `Send()` method that we can use to send requests to the server. Once we've finished writing our client's requests to the stream using `Send()`, we need to call `CloseAndRecv()` on the stream to let gRPC know that we've finished writing and are expecting to receive a response. We get our RPC status from the `err` returned from `CloseAndRecv()`. If the status is `nil`, then the first return value from `CloseAndRecv()` will be a valid server response. + +#### Bidirectional streaming RPC + +Finally, let's look at our bidirectional streaming RPC `RouteChat()`. As in the case of `RecordRoute`, we only pass the method a context object and get back a stream that we can use to both write and read messages. However, this time we return values via our method's stream while the server is still writing messages to *their* message stream. + +```go +stream, err := client.RouteChat(context.Background()) +waitc := make(chan struct{}) +go func() { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. 
+ close(waitc) + return + } + if err != nil { + log.Fatalf("Failed to receive a note : %v", err) + } + log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + } +}() +for _, note := range notes { + if err := stream.Send(note); err != nil { + log.Fatalf("Failed to send a note: %v", err) + } +} +stream.CloseSend() +<-waitc +``` + +The syntax for reading and writing here is very similar to our client-side streaming method, except we use the stream's `CloseSend()` method once we've finished our call. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. + +## Try it out! + +To compile and run the server, assuming you are in the folder +`$GOPATH/src/google.golang.org/grpc/examples/route_guide`, simply: + +```sh +$ go run server/server.go +``` + +Likewise, to run the client: + +```sh +$ go run client/client.go +``` + diff --git a/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go b/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go new file mode 100644 index 000000000..3dc426e24 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go @@ -0,0 +1,54 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import ( + "log" + "os" + + "golang.org/x/net/context" + "google.golang.org/grpc" + pb "google.golang.org/grpc/examples/helloworld/helloworld" +) + +const ( + address = "localhost:50051" + defaultName = "world" +) + +func main() { + // Set up a connection to the server. + conn, err := grpc.Dial(address, grpc.WithInsecure()) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + // Contact the server and print out its response. + name := defaultName + if len(os.Args) > 1 { + name = os.Args[1] + } + r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + log.Printf("Greeting: %s", r.Message) +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go b/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go new file mode 100644 index 000000000..702a3b617 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +//go:generate protoc -I ../helloworld --go_out=plugins=grpc:../helloworld ../helloworld/helloworld.proto + +package main + +import ( + "log" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc" + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "google.golang.org/grpc/reflection" +) + +const ( + port = ":50051" +) + +// server is used to implement helloworld.GreeterServer. +type server struct{} + +// SayHello implements helloworld.GreeterServer +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + return &pb.HelloReply{Message: "Hello " + in.Name}, nil +} + +func main() { + lis, err := net.Listen("tcp", port) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + // Register reflection service on gRPC server. + reflection.Register(s) + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go new file mode 100644 index 000000000..64bd1ef3c --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: helloworld.proto + +/* +Package helloworld is a generated protocol buffer package. + +It is generated from these files: + helloworld.proto + +It has these top-level messages: + HelloRequest + HelloReply +*/ +package helloworld + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The request message containing the user's name. +type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} +func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *HelloRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The response message containing the greetings +type HelloReply struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *HelloReply) Reset() { *m = HelloReply{} } +func (m *HelloReply) String() string { return proto.CompactTextString(m) } +func (*HelloReply) ProtoMessage() {} +func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *HelloReply) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func init() { + proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") + proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Greeter service + +type GreeterClient interface { + // Sends a greeting + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Greeter service + +type GreeterServer interface { + // Sends a greeting + SayHello(context.Context, *HelloRequest) (*HelloReply, error) +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).SayHello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/SayHello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "helloworld.proto", +} + +func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 175 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, + 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, + 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, + 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, + 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, + 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, + 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, + 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x14, 0xe4, 0x54, 0x2a, 0x31, 0x38, 0x19, 0x70, 0x49, 0x67, 0xe6, + 0xeb, 0xa5, 0x17, 0x15, 0x24, 0xeb, 0xa5, 0x56, 0x24, 0xe6, 0x16, 0xe4, 0xa4, 0x16, 0x23, 0xa9, + 0x75, 0xe2, 0x07, 0x2b, 0x0e, 0x07, 0xb1, 0x03, 0x40, 0x5e, 0x0a, 0x60, 0x4c, 0x62, 0x03, 0xfb, + 0xcd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto new file mode 100644 index 000000000..d79a6a0d1 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto @@ -0,0 +1,37 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.helloworld"; +option java_outer_classname = "HelloWorldProto"; + +package helloworld; + +// The greeting service definition. +service Greeter { + // Sends a greeting + rpc SayHello (HelloRequest) returns (HelloReply) {} +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go new file mode 100644 index 000000000..14957ed5f --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go @@ -0,0 +1,48 @@ +// Automatically generated by MockGen. DO NOT EDIT! 
+// Source: google.golang.org/grpc/examples/helloworld/helloworld (interfaces: GreeterClient) + +package mock_helloworld + +import ( + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + helloworld "google.golang.org/grpc/examples/helloworld/helloworld" +) + +// Mock of GreeterClient interface +type MockGreeterClient struct { + ctrl *gomock.Controller + recorder *_MockGreeterClientRecorder +} + +// Recorder for MockGreeterClient (not exported) +type _MockGreeterClientRecorder struct { + mock *MockGreeterClient +} + +func NewMockGreeterClient(ctrl *gomock.Controller) *MockGreeterClient { + mock := &MockGreeterClient{ctrl: ctrl} + mock.recorder = &_MockGreeterClientRecorder{mock} + return mock +} + +func (_m *MockGreeterClient) EXPECT() *_MockGreeterClientRecorder { + return _m.recorder +} + +func (_m *MockGreeterClient) SayHello(_param0 context.Context, _param1 *helloworld.HelloRequest, _param2 ...grpc.CallOption) (*helloworld.HelloReply, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "SayHello", _s...) + ret0, _ := ret[0].(*helloworld.HelloReply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockGreeterClientRecorder) SayHello(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "SayHello", _s...) +} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock_test.go b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock_test.go new file mode 100644 index 000000000..5ad9b8b0d --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock_test.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mock_helloworld_test + +import ( + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + helloworld "google.golang.org/grpc/examples/helloworld/helloworld" + hwmock "google.golang.org/grpc/examples/helloworld/mock_helloworld" +) + +// rpcMsg implements the gomock.Matcher interface +type rpcMsg struct { + msg proto.Message +} + +func (r *rpcMsg) Matches(msg interface{}) bool { + m, ok := msg.(proto.Message) + if !ok { + return false + } + return proto.Equal(m, r.msg) +} + +func (r *rpcMsg) String() string { + return fmt.Sprintf("is %s", r.msg) +} + +func TestSayHello(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockGreeterClient := hwmock.NewMockGreeterClient(ctrl) + req := &helloworld.HelloRequest{Name: "unit_test"} + mockGreeterClient.EXPECT().SayHello( + gomock.Any(), + &rpcMsg{msg: req}, + ).Return(&helloworld.HelloReply{Message: "Mocked Interface"}, nil) + testSayHello(t, mockGreeterClient) +} + +func testSayHello(t *testing.T, client helloworld.GreeterClient) { + r, err := client.SayHello(context.Background(), &helloworld.HelloRequest{Name: "unit_test"}) + if err != nil || r.Message != "Mocked Interface" { + t.Errorf("mocking failed") + } + t.Log("Reply : ", r.Message) +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/README.md 
b/vendor/google.golang.org/grpc/examples/route_guide/README.md new file mode 100644 index 000000000..7f009d5ca --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/README.md @@ -0,0 +1,35 @@ +# Description +The route guide server and client demonstrate how to use grpc go libraries to +perform unary, client streaming, server streaming and full duplex RPCs.
+
+Please refer to [gRPC Basics: Go](https://grpc.io/docs/tutorials/basic/go.html) for more information.
+
+See the definition of the route guide service in routeguide/route_guide.proto.
+
+# Run the sample code
+To compile and run the server, assuming you are in the root of the route_guide
+folder, i.e., .../examples/route_guide/, simply:
+
+```sh
+$ go run server/server.go
+```
+
+Likewise, to run the client:
+
+```sh
+$ go run client/client.go
+```
+
+# Optional command line flags
+The server and client both take optional command line flags. For example, the
+client and server run without TLS by default. To enable TLS:
+
+```sh
+$ go run server/server.go -tls=true
+```
+
+and
+
+```sh
+$ go run client/client.go -tls=true
+```
diff --git a/vendor/google.golang.org/grpc/examples/route_guide/client/client.go b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go new file mode 100644 index 000000000..6fc7a079c --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go @@ -0,0 +1,184 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries +// to perform unary, client streaming, server streaming and full duplex RPCs. +// +// It interacts with the route guide service whose definition can be found in routeguide/route_guide.proto. +package main + +import ( + "flag" + "io" + "log" + "math/rand" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + pb "google.golang.org/grpc/examples/route_guide/routeguide" + "google.golang.org/grpc/testdata" +) + +var ( + tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") + caFile = flag.String("ca_file", "", "The file containning the CA root cert file") + serverAddr = flag.String("server_addr", "127.0.0.1:10000", "The server address in the format of host:port") + serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake") +) + +// printFeature gets the feature for the given point. +func printFeature(client pb.RouteGuideClient, point *pb.Point) { + log.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) + feature, err := client.GetFeature(context.Background(), point) + if err != nil { + log.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) + } + log.Println(feature) +} + +// printFeatures lists all the features within the given bounding Rectangle. 
+func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { + log.Printf("Looking for features within %v", rect) + stream, err := client.ListFeatures(context.Background(), rect) + if err != nil { + log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + } + for { + feature, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + } + log.Println(feature) + } +} + +// runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server. +func runRecordRoute(client pb.RouteGuideClient) { + // Create a random number of random points + r := rand.New(rand.NewSource(time.Now().UnixNano())) + pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points + var points []*pb.Point + for i := 0; i < pointCount; i++ { + points = append(points, randomPoint(r)) + } + log.Printf("Traversing %d points.", len(points)) + stream, err := client.RecordRoute(context.Background()) + if err != nil { + log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) + } + for _, point := range points { + if err := stream.Send(point); err != nil { + log.Fatalf("%v.Send(%v) = %v", stream, point, err) + } + } + reply, err := stream.CloseAndRecv() + if err != nil { + log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + } + log.Printf("Route summary: %v", reply) +} + +// runRouteChat receives a sequence of route notes, while sending notes for various locations. 
+func runRouteChat(client pb.RouteGuideClient) { + notes := []*pb.RouteNote{ + {&pb.Point{Latitude: 0, Longitude: 1}, "First message"}, + {&pb.Point{Latitude: 0, Longitude: 2}, "Second message"}, + {&pb.Point{Latitude: 0, Longitude: 3}, "Third message"}, + {&pb.Point{Latitude: 0, Longitude: 1}, "Fourth message"}, + {&pb.Point{Latitude: 0, Longitude: 2}, "Fifth message"}, + {&pb.Point{Latitude: 0, Longitude: 3}, "Sixth message"}, + } + stream, err := client.RouteChat(context.Background()) + if err != nil { + log.Fatalf("%v.RouteChat(_) = _, %v", client, err) + } + waitc := make(chan struct{}) + go func() { + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + close(waitc) + return + } + if err != nil { + log.Fatalf("Failed to receive a note : %v", err) + } + log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + } + }() + for _, note := range notes { + if err := stream.Send(note); err != nil { + log.Fatalf("Failed to send a note: %v", err) + } + } + stream.CloseSend() + <-waitc +} + +func randomPoint(r *rand.Rand) *pb.Point { + lat := (r.Int31n(180) - 90) * 1e7 + long := (r.Int31n(360) - 180) * 1e7 + return &pb.Point{Latitude: lat, Longitude: long} +} + +func main() { + flag.Parse() + var opts []grpc.DialOption + if *tls { + if *caFile == "" { + *caFile = testdata.Path("ca.pem") + } + creds, err := credentials.NewClientTLSFromFile(*caFile, *serverHostOverride) + if err != nil { + log.Fatalf("Failed to create TLS credentials %v", err) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + conn, err := grpc.Dial(*serverAddr, opts...) + if err != nil { + log.Fatalf("fail to dial: %v", err) + } + defer conn.Close() + client := pb.NewRouteGuideClient(conn) + + // Looking for a valid feature + printFeature(client, &pb.Point{Latitude: 409146138, Longitude: -746188906}) + + // Feature missing. 
+ printFeature(client, &pb.Point{Latitude: 0, Longitude: 0}) + + // Looking for features between 40, -75 and 42, -73. + printFeatures(client, &pb.Rectangle{ + Lo: &pb.Point{Latitude: 400000000, Longitude: -750000000}, + Hi: &pb.Point{Latitude: 420000000, Longitude: -730000000}, + }) + + // RecordRoute + runRecordRoute(client) + + // RouteChat + runRouteChat(client) +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go new file mode 100644 index 000000000..328c929fa --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go @@ -0,0 +1,200 @@ +// Automatically generated by MockGen. DO NOT EDIT! +// Source: google.golang.org/grpc/examples/route_guide/routeguide (interfaces: RouteGuideClient,RouteGuide_RouteChatClient) + +package mock_routeguide + +import ( + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + routeguide "google.golang.org/grpc/examples/route_guide/routeguide" + metadata "google.golang.org/grpc/metadata" +) + +// Mock of RouteGuideClient interface +type MockRouteGuideClient struct { + ctrl *gomock.Controller + recorder *_MockRouteGuideClientRecorder +} + +// Recorder for MockRouteGuideClient (not exported) +type _MockRouteGuideClientRecorder struct { + mock *MockRouteGuideClient +} + +func NewMockRouteGuideClient(ctrl *gomock.Controller) *MockRouteGuideClient { + mock := &MockRouteGuideClient{ctrl: ctrl} + mock.recorder = &_MockRouteGuideClientRecorder{mock} + return mock +} + +func (_m *MockRouteGuideClient) EXPECT() *_MockRouteGuideClientRecorder { + return _m.recorder +} + +func (_m *MockRouteGuideClient) GetFeature(_param0 context.Context, _param1 *routeguide.Point, _param2 ...grpc.CallOption) (*routeguide.Feature, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := 
_m.ctrl.Call(_m, "GetFeature", _s...) + ret0, _ := ret[0].(*routeguide.Feature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) GetFeature(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "GetFeature", _s...) +} + +func (_m *MockRouteGuideClient) ListFeatures(_param0 context.Context, _param1 *routeguide.Rectangle, _param2 ...grpc.CallOption) (routeguide.RouteGuide_ListFeaturesClient, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "ListFeatures", _s...) + ret0, _ := ret[0].(routeguide.RouteGuide_ListFeaturesClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) ListFeatures(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "ListFeatures", _s...) +} + +func (_m *MockRouteGuideClient) RecordRoute(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RecordRouteClient, error) { + _s := []interface{}{_param0} + for _, _x := range _param1 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "RecordRoute", _s...) + ret0, _ := ret[0].(routeguide.RouteGuide_RecordRouteClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) RecordRoute(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0}, arg1...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "RecordRoute", _s...) +} + +func (_m *MockRouteGuideClient) RouteChat(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RouteChatClient, error) { + _s := []interface{}{_param0} + for _, _x := range _param1 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "RouteChat", _s...) 
+ ret0, _ := ret[0].(routeguide.RouteGuide_RouteChatClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) RouteChat(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0}, arg1...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "RouteChat", _s...) +} + +// Mock of RouteGuide_RouteChatClient interface +type MockRouteGuide_RouteChatClient struct { + ctrl *gomock.Controller + recorder *_MockRouteGuide_RouteChatClientRecorder +} + +// Recorder for MockRouteGuide_RouteChatClient (not exported) +type _MockRouteGuide_RouteChatClientRecorder struct { + mock *MockRouteGuide_RouteChatClient +} + +func NewMockRouteGuide_RouteChatClient(ctrl *gomock.Controller) *MockRouteGuide_RouteChatClient { + mock := &MockRouteGuide_RouteChatClient{ctrl: ctrl} + mock.recorder = &_MockRouteGuide_RouteChatClientRecorder{mock} + return mock +} + +func (_m *MockRouteGuide_RouteChatClient) EXPECT() *_MockRouteGuide_RouteChatClientRecorder { + return _m.recorder +} + +func (_m *MockRouteGuide_RouteChatClient) CloseSend() error { + ret := _m.ctrl.Call(_m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) CloseSend() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "CloseSend") +} + +func (_m *MockRouteGuide_RouteChatClient) Context() context.Context { + ret := _m.ctrl.Call(_m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Context() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Context") +} + +func (_m *MockRouteGuide_RouteChatClient) Header() (metadata.MD, error) { + ret := _m.ctrl.Call(_m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Header() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Header") +} + +func (_m 
*MockRouteGuide_RouteChatClient) Recv() (*routeguide.RouteNote, error) { + ret := _m.ctrl.Call(_m, "Recv") + ret0, _ := ret[0].(*routeguide.RouteNote) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Recv() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Recv") +} + +func (_m *MockRouteGuide_RouteChatClient) RecvMsg(_param0 interface{}) error { + ret := _m.ctrl.Call(_m, "RecvMsg", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "RecvMsg", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) Send(_param0 *routeguide.RouteNote) error { + ret := _m.ctrl.Call(_m, "Send", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Send(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Send", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) SendMsg(_param0 interface{}) error { + ret := _m.ctrl.Call(_m, "SendMsg", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) SendMsg(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SendMsg", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) Trailer() metadata.MD { + ret := _m.ctrl.Call(_m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Trailer() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Trailer") +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock_test.go b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock_test.go new file mode 100644 index 000000000..8c6d63c72 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock_test.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 
2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mock_routeguide_test + +import ( + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide" + rgpb "google.golang.org/grpc/examples/route_guide/routeguide" +) + +var ( + msg = &rgpb.RouteNote{ + Location: &rgpb.Point{Latitude: 17, Longitude: 29}, + Message: "Taxi-cab", + } +) + +func TestRouteChat(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Create mock for the stream returned by RouteChat + stream := rgmock.NewMockRouteGuide_RouteChatClient(ctrl) + // set expectation on sending. + stream.EXPECT().Send( + gomock.Any(), + ).Return(nil) + // Set expectation on receiving. + stream.EXPECT().Recv().Return(msg, nil) + stream.EXPECT().CloseSend().Return(nil) + // Create mock for the client interface. 
+ rgclient := rgmock.NewMockRouteGuideClient(ctrl) + // Set expectation on RouteChat + rgclient.EXPECT().RouteChat( + gomock.Any(), + ).Return(stream, nil) + if err := testRouteChat(rgclient); err != nil { + t.Fatalf("Test failed: %v", err) + } +} + +func testRouteChat(client rgpb.RouteGuideClient) error { + stream, err := client.RouteChat(context.Background()) + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + if err := stream.CloseSend(); err != nil { + return err + } + got, err := stream.Recv() + if err != nil { + return err + } + if !proto.Equal(got, msg) { + return fmt.Errorf("stream.Recv() = %v, want %v", got, msg) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go new file mode 100644 index 000000000..cf7f3937e --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go @@ -0,0 +1,543 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: route_guide.proto + +/* +Package routeguide is a generated protocol buffer package. + +It is generated from these files: + route_guide.proto + +It has these top-level messages: + Point + Rectangle + Feature + RouteNote + RouteSummary +*/ +package routeguide + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +type Point struct { + Latitude int32 `protobuf:"varint,1,opt,name=latitude" json:"latitude,omitempty"` + Longitude int32 `protobuf:"varint,2,opt,name=longitude" json:"longitude,omitempty"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Point) GetLatitude() int32 { + if m != nil { + return m.Latitude + } + return 0 +} + +func (m *Point) GetLongitude() int32 { + if m != nil { + return m.Longitude + } + return 0 +} + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +type Rectangle struct { + // One corner of the rectangle. + Lo *Point `protobuf:"bytes,1,opt,name=lo" json:"lo,omitempty"` + // The other corner of the rectangle. + Hi *Point `protobuf:"bytes,2,opt,name=hi" json:"hi,omitempty"` +} + +func (m *Rectangle) Reset() { *m = Rectangle{} } +func (m *Rectangle) String() string { return proto.CompactTextString(m) } +func (*Rectangle) ProtoMessage() {} +func (*Rectangle) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Rectangle) GetLo() *Point { + if m != nil { + return m.Lo + } + return nil +} + +func (m *Rectangle) GetHi() *Point { + if m != nil { + return m.Hi + } + return nil +} + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. +type Feature struct { + // The name of the feature. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The point where the feature is detected. + Location *Point `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` +} + +func (m *Feature) Reset() { *m = Feature{} } +func (m *Feature) String() string { return proto.CompactTextString(m) } +func (*Feature) ProtoMessage() {} +func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Feature) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Feature) GetLocation() *Point { + if m != nil { + return m.Location + } + return nil +} + +// A RouteNote is a message sent while at a given point. +type RouteNote struct { + // The location from which the message is sent. + Location *Point `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` + // The message to be sent. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *RouteNote) Reset() { *m = RouteNote{} } +func (m *RouteNote) String() string { return proto.CompactTextString(m) } +func (*RouteNote) ProtoMessage() {} +func (*RouteNote) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *RouteNote) GetLocation() *Point { + if m != nil { + return m.Location + } + return nil +} + +func (m *RouteNote) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// A RouteSummary is received in response to a RecordRoute rpc. +// +// It contains the number of individual points received, the number of +// detected features, and the total distance covered as the cumulative sum of +// the distance between each point. +type RouteSummary struct { + // The number of points received. + PointCount int32 `protobuf:"varint,1,opt,name=point_count,json=pointCount" json:"point_count,omitempty"` + // The number of known features passed while traversing the route. 
+ FeatureCount int32 `protobuf:"varint,2,opt,name=feature_count,json=featureCount" json:"feature_count,omitempty"` + // The distance covered in metres. + Distance int32 `protobuf:"varint,3,opt,name=distance" json:"distance,omitempty"` + // The duration of the traversal in seconds. + ElapsedTime int32 `protobuf:"varint,4,opt,name=elapsed_time,json=elapsedTime" json:"elapsed_time,omitempty"` +} + +func (m *RouteSummary) Reset() { *m = RouteSummary{} } +func (m *RouteSummary) String() string { return proto.CompactTextString(m) } +func (*RouteSummary) ProtoMessage() {} +func (*RouteSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *RouteSummary) GetPointCount() int32 { + if m != nil { + return m.PointCount + } + return 0 +} + +func (m *RouteSummary) GetFeatureCount() int32 { + if m != nil { + return m.FeatureCount + } + return 0 +} + +func (m *RouteSummary) GetDistance() int32 { + if m != nil { + return m.Distance + } + return 0 +} + +func (m *RouteSummary) GetElapsedTime() int32 { + if m != nil { + return m.ElapsedTime + } + return 0 +} + +func init() { + proto.RegisterType((*Point)(nil), "routeguide.Point") + proto.RegisterType((*Rectangle)(nil), "routeguide.Rectangle") + proto.RegisterType((*Feature)(nil), "routeguide.Feature") + proto.RegisterType((*RouteNote)(nil), "routeguide.RouteNote") + proto.RegisterType((*RouteSummary)(nil), "routeguide.RouteSummary") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for RouteGuide service + +type RouteGuideClient interface { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. 
+ GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) +} + +type routeGuideClient struct { + cc *grpc.ClientConn +} + +func NewRouteGuideClient(cc *grpc.ClientConn) RouteGuideClient { + return &routeGuideClient{cc} +} + +func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { + out := new(Feature) + err := grpc.Invoke(ctx, "/routeguide.RouteGuide/GetFeature", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[0], c.cc, "/routeguide.RouteGuide/ListFeatures", opts...) 
+ if err != nil { + return nil, err + } + x := &routeGuideListFeaturesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RouteGuide_ListFeaturesClient interface { + Recv() (*Feature, error) + grpc.ClientStream +} + +type routeGuideListFeaturesClient struct { + grpc.ClientStream +} + +func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { + m := new(Feature) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[1], c.cc, "/routeguide.RouteGuide/RecordRoute", opts...) + if err != nil { + return nil, err + } + x := &routeGuideRecordRouteClient{stream} + return x, nil +} + +type RouteGuide_RecordRouteClient interface { + Send(*Point) error + CloseAndRecv() (*RouteSummary, error) + grpc.ClientStream +} + +type routeGuideRecordRouteClient struct { + grpc.ClientStream +} + +func (x *routeGuideRecordRouteClient) Send(m *Point) error { + return x.ClientStream.SendMsg(m) +} + +func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(RouteSummary) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[2], c.cc, "/routeguide.RouteGuide/RouteChat", opts...) 
+ if err != nil { + return nil, err + } + x := &routeGuideRouteChatClient{stream} + return x, nil +} + +type RouteGuide_RouteChatClient interface { + Send(*RouteNote) error + Recv() (*RouteNote, error) + grpc.ClientStream +} + +type routeGuideRouteChatClient struct { + grpc.ClientStream +} + +func (x *routeGuideRouteChatClient) Send(m *RouteNote) error { + return x.ClientStream.SendMsg(m) +} + +func (x *routeGuideRouteChatClient) Recv() (*RouteNote, error) { + m := new(RouteNote) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for RouteGuide service + +type RouteGuideServer interface { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + GetFeature(context.Context, *Point) (*Feature, error) + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + RecordRoute(RouteGuide_RecordRouteServer) error + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). 
+ RouteChat(RouteGuide_RouteChatServer) error +} + +func RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) { + s.RegisterService(&_RouteGuide_serviceDesc, srv) +} + +func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Point) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteGuideServer).GetFeature(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/routeguide.RouteGuide/GetFeature", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteGuideServer).GetFeature(ctx, req.(*Point)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteGuide_ListFeatures_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Rectangle) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RouteGuideServer).ListFeatures(m, &routeGuideListFeaturesServer{stream}) +} + +type RouteGuide_ListFeaturesServer interface { + Send(*Feature) error + grpc.ServerStream +} + +type routeGuideListFeaturesServer struct { + grpc.ServerStream +} + +func (x *routeGuideListFeaturesServer) Send(m *Feature) error { + return x.ServerStream.SendMsg(m) +} + +func _RouteGuide_RecordRoute_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouteGuideServer).RecordRoute(&routeGuideRecordRouteServer{stream}) +} + +type RouteGuide_RecordRouteServer interface { + SendAndClose(*RouteSummary) error + Recv() (*Point, error) + grpc.ServerStream +} + +type routeGuideRecordRouteServer struct { + grpc.ServerStream +} + +func (x *routeGuideRecordRouteServer) SendAndClose(m *RouteSummary) error { + return x.ServerStream.SendMsg(m) +} + +func (x *routeGuideRecordRouteServer) Recv() (*Point, error) { + m := new(Point) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, 
nil +} + +func _RouteGuide_RouteChat_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouteGuideServer).RouteChat(&routeGuideRouteChatServer{stream}) +} + +type RouteGuide_RouteChatServer interface { + Send(*RouteNote) error + Recv() (*RouteNote, error) + grpc.ServerStream +} + +type routeGuideRouteChatServer struct { + grpc.ServerStream +} + +func (x *routeGuideRouteChatServer) Send(m *RouteNote) error { + return x.ServerStream.SendMsg(m) +} + +func (x *routeGuideRouteChatServer) Recv() (*RouteNote, error) { + m := new(RouteNote) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _RouteGuide_serviceDesc = grpc.ServiceDesc{ + ServiceName: "routeguide.RouteGuide", + HandlerType: (*RouteGuideServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetFeature", + Handler: _RouteGuide_GetFeature_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListFeatures", + Handler: _RouteGuide_ListFeatures_Handler, + ServerStreams: true, + }, + { + StreamName: "RecordRoute", + Handler: _RouteGuide_RecordRoute_Handler, + ClientStreams: true, + }, + { + StreamName: "RouteChat", + Handler: _RouteGuide_RouteChat_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "route_guide.proto", +} + +func init() { proto.RegisterFile("route_guide.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xdd, 0xca, 0xd3, 0x40, + 0x10, 0xfd, 0x36, 0x7e, 0x9f, 0x6d, 0x26, 0x11, 0xe9, 0x88, 0x10, 0xa2, 0xa0, 0x8d, 0x37, 0xbd, + 0x31, 0x94, 0x0a, 0x5e, 0x56, 0x6c, 0xc1, 0xde, 0x14, 0xa9, 0xb1, 0xf7, 0x65, 0x4d, 0xc6, 0x74, + 0x61, 0x93, 0x0d, 0xc9, 0x06, 0xf4, 0x01, 0x7c, 0x02, 0x5f, 0x58, 0xb2, 0x49, 0xda, 0x54, 0x5b, + 0xbc, 0xdb, 0x39, 0x73, 0xce, 0xfc, 0x9c, 0x61, 0x61, 0x52, 0xaa, 0x5a, 0xd3, 0x21, 0xad, 0x45, + 0x42, 0x61, 0x51, 0x2a, 0xad, 0x10, 0x0c, 
0x64, 0x90, 0xe0, 0x23, 0x3c, 0xec, 0x94, 0xc8, 0x35, + 0xfa, 0x30, 0x96, 0x5c, 0x0b, 0x5d, 0x27, 0xe4, 0xb1, 0xd7, 0x6c, 0xf6, 0x10, 0x9d, 0x62, 0x7c, + 0x09, 0xb6, 0x54, 0x79, 0xda, 0x26, 0x2d, 0x93, 0x3c, 0x03, 0xc1, 0x17, 0xb0, 0x23, 0x8a, 0x35, + 0xcf, 0x53, 0x49, 0x38, 0x05, 0x4b, 0x2a, 0x53, 0xc0, 0x59, 0x4c, 0xc2, 0x73, 0xa3, 0xd0, 0x74, + 0x89, 0x2c, 0xa9, 0x1a, 0xca, 0x51, 0x98, 0x32, 0xd7, 0x29, 0x47, 0x11, 0x6c, 0x61, 0xf4, 0x89, + 0xb8, 0xae, 0x4b, 0x42, 0x84, 0xfb, 0x9c, 0x67, 0xed, 0x4c, 0x76, 0x64, 0xde, 0xf8, 0x16, 0xc6, + 0x52, 0xc5, 0x5c, 0x0b, 0x95, 0xdf, 0xae, 0x73, 0xa2, 0x04, 0x7b, 0xb0, 0xa3, 0x26, 0xfb, 0x59, + 0xe9, 0x4b, 0x2d, 0xfb, 0xaf, 0x16, 0x3d, 0x18, 0x65, 0x54, 0x55, 0x3c, 0x6d, 0x17, 0xb7, 0xa3, + 0x3e, 0x0c, 0x7e, 0x33, 0x70, 0x4d, 0xd9, 0xaf, 0x75, 0x96, 0xf1, 0xf2, 0x27, 0xbe, 0x02, 0xa7, + 0x68, 0xd4, 0x87, 0x58, 0xd5, 0xb9, 0xee, 0x4c, 0x04, 0x03, 0xad, 0x1b, 0x04, 0xdf, 0xc0, 0x93, + 0xef, 0xed, 0x56, 0x1d, 0xa5, 0xb5, 0xd2, 0xed, 0xc0, 0x96, 0xe4, 0xc3, 0x38, 0x11, 0x95, 0xe6, + 0x79, 0x4c, 0xde, 0xa3, 0xf6, 0x0e, 0x7d, 0x8c, 0x53, 0x70, 0x49, 0xf2, 0xa2, 0xa2, 0xe4, 0xa0, + 0x45, 0x46, 0xde, 0xbd, 0xc9, 0x3b, 0x1d, 0xb6, 0x17, 0x19, 0x2d, 0x7e, 0x59, 0x00, 0x66, 0xaa, + 0x4d, 0xb3, 0x0e, 0xbe, 0x07, 0xd8, 0x90, 0xee, 0xbd, 0xfc, 0x77, 0x53, 0xff, 0xd9, 0x10, 0xea, + 0x78, 0xc1, 0x1d, 0x2e, 0xc1, 0xdd, 0x8a, 0xaa, 0x17, 0x56, 0xf8, 0x7c, 0x48, 0x3b, 0x5d, 0xfb, + 0x86, 0x7a, 0xce, 0x70, 0x09, 0x4e, 0x44, 0xb1, 0x2a, 0x13, 0x33, 0xcb, 0xb5, 0xc6, 0xde, 0x45, + 0xc5, 0x81, 0x8f, 0xc1, 0xdd, 0x8c, 0xe1, 0x87, 0xee, 0x64, 0xeb, 0x23, 0xd7, 0x7f, 0x35, 0xef, + 0x2f, 0xe9, 0x5f, 0x87, 0x1b, 0xf9, 0x9c, 0xad, 0xe6, 0xf0, 0x42, 0xa8, 0x30, 0x2d, 0x8b, 0x38, + 0xa4, 0x1f, 0x3c, 0x2b, 0x24, 0x55, 0x03, 0xfa, 0xea, 0xe9, 0xd9, 0xa3, 0x5d, 0xf3, 0x27, 0x76, + 0xec, 0xdb, 0x63, 0xf3, 0x39, 0xde, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xe4, 0xef, 0xe6, + 0x31, 0x03, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto new file mode 100644 index 000000000..fe21e437a --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto @@ -0,0 +1,110 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.routeguide"; +option java_outer_classname = "RouteGuideProto"; + +package routeguide; + +// Interface exported by the server. +service RouteGuide { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + rpc GetFeature(Point) returns (Feature) {} + + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} + + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. 
+ rpc RecordRoute(stream Point) returns (RouteSummary) {} + + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +} + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +message Rectangle { + // One corner of the rectangle. + Point lo = 1; + + // The other corner of the rectangle. + Point hi = 2; +} + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. +message Feature { + // The name of the feature. + string name = 1; + + // The point where the feature is detected. + Point location = 2; +} + +// A RouteNote is a message sent while at a given point. +message RouteNote { + // The location from which the message is sent. + Point location = 1; + + // The message to be sent. + string message = 2; +} + +// A RouteSummary is received in response to a RecordRoute rpc. +// +// It contains the number of individual points received, the number of +// detected features, and the total distance covered as the cumulative sum of +// the distance between each point. +message RouteSummary { + // The number of points received. + int32 point_count = 1; + + // The number of known features passed while traversing the route. + int32 feature_count = 2; + + // The distance covered in metres. + int32 distance = 3; + + // The duration of the traversal in seconds. 
+ int32 elapsed_time = 4; +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/server/server.go b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go new file mode 100644 index 000000000..5d919047e --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go @@ -0,0 +1,233 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc -I ../routeguide --go_out=plugins=grpc:../routeguide ../routeguide/route_guide.proto + +// Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries +// to perform unary, client streaming, server streaming and full duplex RPCs. +// +// It implements the route guide service whose definition can be found in routeguide/route_guide.proto. 
+package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/testdata" + + "github.com/golang/protobuf/proto" + + pb "google.golang.org/grpc/examples/route_guide/routeguide" +) + +var ( + tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") + certFile = flag.String("cert_file", "", "The TLS cert file") + keyFile = flag.String("key_file", "", "The TLS key file") + jsonDBFile = flag.String("json_db_file", "testdata/route_guide_db.json", "A json file containing a list of features") + port = flag.Int("port", 10000, "The server port") +) + +type routeGuideServer struct { + savedFeatures []*pb.Feature + routeNotes map[string][]*pb.RouteNote +} + +// GetFeature returns the feature at the given point. +func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + return feature, nil + } + } + // No feature was found, return an unnamed feature + return &pb.Feature{Location: point}, nil +} + +// ListFeatures lists all features contained within the given bounding Rectangle. +func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { + for _, feature := range s.savedFeatures { + if inRange(feature.Location, rect) { + if err := stream.Send(feature); err != nil { + return err + } + } + } + return nil +} + +// RecordRoute records a route composited of a sequence of points. +// +// It gets a stream of points, and responds with statistics about the "trip": +// number of points, number of known features visited, total distance traveled, and +// total time spent. 
+func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { + var pointCount, featureCount, distance int32 + var lastPoint *pb.Point + startTime := time.Now() + for { + point, err := stream.Recv() + if err == io.EOF { + endTime := time.Now() + return stream.SendAndClose(&pb.RouteSummary{ + PointCount: pointCount, + FeatureCount: featureCount, + Distance: distance, + ElapsedTime: int32(endTime.Sub(startTime).Seconds()), + }) + } + if err != nil { + return err + } + pointCount++ + for _, feature := range s.savedFeatures { + if proto.Equal(feature.Location, point) { + featureCount++ + } + } + if lastPoint != nil { + distance += calcDistance(lastPoint, point) + } + lastPoint = point + } +} + +// RouteChat receives a stream of message/location pairs, and responds with a stream of all +// previous messages at each of those locations. +func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + key := serialize(in.Location) + if _, present := s.routeNotes[key]; !present { + s.routeNotes[key] = []*pb.RouteNote{in} + } else { + s.routeNotes[key] = append(s.routeNotes[key], in) + } + for _, note := range s.routeNotes[key] { + if err := stream.Send(note); err != nil { + return err + } + } + } +} + +// loadFeatures loads features from a JSON file. +func (s *routeGuideServer) loadFeatures(filePath string) { + file, err := ioutil.ReadFile(filePath) + if err != nil { + log.Fatalf("Failed to load default features: %v", err) + } + if err := json.Unmarshal(file, &s.savedFeatures); err != nil { + log.Fatalf("Failed to load default features: %v", err) + } +} + +func toRadians(num float64) float64 { + return num * math.Pi / float64(180) +} + +// calcDistance calculates the distance between two points using the "haversine" formula. +// This code was taken from http://www.movable-type.co.uk/scripts/latlong.html. 
+func calcDistance(p1 *pb.Point, p2 *pb.Point) int32 { + const CordFactor float64 = 1e7 + const R float64 = float64(6371000) // metres + lat1 := float64(p1.Latitude) / CordFactor + lat2 := float64(p2.Latitude) / CordFactor + lng1 := float64(p1.Longitude) / CordFactor + lng2 := float64(p2.Longitude) / CordFactor + φ1 := toRadians(lat1) + φ2 := toRadians(lat2) + Δφ := toRadians(lat2 - lat1) + Δλ := toRadians(lng2 - lng1) + + a := math.Sin(Δφ/2)*math.Sin(Δφ/2) + + math.Cos(φ1)*math.Cos(φ2)* + math.Sin(Δλ/2)*math.Sin(Δλ/2) + c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) + + distance := R * c + return int32(distance) +} + +func inRange(point *pb.Point, rect *pb.Rectangle) bool { + left := math.Min(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) + right := math.Max(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) + top := math.Max(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) + bottom := math.Min(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) + + if float64(point.Longitude) >= left && + float64(point.Longitude) <= right && + float64(point.Latitude) >= bottom && + float64(point.Latitude) <= top { + return true + } + return false +} + +func serialize(point *pb.Point) string { + return fmt.Sprintf("%d %d", point.Latitude, point.Longitude) +} + +func newServer() *routeGuideServer { + s := new(routeGuideServer) + s.loadFeatures(*jsonDBFile) + s.routeNotes = make(map[string][]*pb.RouteNote) + return s +} + +func main() { + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + var opts []grpc.ServerOption + if *tls { + if *certFile == "" { + *certFile = testdata.Path("server1.pem") + } + if *keyFile == "" { + *keyFile = testdata.Path("server1.key") + } + creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) + if err != nil { + log.Fatalf("Failed to generate credentials %v", err) + } + opts = []grpc.ServerOption{grpc.Creds(creds)} + } + 
grpcServer := grpc.NewServer(opts...) + pb.RegisterRouteGuideServer(grpcServer, newServer()) + grpcServer.Serve(lis) +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json b/vendor/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json new file mode 100644 index 000000000..9d6a980ab --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/testdata/route_guide_db.json @@ -0,0 +1,601 @@ +[{ + "location": { + "latitude": 407838351, + "longitude": -746143763 + }, + "name": "Patriots Path, Mendham, NJ 07945, USA" +}, { + "location": { + "latitude": 408122808, + "longitude": -743999179 + }, + "name": "101 New Jersey 10, Whippany, NJ 07981, USA" +}, { + "location": { + "latitude": 413628156, + "longitude": -749015468 + }, + "name": "U.S. 6, Shohola, PA 18458, USA" +}, { + "location": { + "latitude": 419999544, + "longitude": -740371136 + }, + "name": "5 Conners Road, Kingston, NY 12401, USA" +}, { + "location": { + "latitude": 414008389, + "longitude": -743951297 + }, + "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" +}, { + "location": { + "latitude": 419611318, + "longitude": -746524769 + }, + "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" +}, { + "location": { + "latitude": 406109563, + "longitude": -742186778 + }, + "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" +}, { + "location": { + "latitude": 416802456, + "longitude": -742370183 + }, + "name": "352 South Mountain Road, Wallkill, NY 12589, USA" +}, { + "location": { + "latitude": 412950425, + "longitude": -741077389 + }, + "name": "Bailey Turn Road, Harriman, NY 10926, USA" +}, { + "location": { + "latitude": 412144655, + "longitude": -743949739 + }, + "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" +}, { + "location": { + "latitude": 415736605, + "longitude": -742847522 + }, + "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" +}, { + "location": { + "latitude": 413843930, + 
"longitude": -740501726 + }, + "name": "162 Merrill Road, Highland Mills, NY 10930, USA" +}, { + "location": { + "latitude": 410873075, + "longitude": -744459023 + }, + "name": "Clinton Road, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 412346009, + "longitude": -744026814 + }, + "name": "16 Old Brook Lane, Warwick, NY 10990, USA" +}, { + "location": { + "latitude": 402948455, + "longitude": -747903913 + }, + "name": "3 Drake Lane, Pennington, NJ 08534, USA" +}, { + "location": { + "latitude": 406337092, + "longitude": -740122226 + }, + "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" +}, { + "location": { + "latitude": 406421967, + "longitude": -747727624 + }, + "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" +}, { + "location": { + "latitude": 416318082, + "longitude": -749677716 + }, + "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" +}, { + "location": { + "latitude": 415301720, + "longitude": -748416257 + }, + "name": "282 Lakeview Drive Road, Highland Lake, NY 12743, USA" +}, { + "location": { + "latitude": 402647019, + "longitude": -747071791 + }, + "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" +}, { + "location": { + "latitude": 412567807, + "longitude": -741058078 + }, + "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" +}, { + "location": { + "latitude": 416855156, + "longitude": -744420597 + }, + "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" +}, { + "location": { + "latitude": 404663628, + "longitude": -744820157 + }, + "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" +}, { + "location": { + "latitude": 407113723, + "longitude": -749746483 + }, + "name": "" +}, { + "location": { + "latitude": 402133926, + "longitude": -743613249 + }, + "name": "" +}, { + "location": { + "latitude": 400273442, + "longitude": -741220915 + }, + "name": "" +}, { + "location": { + "latitude": 411236786, + "longitude": -744070769 + }, + "name": "" +}, { + 
"location": { + "latitude": 411633782, + "longitude": -746784970 + }, + "name": "211-225 Plains Road, Augusta, NJ 07822, USA" +}, { + "location": { + "latitude": 415830701, + "longitude": -742952812 + }, + "name": "" +}, { + "location": { + "latitude": 413447164, + "longitude": -748712898 + }, + "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" +}, { + "location": { + "latitude": 405047245, + "longitude": -749800722 + }, + "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" +}, { + "location": { + "latitude": 418858923, + "longitude": -746156790 + }, + "name": "" +}, { + "location": { + "latitude": 417951888, + "longitude": -748484944 + }, + "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" +}, { + "location": { + "latitude": 407033786, + "longitude": -743977337 + }, + "name": "26 East 3rd Street, New Providence, NJ 07974, USA" +}, { + "location": { + "latitude": 417548014, + "longitude": -740075041 + }, + "name": "" +}, { + "location": { + "latitude": 410395868, + "longitude": -744972325 + }, + "name": "" +}, { + "location": { + "latitude": 404615353, + "longitude": -745129803 + }, + "name": "" +}, { + "location": { + "latitude": 406589790, + "longitude": -743560121 + }, + "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" +}, { + "location": { + "latitude": 414653148, + "longitude": -740477477 + }, + "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" +}, { + "location": { + "latitude": 405957808, + "longitude": -743255336 + }, + "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" +}, { + "location": { + "latitude": 411733589, + "longitude": -741648093 + }, + "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" +}, { + "location": { + "latitude": 412676291, + "longitude": -742606606 + }, + "name": "1270 Lakes Road, Monroe, NY 10950, USA" +}, { + "location": { + "latitude": 409224445, + "longitude": -748286738 + }, + "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" +}, { + "location": { + "latitude": 
406523420, + "longitude": -742135517 + }, + "name": "652 Garden Street, Elizabeth, NJ 07202, USA" +}, { + "location": { + "latitude": 401827388, + "longitude": -740294537 + }, + "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" +}, { + "location": { + "latitude": 410564152, + "longitude": -743685054 + }, + "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 408472324, + "longitude": -740726046 + }, + "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" +}, { + "location": { + "latitude": 412452168, + "longitude": -740214052 + }, + "name": "5 White Oak Lane, Stony Point, NY 10980, USA" +}, { + "location": { + "latitude": 409146138, + "longitude": -746188906 + }, + "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" +}, { + "location": { + "latitude": 404701380, + "longitude": -744781745 + }, + "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 409642566, + "longitude": -746017679 + }, + "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" +}, { + "location": { + "latitude": 408031728, + "longitude": -748645385 + }, + "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" +}, { + "location": { + "latitude": 413700272, + "longitude": -742135189 + }, + "name": "367 Prospect Road, Chester, NY 10918, USA" +}, { + "location": { + "latitude": 404310607, + "longitude": -740282632 + }, + "name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA" +}, { + "location": { + "latitude": 409319800, + "longitude": -746201391 + }, + "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" +}, { + "location": { + "latitude": 406685311, + "longitude": -742108603 + }, + "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" +}, { + "location": { + "latitude": 419018117, + "longitude": -749142781 + }, + "name": "43 Dreher Road, Roscoe, NY 12776, USA" +}, { + "location": { + "latitude": 412856162, + "longitude": -745148837 + }, + "name": 
"Swan Street, Pine Island, NY 10969, USA" +}, { + "location": { + "latitude": 416560744, + "longitude": -746721964 + }, + "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" +}, { + "location": { + "latitude": 405314270, + "longitude": -749836354 + }, + "name": "" +}, { + "location": { + "latitude": 414219548, + "longitude": -743327440 + }, + "name": "" +}, { + "location": { + "latitude": 415534177, + "longitude": -742900616 + }, + "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" +}, { + "location": { + "latitude": 406898530, + "longitude": -749127080 + }, + "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" +}, { + "location": { + "latitude": 407586880, + "longitude": -741670168 + }, + "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" +}, { + "location": { + "latitude": 400106455, + "longitude": -742870190 + }, + "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" +}, { + "location": { + "latitude": 400066188, + "longitude": -746793294 + }, + "name": "" +}, { + "location": { + "latitude": 418803880, + "longitude": -744102673 + }, + "name": "40 Mountain Road, Napanoch, NY 12458, USA" +}, { + "location": { + "latitude": 414204288, + "longitude": -747895140 + }, + "name": "" +}, { + "location": { + "latitude": 414777405, + "longitude": -740615601 + }, + "name": "" +}, { + "location": { + "latitude": 415464475, + "longitude": -747175374 + }, + "name": "48 North Road, Forestburgh, NY 12777, USA" +}, { + "location": { + "latitude": 404062378, + "longitude": -746376177 + }, + "name": "" +}, { + "location": { + "latitude": 405688272, + "longitude": -749285130 + }, + "name": "" +}, { + "location": { + "latitude": 400342070, + "longitude": -748788996 + }, + "name": "" +}, { + "location": { + "latitude": 401809022, + "longitude": -744157964 + }, + "name": "" +}, { + "location": { + "latitude": 404226644, + "longitude": -740517141 + }, + "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" +}, { + "location": { + 
"latitude": 410322033, + "longitude": -747871659 + }, + "name": "" +}, { + "location": { + "latitude": 407100674, + "longitude": -747742727 + }, + "name": "" +}, { + "location": { + "latitude": 418811433, + "longitude": -741718005 + }, + "name": "213 Bush Road, Stone Ridge, NY 12484, USA" +}, { + "location": { + "latitude": 415034302, + "longitude": -743850945 + }, + "name": "" +}, { + "location": { + "latitude": 411349992, + "longitude": -743694161 + }, + "name": "" +}, { + "location": { + "latitude": 404839914, + "longitude": -744759616 + }, + "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 414638017, + "longitude": -745957854 + }, + "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" +}, { + "location": { + "latitude": 412127800, + "longitude": -740173578 + }, + "name": "" +}, { + "location": { + "latitude": 401263460, + "longitude": -747964303 + }, + "name": "" +}, { + "location": { + "latitude": 412843391, + "longitude": -749086026 + }, + "name": "" +}, { + "location": { + "latitude": 418512773, + "longitude": -743067823 + }, + "name": "" +}, { + "location": { + "latitude": 404318328, + "longitude": -740835638 + }, + "name": "42-102 Main Street, Belford, NJ 07718, USA" +}, { + "location": { + "latitude": 419020746, + "longitude": -741172328 + }, + "name": "" +}, { + "location": { + "latitude": 404080723, + "longitude": -746119569 + }, + "name": "" +}, { + "location": { + "latitude": 401012643, + "longitude": -744035134 + }, + "name": "" +}, { + "location": { + "latitude": 404306372, + "longitude": -741079661 + }, + "name": "" +}, { + "location": { + "latitude": 403966326, + "longitude": -748519297 + }, + "name": "" +}, { + "location": { + "latitude": 405002031, + "longitude": -748407866 + }, + "name": "" +}, { + "location": { + "latitude": 409532885, + "longitude": -742200683 + }, + "name": "" +}, { + "location": { + "latitude": 416851321, + "longitude": -742674555 + }, + "name": "" +}, { + "location": 
{ + "latitude": 406411633, + "longitude": -741722051 + }, + "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" +}, { + "location": { + "latitude": 413069058, + "longitude": -744597778 + }, + "name": "261 Van Sickle Road, Goshen, NY 10924, USA" +}, { + "location": { + "latitude": 418465462, + "longitude": -746859398 + }, + "name": "" +}, { + "location": { + "latitude": 411733222, + "longitude": -744228360 + }, + "name": "" +}, { + "location": { + "latitude": 410248224, + "longitude": -747127767 + }, + "name": "3 Hasta Way, Newton, NJ 07860, USA" +}] diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go new file mode 100644 index 000000000..db56ff362 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -0,0 +1,704 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "fmt" + "math/rand" + "net" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/naming" +) + +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. 
+type loadBalancerClient struct { + cc *ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) { + desc := &StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) { + m := new(lbmpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// NewGRPCLBBalancer creates a grpclb load balancer. +func NewGRPCLBBalancer(r naming.Resolver) Balancer { + return &grpclbBalancer{ + r: r, + } +} + +type remoteBalancerInfo struct { + addr string + // the server name used for authentication with the remote LB server. + name string +} + +// grpclbAddrInfo consists of the information of a backend server. +type grpclbAddrInfo struct { + addr Address + connected bool + // dropForRateLimiting indicates whether this particular request should be + // dropped by the client for rate limiting. + dropForRateLimiting bool + // dropForLoadBalancing indicates whether this particular request should be + // dropped by the client for load balancing. + dropForLoadBalancing bool +} + +type grpclbBalancer struct { + r naming.Resolver + target string + mu sync.Mutex + seq int // a sequence number to make sure addrCh does not get stale addresses. 
+ w naming.Watcher + addrCh chan []Address + rbs []remoteBalancerInfo + addrs []*grpclbAddrInfo + next int + waitCh chan struct{} + done bool + rand *rand.Rand + + clientStats lbmpb.ClientStats +} + +func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { + updates, err := w.Next() + if err != nil { + grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err) + return err + } + b.mu.Lock() + defer b.mu.Unlock() + if b.done { + return ErrClientConnClosing + } + for _, update := range updates { + switch update.Op { + case naming.Add: + var exist bool + for _, v := range b.rbs { + // TODO: Is the same addr with different server name a different balancer? + if update.Addr == v.addr { + exist = true + break + } + } + if exist { + continue + } + md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB) + if !ok { + // TODO: Revisit the handling here and may introduce some fallback mechanism. + grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata) + continue + } + switch md.AddrType { + case naming.Backend: + // TODO: Revisit the handling here and may introduce some fallback mechanism. + grpclog.Errorf("The name resolution does not give grpclb addresses") + continue + case naming.GRPCLB: + b.rbs = append(b.rbs, remoteBalancerInfo{ + addr: update.Addr, + name: md.ServerName, + }) + default: + grpclog.Errorf("Received unknow address type %d", md.AddrType) + continue + } + case naming.Delete: + for i, v := range b.rbs { + if update.Addr == v.addr { + copy(b.rbs[i:], b.rbs[i+1:]) + b.rbs = b.rbs[:len(b.rbs)-1] + break + } + } + default: + grpclog.Errorf("Unknown update.Op %v", update.Op) + } + } + // TODO: Fall back to the basic round-robin load balancing if the resulting address is + // not a load balancer. 
+ select { + case <-ch: + default: + } + ch <- b.rbs + return nil +} + +func convertDuration(d *lbmpb.Duration) time.Duration { + if d == nil { + return 0 + } + return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond +} + +func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) { + if l == nil { + return + } + servers := l.GetServers() + var ( + sl []*grpclbAddrInfo + addrs []Address + ) + for _, s := range servers { + md := metadata.Pairs("lb-token", s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. + ipStr = fmt.Sprintf("[%s]", ipStr) + } + addr := Address{ + Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), + Metadata: &md, + } + sl = append(sl, &grpclbAddrInfo{ + addr: addr, + dropForRateLimiting: s.DropForRateLimiting, + dropForLoadBalancing: s.DropForLoadBalancing, + }) + addrs = append(addrs, addr) + } + b.mu.Lock() + defer b.mu.Unlock() + if b.done || seq < b.seq { + return + } + if len(sl) > 0 { + // reset b.next to 0 when replacing the server list. + b.next = 0 + b.addrs = sl + b.addrCh <- addrs + } + return +} + +func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + case <-done: + return + } + b.mu.Lock() + stats := b.clientStats + b.clientStats = lbmpb.ClientStats{} // Clear the stats. 
+ b.mu.Unlock() + t := time.Now() + stats.Timestamp = &lbmpb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := s.Send(&lbmpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{ + ClientStats: &stats, + }, + }); err != nil { + grpclog.Errorf("grpclb: failed to send load report: %v", err) + return + } + } +} + +func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := lbc.BalanceLoad(ctx) + if err != nil { + grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + return + } + b.mu.Lock() + if b.done { + b.mu.Unlock() + return + } + b.mu.Unlock() + initReq := &lbmpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbmpb.InitialLoadBalanceRequest{ + Name: b.target, + }, + }, + } + if err := stream.Send(initReq); err != nil { + grpclog.Errorf("grpclb: failed to send init request: %v", err) + // TODO: backoff on retry? + return true + } + reply, err := stream.Recv() + if err != nil { + grpclog.Errorf("grpclb: failed to recv init response: %v", err) + // TODO: backoff on retry? + return true + } + initResp := reply.GetInitialResponse() + if initResp == nil { + grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.") + return + } + // TODO: Support delegation. + if initResp.LoadBalancerDelegate != "" { + // delegation + grpclog.Errorf("TODO: Delegation is not supported yet.") + return + } + streamDone := make(chan struct{}) + defer close(streamDone) + b.mu.Lock() + b.clientStats = lbmpb.ClientStats{} // Clear client stats. + b.mu.Unlock() + if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { + go b.sendLoadReport(stream, d, streamDone) + } + // Retrieve the server list. 
+ for { + reply, err := stream.Recv() + if err != nil { + grpclog.Errorf("grpclb: failed to recv server list: %v", err) + break + } + b.mu.Lock() + if b.done || seq < b.seq { + b.mu.Unlock() + return + } + b.seq++ // tick when receiving a new list of servers. + seq = b.seq + b.mu.Unlock() + if serverList := reply.GetServerList(); serverList != nil { + b.processServerList(serverList, seq) + } + } + return true +} + +func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { + b.rand = rand.New(rand.NewSource(time.Now().Unix())) + // TODO: Fall back to the basic direct connection if there is no name resolver. + if b.r == nil { + return errors.New("there is no name resolver installed") + } + b.target = target + b.mu.Lock() + if b.done { + b.mu.Unlock() + return ErrClientConnClosing + } + b.addrCh = make(chan []Address) + w, err := b.r.Resolve(target) + if err != nil { + b.mu.Unlock() + grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err) + return err + } + b.w = w + b.mu.Unlock() + balancerAddrsCh := make(chan []remoteBalancerInfo, 1) + // Spawn a goroutine to monitor the name resolution of remote load balancer. + go func() { + for { + if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil { + grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err) + close(balancerAddrsCh) + return + } + } + }() + // Spawn a goroutine to talk to the remote load balancer. + go func() { + var ( + cc *ClientConn + // ccError is closed when there is an error in the current cc. + // A new rb should be picked from rbs and connected. 
+ ccError chan struct{} + rb *remoteBalancerInfo + rbs []remoteBalancerInfo + rbIdx int + ) + + defer func() { + if ccError != nil { + select { + case <-ccError: + default: + close(ccError) + } + } + if cc != nil { + cc.Close() + } + }() + + for { + var ok bool + select { + case rbs, ok = <-balancerAddrsCh: + if !ok { + return + } + foundIdx := -1 + if rb != nil { + for i, trb := range rbs { + if trb == *rb { + foundIdx = i + break + } + } + } + if foundIdx >= 0 { + if foundIdx >= 1 { + // Move the address in use to the beginning of the list. + b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0] + rbIdx = 0 + } + continue // If found, don't dial new cc. + } else if len(rbs) > 0 { + // Pick a random one from the list, instead of always using the first one. + if l := len(rbs); l > 1 && rb != nil { + tmpIdx := b.rand.Intn(l - 1) + b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0] + } + rbIdx = 0 + rb = &rbs[0] + } else { + // foundIdx < 0 && len(rbs) <= 0. + rb = nil + } + case <-ccError: + ccError = nil + if rbIdx < len(rbs)-1 { + rbIdx++ + rb = &rbs[rbIdx] + } else { + rb = nil + } + } + + if rb == nil { + continue + } + + if cc != nil { + cc.Close() + } + // Talk to the remote load balancer to get the server list. + var ( + err error + dopts []DialOption + ) + if creds := config.DialCreds; creds != nil { + if rb.name != "" { + if err := creds.OverrideServerName(rb.name); err != nil { + grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err) + continue + } + } + dopts = append(dopts, WithTransportCredentials(creds)) + } else { + dopts = append(dopts, WithInsecure()) + } + if dialer := config.Dialer; dialer != nil { + // WithDialer takes a different type of function, so we instead use a special DialOption here. 
+ dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) + } + dopts = append(dopts, WithBlock()) + ccError = make(chan struct{}) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + cc, err = DialContext(ctx, rb.addr, dopts...) + cancel() + if err != nil { + grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err) + close(ccError) + continue + } + b.mu.Lock() + b.seq++ // tick when getting a new balancer address + seq := b.seq + b.next = 0 + b.mu.Unlock() + go func(cc *ClientConn, ccError chan struct{}) { + lbc := &loadBalancerClient{cc} + b.callRemoteBalancer(lbc, seq) + cc.Close() + select { + case <-ccError: + default: + close(ccError) + } + }(cc, ccError) + } + }() + return nil +} + +func (b *grpclbBalancer) down(addr Address, err error) { + b.mu.Lock() + defer b.mu.Unlock() + for _, a := range b.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +func (b *grpclbBalancer) Up(addr Address) func(error) { + b.mu.Lock() + defer b.mu.Unlock() + if b.done { + return nil + } + var cnt int + for _, a := range b.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing { + cnt++ + } + } + // addr is the only one which is connected. Notify the Get() callers who are blocking. 
+ if cnt == 1 && b.waitCh != nil { + close(b.waitCh) + b.waitCh = nil + } + return func(err error) { + b.down(addr, err) + } +} + +func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + b.mu.Lock() + if b.done { + b.mu.Unlock() + err = ErrClientConnClosing + return + } + seq := b.seq + + defer func() { + if err != nil { + return + } + put = func() { + s, ok := rpcInfoFromContext(ctx) + if !ok { + return + } + b.mu.Lock() + defer b.mu.Unlock() + if b.done || seq < b.seq { + return + } + b.clientStats.NumCallsFinished++ + if !s.bytesSent { + b.clientStats.NumCallsFinishedWithClientFailedToSend++ + } else if s.bytesReceived { + b.clientStats.NumCallsFinishedKnownReceived++ + } + } + }() + + b.clientStats.NumCallsStarted++ + if len(b.addrs) > 0 { + if b.next >= len(b.addrs) { + b.next = 0 + } + next := b.next + for { + a := b.addrs[next] + next = (next + 1) % len(b.addrs) + if a.connected { + if !a.dropForRateLimiting && !a.dropForLoadBalancing { + addr = a.addr + b.next = next + b.mu.Unlock() + return + } + if !opts.BlockingWait { + b.next = next + if a.dropForLoadBalancing { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ + } else if a.dropForRateLimiting { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForRateLimiting++ + } + b.mu.Unlock() + err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr) + return + } + } + if next == b.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ + b.mu.Unlock() + err = Errorf(codes.Unavailable, "there is no address available") + return + } + // Wait on b.waitCh for non-failfast RPCs. 
+ if b.waitCh == nil { + ch = make(chan struct{}) + b.waitCh = ch + } else { + ch = b.waitCh + } + b.mu.Unlock() + for { + select { + case <-ctx.Done(): + b.mu.Lock() + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ + b.mu.Unlock() + err = ctx.Err() + return + case <-ch: + b.mu.Lock() + if b.done { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ + b.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(b.addrs) > 0 { + if b.next >= len(b.addrs) { + b.next = 0 + } + next := b.next + for { + a := b.addrs[next] + next = (next + 1) % len(b.addrs) + if a.connected { + if !a.dropForRateLimiting && !a.dropForLoadBalancing { + addr = a.addr + b.next = next + b.mu.Unlock() + return + } + if !opts.BlockingWait { + b.next = next + if a.dropForLoadBalancing { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ + } else if a.dropForRateLimiting { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForRateLimiting++ + } + b.mu.Unlock() + err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr) + return + } + } + if next == b.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. 
+ if b.waitCh == nil { + ch = make(chan struct{}) + b.waitCh = ch + } else { + ch = b.waitCh + } + b.mu.Unlock() + } + } +} + +func (b *grpclbBalancer) Notify() <-chan []Address { + return b.addrCh +} + +func (b *grpclbBalancer) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + if b.done { + return errBalancerClosed + } + b.done = true + if b.waitCh != nil { + close(b.waitCh) + } + if b.addrCh != nil { + close(b.addrCh) + } + if b.w != nil { + b.w.Close() + } + return nil +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go new file mode 100644 index 000000000..f4a27125a --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go @@ -0,0 +1,615 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_lb_v1/messages/messages.proto + +/* +Package messages is a generated protocol buffer package. + +It is generated from these files: + grpc_lb_v1/messages/messages.proto + +It has these top-level messages: + Duration + Timestamp + LoadBalanceRequest + InitialLoadBalanceRequest + ClientStats + LoadBalanceResponse + InitialLoadBalanceResponse + ServerList + Server +*/ +package messages + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. 
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +type LoadBalanceRequest struct { + // Types that are valid to be assigned to LoadBalanceRequestType: + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` +} + +func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } +func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceRequest) ProtoMessage() {} +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"` +} +type LoadBalanceRequest_ClientStats struct { + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := 
m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (m *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } +} + +func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialRequest); err != nil { + return err + } + case *LoadBalanceRequest_ClientStats: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientStats); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceRequest) + switch tag { + case 1: // load_balance_request_type.initial_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceRequest) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg} + return true, err + case 2: // load_balance_request_type.client_stats + if wire != proto.WireBytes { 
+ return true, proto.ErrInternalBadWireType + } + msg := new(ClientStats) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + s := proto.Size(x.InitialRequest) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceRequest_ClientStats: + s := proto.Size(x.ClientStats) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceRequest struct { + // Name of load balanced service (IE, balancer.service.com) + // length should be less than 256 bytes. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } +func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceRequest) ProtoMessage() {} +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *InitialLoadBalanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + // The timestamp of generating the report. + Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` + // The total number of RPCs that started. 
+ NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"` + // The total number of RPCs that were dropped by the client because of rate + // limiting. + NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"` + // The total number of RPCs that were dropped by the client because of load + // balancing. + NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. 
+ NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ClientStats) GetTimestamp() *Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *ClientStats) GetNumCallsStarted() int64 { + if m != nil { + return m.NumCallsStarted + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinished() int64 { + if m != nil { + return m.NumCallsFinished + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 { + if m != nil { + return m.NumCallsFinishedWithDropForRateLimiting + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 { + if m != nil { + return m.NumCallsFinishedWithDropForLoadBalancing + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if m != nil { + return m.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if m != nil { + return m.NumCallsFinishedKnownReceived + } + return 0 +} + +type LoadBalanceResponse struct { + // Types that are valid to be assigned to LoadBalanceResponseType: + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` +} + +func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } +func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceResponse) ProtoMessage() {} +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{5} } + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"` +} +type LoadBalanceResponse_ServerList struct { + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (m *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + } +} + +func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialResponse); err != nil { + return err + } + case *LoadBalanceResponse_ServerList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceResponse) + switch tag { + case 1: // load_balance_response_type.initial_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceResponse) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg} + return true, err + case 2: // load_balance_response_type.server_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerList) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceResponse) + // 
load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + s := proto.Size(x.InitialResponse) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceResponse_ServerList: + s := proto.Size(x.ServerList) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceResponse struct { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. + LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"` + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. 
+ ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"` +} + +func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } +func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceResponse) ProtoMessage() {} +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { + if m != nil { + return m.LoadBalancerDelegate + } + return "" +} + +func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration { + if m != nil { + return m.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` +} + +func (m *ServerList) Reset() { *m = ServerList{} } +func (m *ServerList) String() string { return proto.CompactTextString(m) } +func (*ServerList) ProtoMessage() {} +func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ServerList) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +// Contains server information. When none of the [drop_for_*] fields are true, +// use the other fields. When drop_for_rate_limiting is true, ignore all other +// fields. Use drop_for_load_balancing only when it is true and +// drop_for_rate_limiting is false. +type Server struct { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. 
+ IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + // An opaque but printable token given to the frontend for each pick. All + // frontend requests for that pick must include the token in its initial + // metadata. The token is used by the backend to verify the request and to + // allow the backend to report load to the gRPC LB system. + // + // Its length is variable but less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client + // for rate limiting. + DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"` + // Indicates whether this particular request should be dropped by the client + // for load balancing. 
+ DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Server) GetIpAddress() []byte { + if m != nil { + return m.IpAddress + } + return nil +} + +func (m *Server) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Server) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *Server) GetDropForRateLimiting() bool { + if m != nil { + return m.DropForRateLimiting + } + return false +} + +func (m *Server) GetDropForLoadBalancing() bool { + if m != nil { + return m.DropForLoadBalancing + } + return false +} + +func init() { + proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration") + proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp") + proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") + proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") + proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") + proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") + proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") + proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") + proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") +} + +func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 709 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b, + 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 
0x69, + 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55, + 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28, + 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f, + 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb, + 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56, + 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3, + 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a, + 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18, + 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8, + 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a, + 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc, + 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d, + 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f, + 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42, + 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b, + 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf, + 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60, + 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3, + 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29, + 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9, + 0x99, 0xd2, 0x4c, 0x73, 0x8b, 
0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1, + 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e, + 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd, + 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a, + 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa, + 0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31, + 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a, + 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79, + 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8, + 0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89, + 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f, + 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7, + 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a, + 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62, + 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d, + 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77, + 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc, + 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76, + 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b, + 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06, + 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 
0xb4, 0xe1, 0x73, 0x39, 0xfd, + 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86, + 0xa6, 0x4a, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto new file mode 100644 index 000000000..2ed04551f --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto @@ -0,0 +1,155 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.lb.v1; +option go_package = "messages"; + +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} + +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. 
Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} + +message LoadBalanceRequest { + oneof load_balance_request_type { + // This message should be sent on the first request to the load balancer. + InitialLoadBalanceRequest initial_request = 1; + + // The client stats should be periodically reported to the load balancer + // based on the duration defined in the InitialLoadBalanceResponse. + ClientStats client_stats = 2; + } +} + +message InitialLoadBalanceRequest { + // Name of load balanced service (IE, balancer.service.com) + // length should be less than 256 bytes. + string name = 1; +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +message ClientStats { + // The timestamp of generating the report. + Timestamp timestamp = 1; + + // The total number of RPCs that started. + int64 num_calls_started = 2; + + // The total number of RPCs that finished. + int64 num_calls_finished = 3; + + // The total number of RPCs that were dropped by the client because of rate + // limiting. + int64 num_calls_finished_with_drop_for_rate_limiting = 4; + + // The total number of RPCs that were dropped by the client because of load + // balancing. + int64 num_calls_finished_with_drop_for_load_balancing = 5; + + // The total number of RPCs that failed to reach a server except dropped RPCs. + int64 num_calls_finished_with_client_failed_to_send = 6; + + // The total number of RPCs that finished and are known to have been received + // by a server. + int64 num_calls_finished_known_received = 7; +} + +message LoadBalanceResponse { + oneof load_balance_response_type { + // This message should be sent on the first response to the client. 
+ InitialLoadBalanceResponse initial_response = 1; + + // Contains the list of servers selected by the load balancer. The client + // should send requests to these servers in the specified order. + ServerList server_list = 2; + } +} + +message InitialLoadBalanceResponse { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. + string load_balancer_delegate = 1; + + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. + Duration client_stats_report_interval = 2; +} + +message ServerList { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + repeated Server servers = 1; + + // Was google.protobuf.Duration expiration_interval. + reserved 3; +} + +// Contains server information. When none of the [drop_for_*] fields are true, +// use the other fields. When drop_for_rate_limiting is true, ignore all other +// fields. Use drop_for_load_balancing only when it is true and +// drop_for_rate_limiting is false. +message Server { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. + bytes ip_address = 1; + + // A resolved port number for the server. + int32 port = 2; + + // An opaque but printable token given to the frontend for each pick. All + // frontend requests for that pick must include the token in its initial + // metadata. 
The token is used by the backend to verify the request and to + // allow the backend to report load to the gRPC LB system. + // + // Its length is variable but less than 50 bytes. + string load_balance_token = 3; + + // Indicates whether this particular request should be dropped by the client + // for rate limiting. + bool drop_for_rate_limiting = 4; + + // Indicates whether this particular request should be dropped by the client + // for load balancing. + bool drop_for_load_balancing = 5; +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go new file mode 100644 index 000000000..ebcbe56d8 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_lb_v1/service/service.proto + +/* +Package service is a generated protocol buffer package. + +It is generated from these files: + grpc_lb_v1/service/service.proto + +It has these top-level messages: +*/ +package service + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import grpc_lb_v1 "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for LoadBalancer service + +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) +} + +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func NewLoadBalancerClient(cc *grpc.ClientConn) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &loadBalancerBalanceLoadClient{stream} + return x, nil +} + +type LoadBalancer_BalanceLoadClient interface { + Send(*grpc_lb_v1.LoadBalanceRequest) error + Recv() (*grpc_lb_v1.LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *grpc_lb_v1.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*grpc_lb_v1.LoadBalanceResponse, error) { + m := new(grpc_lb_v1.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for LoadBalancer service + +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. 
+ BalanceLoad(LoadBalancer_BalanceLoadServer) error +} + +func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { + s.RegisterService(&_LoadBalancer_serviceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*grpc_lb_v1.LoadBalanceResponse) error + Recv() (*grpc_lb_v1.LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *grpc_lb_v1.LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*grpc_lb_v1.LoadBalanceRequest, error) { + m := new(grpc_lb_v1.LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_lb_v1/service/service.proto", +} + +func init() { proto.RegisterFile("grpc_lb_v1/service/service.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 142 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x49, 0x8a, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x85, 0xd1, + 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x5c, 0x20, 0x15, 0x7a, 0x39, 0x49, 0x7a, 0x65, 0x86, + 0x52, 0x4a, 0x48, 0xaa, 0x73, 0x53, 0x8b, 0x8b, 0x13, 0xd3, 0x53, 0x8b, 0xe1, 0x0c, 0x88, 0x7a, + 0xa3, 0x24, 0x2e, 0x1e, 0x9f, 0xfc, 0xc4, 0x14, 0xa7, 0xc4, 0x9c, 0xc4, 0xbc, 
0xe4, 0xd4, 0x22, + 0xa1, 0x20, 0x2e, 0x6e, 0x28, 0x1b, 0x24, 0x2c, 0x24, 0xa7, 0x87, 0x30, 0x4f, 0x0f, 0x49, 0x61, + 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x3c, 0x4e, 0xf9, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, + 0x54, 0x0d, 0x46, 0x03, 0x46, 0x27, 0xce, 0x28, 0x76, 0xa8, 0x23, 0x93, 0xd8, 0xc0, 0xb6, 0x1a, + 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x39, 0x4e, 0xb0, 0xf8, 0xc9, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto new file mode 100644 index 000000000..02c1a8b77 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto @@ -0,0 +1,26 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.lb.v1; +option go_package = "service"; + +import "grpc_lb_v1/messages/messages.proto"; + +service LoadBalancer { + // Bidirectional rpc to get a list of servers. + rpc BalanceLoad(stream LoadBalanceRequest) + returns (stream LoadBalanceResponse); +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpclb_test.go b/vendor/google.golang.org/grpc/grpclb/grpclb_test.go new file mode 100644 index 000000000..46c1fe5b9 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpclb_test.go @@ -0,0 +1,891 @@ +/* + * + * Copyright 2016 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=:. grpc_lb_v1/messages/messages.proto +//go:generate protoc --go_out=Mgrpc_lb_v1/messages/messages.proto=google.golang.org/grpc/grpclb/grpc_lb_v1/messages,plugins=grpc:. grpc_lb_v1/service/service.proto + +// Package grpclb_test is currently used only for grpclb testing. +package grpclb_test + +import ( + "errors" + "fmt" + "io" + "net" + "strings" + "sync" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + lbspb "google.golang.org/grpc/grpclb/grpc_lb_v1/service" + _ "google.golang.org/grpc/grpclog/glogger" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/naming" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/test/leakcheck" +) + +var ( + lbsn = "bar.com" + besn = "foo.com" + lbToken = "iamatoken" + + // Resolver replaces localhost with fakeName in Next(). + // Dialer replaces fakeName with localhost when dialing. + // This will test that custom dialer is passed from Dial to grpclb. 
+ fakeName = "fake.Name" +) + +type testWatcher struct { + // the channel to receives name resolution updates + update chan *naming.Update + // the side channel to get to know how many updates in a batch + side chan int + // the channel to notifiy update injector that the update reading is done + readDone chan int +} + +func (w *testWatcher) Next() (updates []*naming.Update, err error) { + n, ok := <-w.side + if !ok { + return nil, fmt.Errorf("w.side is closed") + } + for i := 0; i < n; i++ { + u, ok := <-w.update + if !ok { + break + } + if u != nil { + // Resolver replaces localhost with fakeName in Next(). + // Custom dialer will replace fakeName with localhost when dialing. + u.Addr = strings.Replace(u.Addr, "localhost", fakeName, 1) + updates = append(updates, u) + } + } + w.readDone <- 0 + return +} + +func (w *testWatcher) Close() { + close(w.side) +} + +// Inject naming resolution updates to the testWatcher. +func (w *testWatcher) inject(updates []*naming.Update) { + w.side <- len(updates) + for _, u := range updates { + w.update <- u + } + <-w.readDone +} + +type testNameResolver struct { + w *testWatcher + addrs []string +} + +func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) { + r.w = &testWatcher{ + update: make(chan *naming.Update, len(r.addrs)), + side: make(chan int, 1), + readDone: make(chan int), + } + r.w.side <- len(r.addrs) + for _, addr := range r.addrs { + r.w.update <- &naming.Update{ + Op: naming.Add, + Addr: addr, + Metadata: &naming.AddrMetadataGRPCLB{ + AddrType: naming.GRPCLB, + ServerName: lbsn, + }, + } + } + go func() { + <-r.w.readDone + }() + return r.w, nil +} + +func (r *testNameResolver) inject(updates []*naming.Update) { + if r.w != nil { + r.w.inject(updates) + } +} + +type serverNameCheckCreds struct { + mu sync.Mutex + sn string + expected string +} + +func (c *serverNameCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if _, err := io.WriteString(rawConn, c.sn); 
err != nil { + fmt.Printf("Failed to write the server name %s to the client %v", c.sn, err) + return nil, nil, err + } + return rawConn, nil, nil +} +func (c *serverNameCheckCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + b := make([]byte, len(c.expected)) + if _, err := rawConn.Read(b); err != nil { + fmt.Printf("Failed to read the server name from the server %v", err) + return nil, nil, err + } + if c.expected != string(b) { + fmt.Printf("Read the server name %s want %s", string(b), c.expected) + return nil, nil, errors.New("received unexpected server name") + } + return rawConn, nil, nil +} +func (c *serverNameCheckCreds) Info() credentials.ProtocolInfo { + c.mu.Lock() + defer c.mu.Unlock() + return credentials.ProtocolInfo{} +} +func (c *serverNameCheckCreds) Clone() credentials.TransportCredentials { + c.mu.Lock() + defer c.mu.Unlock() + return &serverNameCheckCreds{ + expected: c.expected, + } +} +func (c *serverNameCheckCreds) OverrideServerName(s string) error { + c.mu.Lock() + defer c.mu.Unlock() + c.expected = s + return nil +} + +// fakeNameDialer replaces fakeName with localhost when dialing. +// This will test that custom dialer is passed from Dial to grpclb. 
+func fakeNameDialer(addr string, timeout time.Duration) (net.Conn, error) { + addr = strings.Replace(addr, fakeName, "localhost", 1) + return net.DialTimeout("tcp", addr, timeout) +} + +type remoteBalancer struct { + sls []*lbmpb.ServerList + intervals []time.Duration + statsDura time.Duration + done chan struct{} + mu sync.Mutex + stats lbmpb.ClientStats +} + +func newRemoteBalancer(sls []*lbmpb.ServerList, intervals []time.Duration) *remoteBalancer { + return &remoteBalancer{ + sls: sls, + intervals: intervals, + done: make(chan struct{}), + } +} + +func (b *remoteBalancer) stop() { + close(b.done) +} + +func (b *remoteBalancer) BalanceLoad(stream lbspb.LoadBalancer_BalanceLoadServer) error { + req, err := stream.Recv() + if err != nil { + return err + } + initReq := req.GetInitialRequest() + if initReq.Name != besn { + return grpc.Errorf(codes.InvalidArgument, "invalid service name: %v", initReq.Name) + } + resp := &lbmpb.LoadBalanceResponse{ + LoadBalanceResponseType: &lbmpb.LoadBalanceResponse_InitialResponse{ + InitialResponse: &lbmpb.InitialLoadBalanceResponse{ + ClientStatsReportInterval: &lbmpb.Duration{ + Seconds: int64(b.statsDura.Seconds()), + Nanos: int32(b.statsDura.Nanoseconds() - int64(b.statsDura.Seconds())*1e9), + }, + }, + }, + } + if err := stream.Send(resp); err != nil { + return err + } + go func() { + for { + var ( + req *lbmpb.LoadBalanceRequest + err error + ) + if req, err = stream.Recv(); err != nil { + return + } + b.mu.Lock() + b.stats.NumCallsStarted += req.GetClientStats().NumCallsStarted + b.stats.NumCallsFinished += req.GetClientStats().NumCallsFinished + b.stats.NumCallsFinishedWithDropForRateLimiting += req.GetClientStats().NumCallsFinishedWithDropForRateLimiting + b.stats.NumCallsFinishedWithDropForLoadBalancing += req.GetClientStats().NumCallsFinishedWithDropForLoadBalancing + b.stats.NumCallsFinishedWithClientFailedToSend += req.GetClientStats().NumCallsFinishedWithClientFailedToSend + b.stats.NumCallsFinishedKnownReceived += 
req.GetClientStats().NumCallsFinishedKnownReceived + b.mu.Unlock() + } + }() + for k, v := range b.sls { + time.Sleep(b.intervals[k]) + resp = &lbmpb.LoadBalanceResponse{ + LoadBalanceResponseType: &lbmpb.LoadBalanceResponse_ServerList{ + ServerList: v, + }, + } + if err := stream.Send(resp); err != nil { + return err + } + } + <-b.done + return nil +} + +type testServer struct { + testpb.TestServiceServer + + addr string +} + +const testmdkey = "testmd" + +func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, grpc.Errorf(codes.Internal, "failed to receive metadata") + } + if md == nil || md["lb-token"][0] != lbToken { + return nil, grpc.Errorf(codes.Internal, "received unexpected metadata: %v", md) + } + grpc.SetTrailer(ctx, metadata.Pairs(testmdkey, s.addr)) + return &testpb.Empty{}, nil +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + return nil +} + +func startBackends(sn string, lis ...net.Listener) (servers []*grpc.Server) { + for _, l := range lis { + creds := &serverNameCheckCreds{ + sn: sn, + } + s := grpc.NewServer(grpc.Creds(creds)) + testpb.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String()}) + servers = append(servers, s) + go func(s *grpc.Server, l net.Listener) { + s.Serve(l) + }(s, l) + } + return +} + +func stopBackends(servers []*grpc.Server) { + for _, s := range servers { + s.Stop() + } +} + +type testServers struct { + lbAddr string + ls *remoteBalancer + lb *grpc.Server + beIPs []net.IP + bePorts []int +} + +func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), err error) { + var ( + beListeners []net.Listener + ls *remoteBalancer + lb *grpc.Server + beIPs []net.IP + bePorts []int + ) + for i := 0; i < numberOfBackends; i++ { + // Start a backend. 
+ beLis, e := net.Listen("tcp", "localhost:0") + if e != nil { + err = fmt.Errorf("Failed to listen %v", err) + return + } + beIPs = append(beIPs, beLis.Addr().(*net.TCPAddr).IP) + bePorts = append(bePorts, beLis.Addr().(*net.TCPAddr).Port) + + beListeners = append(beListeners, beLis) + } + backends := startBackends(besn, beListeners...) + + // Start a load balancer. + lbLis, err := net.Listen("tcp", "localhost:0") + if err != nil { + err = fmt.Errorf("Failed to create the listener for the load balancer %v", err) + return + } + lbCreds := &serverNameCheckCreds{ + sn: lbsn, + } + lb = grpc.NewServer(grpc.Creds(lbCreds)) + if err != nil { + err = fmt.Errorf("Failed to generate the port number %v", err) + return + } + ls = newRemoteBalancer(nil, nil) + lbspb.RegisterLoadBalancerServer(lb, ls) + go func() { + lb.Serve(lbLis) + }() + + tss = &testServers{ + lbAddr: lbLis.Addr().String(), + ls: ls, + lb: lb, + beIPs: beIPs, + bePorts: bePorts, + } + cleanup = func() { + defer stopBackends(backends) + defer func() { + ls.stop() + lb.Stop() + }() + } + return +} + +func TestGRPCLB(t *testing.T) { + defer leakcheck.Check(t) + tss, cleanup, err := newLoadBalancer(1) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + + be := &lbmpb.Server{ + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + } + var bes []*lbmpb.Server + bes = append(bes, be) + sl := &lbmpb.ServerList{ + Servers: bes, + } + tss.ls.sls = []*lbmpb.ServerList{sl} + tss.ls.intervals = []time.Duration{0} + creds := serverNameCheckCreds{ + expected: besn, + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})), + grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer)) + if err != nil { + t.Fatalf("Failed to dial to the backend 
%v", err) + } + defer cc.Close() + testC := testpb.NewTestServiceClient(cc) + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } +} + +func TestDropRequest(t *testing.T) { + defer leakcheck.Check(t) + tss, cleanup, err := newLoadBalancer(2) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + tss.ls.sls = []*lbmpb.ServerList{{ + Servers: []*lbmpb.Server{{ + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + DropForLoadBalancing: true, + }, { + IpAddress: tss.beIPs[1], + Port: int32(tss.bePorts[1]), + LoadBalanceToken: lbToken, + DropForLoadBalancing: false, + }}, + }} + tss.ls.intervals = []time.Duration{0} + creds := serverNameCheckCreds{ + expected: besn, + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})), + grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testpb.NewTestServiceClient(cc) + // Wait until the first connection is up. + // The first one has Drop set to true, error should contain "drop requests". + for { + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + if strings.Contains(err.Error(), "drops requests") { + break + } + } + } + // The 1st, non-fail-fast RPC should succeed. This ensures both server + // connections are made, because the first one has DropForLoadBalancing set to true. 
+ if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("%v.SayHello(_, _) = _, %v, want _, ", testC, err) + } + for i := 0; i < 3; i++ { + // Odd fail-fast RPCs should fail, because the 1st backend has DropForLoadBalancing + // set to true. + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.Unavailable) + } + // Even fail-fast RPCs should succeed since they choose the + // non-drop-request backend according to the round robin policy. + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } + } +} + +func TestDropRequestFailedNonFailFast(t *testing.T) { + defer leakcheck.Check(t) + tss, cleanup, err := newLoadBalancer(1) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + be := &lbmpb.Server{ + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + DropForLoadBalancing: true, + } + var bes []*lbmpb.Server + bes = append(bes, be) + sl := &lbmpb.ServerList{ + Servers: bes, + } + tss.ls.sls = []*lbmpb.ServerList{sl} + tss.ls.intervals = []time.Duration{0} + creds := serverNameCheckCreds{ + expected: besn, + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})), + grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testpb.NewTestServiceClient(cc) + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, 
grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.DeadlineExceeded) + } +} + +// When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list. +func TestBalancerDisconnects(t *testing.T) { + defer leakcheck.Check(t) + var ( + lbAddrs []string + lbs []*grpc.Server + ) + for i := 0; i < 3; i++ { + tss, cleanup, err := newLoadBalancer(1) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + + be := &lbmpb.Server{ + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + } + var bes []*lbmpb.Server + bes = append(bes, be) + sl := &lbmpb.ServerList{ + Servers: bes, + } + tss.ls.sls = []*lbmpb.ServerList{sl} + tss.ls.intervals = []time.Duration{0} + + lbAddrs = append(lbAddrs, tss.lbAddr) + lbs = append(lbs, tss.lb) + } + + creds := serverNameCheckCreds{ + expected: besn, + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + resolver := &testNameResolver{ + addrs: lbAddrs[:2], + } + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(resolver)), + grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testpb.NewTestServiceClient(cc) + var previousTrailer string + trailer := metadata.MD{} + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } else { + previousTrailer = trailer[testmdkey][0] + } + // The initial resolver update contains lbs[0] and lbs[1]. + // When lbs[0] is stopped, lbs[1] should be used. 
+ lbs[0].Stop() + for { + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } else if trailer[testmdkey][0] != previousTrailer { + // A new backend server should receive the request. + // The trailer contains the backend address, so the trailer should be different from the previous one. + previousTrailer = trailer[testmdkey][0] + break + } + time.Sleep(100 * time.Millisecond) + } + // Inject a update to add lbs[2] to resolved addresses. + resolver.inject([]*naming.Update{ + {Op: naming.Add, + Addr: lbAddrs[2], + Metadata: &naming.AddrMetadataGRPCLB{ + AddrType: naming.GRPCLB, + ServerName: lbsn, + }, + }, + }) + // Stop lbs[1]. Now lbs[0] and lbs[1] are all stopped. lbs[2] should be used. + lbs[1].Stop() + for { + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } else if trailer[testmdkey][0] != previousTrailer { + // A new backend server should receive the request. + // The trailer contains the backend address, so the trailer should be different from the previous one. 
+ break + } + time.Sleep(100 * time.Millisecond) + } +} + +type failPreRPCCred struct{} + +func (failPreRPCCred) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + if strings.Contains(uri[0], "failtosend") { + return nil, fmt.Errorf("rpc should fail to send") + } + return nil, nil +} + +func (failPreRPCCred) RequireTransportSecurity() bool { + return false +} + +func checkStats(stats *lbmpb.ClientStats, expected *lbmpb.ClientStats) error { + if !proto.Equal(stats, expected) { + return fmt.Errorf("stats not equal: got %+v, want %+v", stats, expected) + } + return nil +} + +func runAndGetStats(t *testing.T, dropForLoadBalancing, dropForRateLimiting bool, runRPCs func(*grpc.ClientConn)) lbmpb.ClientStats { + tss, cleanup, err := newLoadBalancer(3) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + tss.ls.sls = []*lbmpb.ServerList{{ + Servers: []*lbmpb.Server{{ + IpAddress: tss.beIPs[2], + Port: int32(tss.bePorts[2]), + LoadBalanceToken: lbToken, + DropForLoadBalancing: dropForLoadBalancing, + DropForRateLimiting: dropForRateLimiting, + }}, + }} + tss.ls.intervals = []time.Duration{0} + tss.ls.statsDura = 100 * time.Millisecond + creds := serverNameCheckCreds{expected: besn} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})), + grpc.WithTransportCredentials(&creds), grpc.WithPerRPCCredentials(failPreRPCCred{}), + grpc.WithBlock(), grpc.WithDialer(fakeNameDialer)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + + runRPCs(cc) + time.Sleep(1 * time.Second) + tss.ls.mu.Lock() + stats := tss.ls.stats + tss.ls.mu.Unlock() + return stats +} + +const countRPC = 40 + +func TestGRPCLBStatsUnarySuccess(t *testing.T) { + defer leakcheck.Check(t) + stats := runAndGetStats(t, 
false, false, func(cc *grpc.ClientConn) { + testC := testpb.NewTestServiceClient(cc) + // The first non-failfast RPC succeeds, all connections are up. + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } + for i := 0; i < countRPC-1; i++ { + testC.EmptyCall(context.Background(), &testpb.Empty{}) + } + }) + + if err := checkStats(&stats, &lbmpb.ClientStats{ + NumCallsStarted: int64(countRPC), + NumCallsFinished: int64(countRPC), + NumCallsFinishedKnownReceived: int64(countRPC), + }); err != nil { + t.Fatal(err) + } +} + +func TestGRPCLBStatsUnaryDropLoadBalancing(t *testing.T) { + defer leakcheck.Check(t) + c := 0 + stats := runAndGetStats(t, true, false, func(cc *grpc.ClientConn) { + testC := testpb.NewTestServiceClient(cc) + for { + c++ + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + if strings.Contains(err.Error(), "drops requests") { + break + } + } + } + for i := 0; i < countRPC; i++ { + testC.EmptyCall(context.Background(), &testpb.Empty{}) + } + }) + + if err := checkStats(&stats, &lbmpb.ClientStats{ + NumCallsStarted: int64(countRPC + c), + NumCallsFinished: int64(countRPC + c), + NumCallsFinishedWithDropForLoadBalancing: int64(countRPC + 1), + NumCallsFinishedWithClientFailedToSend: int64(c - 1), + }); err != nil { + t.Fatal(err) + } +} + +func TestGRPCLBStatsUnaryDropRateLimiting(t *testing.T) { + defer leakcheck.Check(t) + c := 0 + stats := runAndGetStats(t, false, true, func(cc *grpc.ClientConn) { + testC := testpb.NewTestServiceClient(cc) + for { + c++ + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + if strings.Contains(err.Error(), "drops requests") { + break + } + } + } + for i := 0; i < countRPC; i++ { + testC.EmptyCall(context.Background(), &testpb.Empty{}) + } + }) + + if err := checkStats(&stats, &lbmpb.ClientStats{ + NumCallsStarted: int64(countRPC + c), 
+ NumCallsFinished: int64(countRPC + c), + NumCallsFinishedWithDropForRateLimiting: int64(countRPC + 1), + NumCallsFinishedWithClientFailedToSend: int64(c - 1), + }); err != nil { + t.Fatal(err) + } +} + +func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { + defer leakcheck.Check(t) + stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) { + testC := testpb.NewTestServiceClient(cc) + // The first non-failfast RPC succeeds, all connections are up. + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } + for i := 0; i < countRPC-1; i++ { + grpc.Invoke(context.Background(), "failtosend", &testpb.Empty{}, nil, cc) + } + }) + + if err := checkStats(&stats, &lbmpb.ClientStats{ + NumCallsStarted: int64(countRPC), + NumCallsFinished: int64(countRPC), + NumCallsFinishedWithClientFailedToSend: int64(countRPC - 1), + NumCallsFinishedKnownReceived: 1, + }); err != nil { + t.Fatal(err) + } +} + +func TestGRPCLBStatsStreamingSuccess(t *testing.T) { + defer leakcheck.Check(t) + stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) { + testC := testpb.NewTestServiceClient(cc) + // The first non-failfast RPC succeeds, all connections are up. + stream, err := testC.FullDuplexCall(context.Background(), grpc.FailFast(false)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_, _) = _, %v, want _, ", testC, err) + } + for { + if _, err = stream.Recv(); err == io.EOF { + break + } + } + for i := 0; i < countRPC-1; i++ { + stream, err = testC.FullDuplexCall(context.Background()) + if err == nil { + // Wait for stream to end if err is nil. 
+ for { + if _, err = stream.Recv(); err == io.EOF { + break + } + } + } + } + }) + + if err := checkStats(&stats, &lbmpb.ClientStats{ + NumCallsStarted: int64(countRPC), + NumCallsFinished: int64(countRPC), + NumCallsFinishedKnownReceived: int64(countRPC), + }); err != nil { + t.Fatal(err) + } +} + +func TestGRPCLBStatsStreamingDropLoadBalancing(t *testing.T) { + defer leakcheck.Check(t) + c := 0 + stats := runAndGetStats(t, true, false, func(cc *grpc.ClientConn) { + testC := testpb.NewTestServiceClient(cc) + for { + c++ + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + if strings.Contains(err.Error(), "drops requests") { + break + } + } + } + for i := 0; i < countRPC; i++ { + testC.FullDuplexCall(context.Background()) + } + }) + + if err := checkStats(&stats, &lbmpb.ClientStats{ + NumCallsStarted: int64(countRPC + c), + NumCallsFinished: int64(countRPC + c), + NumCallsFinishedWithDropForLoadBalancing: int64(countRPC + 1), + NumCallsFinishedWithClientFailedToSend: int64(c - 1), + }); err != nil { + t.Fatal(err) + } +} + +func TestGRPCLBStatsStreamingDropRateLimiting(t *testing.T) { + defer leakcheck.Check(t) + c := 0 + stats := runAndGetStats(t, false, true, func(cc *grpc.ClientConn) { + testC := testpb.NewTestServiceClient(cc) + for { + c++ + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + if strings.Contains(err.Error(), "drops requests") { + break + } + } + } + for i := 0; i < countRPC; i++ { + testC.FullDuplexCall(context.Background()) + } + }) + + if err := checkStats(&stats, &lbmpb.ClientStats{ + NumCallsStarted: int64(countRPC + c), + NumCallsFinished: int64(countRPC + c), + NumCallsFinishedWithDropForRateLimiting: int64(countRPC + 1), + NumCallsFinishedWithClientFailedToSend: int64(c - 1), + }); err != nil { + t.Fatal(err) + } +} + +func TestGRPCLBStatsStreamingFailedToSend(t *testing.T) { + defer leakcheck.Check(t) + stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) { 
+ testC := testpb.NewTestServiceClient(cc) + // The first non-failfast RPC succeeds, all connections are up. + stream, err := testC.FullDuplexCall(context.Background(), grpc.FailFast(false)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_, _) = _, %v, want _, ", testC, err) + } + for { + if _, err = stream.Recv(); err == io.EOF { + break + } + } + for i := 0; i < countRPC-1; i++ { + grpc.NewClientStream(context.Background(), &grpc.StreamDesc{}, cc, "failtosend") + } + }) + + if err := checkStats(&stats, &lbmpb.ClientStats{ + NumCallsStarted: int64(countRPC), + NumCallsFinished: int64(countRPC), + NumCallsFinishedWithClientFailedToSend: int64(countRPC - 1), + NumCallsFinishedKnownReceived: 1, + }); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go b/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go new file mode 100644 index 000000000..e5498f823 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go @@ -0,0 +1,86 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package glogger defines glog-based logging for grpc. +// Importing this package will install glog as the logger used by grpclog. 
+package glogger + +import ( + "fmt" + + "github.com/golang/glog" + "google.golang.org/grpc/grpclog" +) + +func init() { + grpclog.SetLoggerV2(&glogger{}) +} + +type glogger struct{} + +func (g *glogger) Info(args ...interface{}) { + glog.InfoDepth(2, args...) +} + +func (g *glogger) Infoln(args ...interface{}) { + glog.InfoDepth(2, fmt.Sprintln(args...)) +} + +func (g *glogger) Infof(format string, args ...interface{}) { + glog.InfoDepth(2, fmt.Sprintf(format, args...)) +} + +func (g *glogger) Warning(args ...interface{}) { + glog.WarningDepth(2, args...) +} + +func (g *glogger) Warningln(args ...interface{}) { + glog.WarningDepth(2, fmt.Sprintln(args...)) +} + +func (g *glogger) Warningf(format string, args ...interface{}) { + glog.WarningDepth(2, fmt.Sprintf(format, args...)) +} + +func (g *glogger) Error(args ...interface{}) { + glog.ErrorDepth(2, args...) +} + +func (g *glogger) Errorln(args ...interface{}) { + glog.ErrorDepth(2, fmt.Sprintln(args...)) +} + +func (g *glogger) Errorf(format string, args ...interface{}) { + glog.ErrorDepth(2, fmt.Sprintf(format, args...)) +} + +func (g *glogger) Fatal(args ...interface{}) { + glog.FatalDepth(2, args...) +} + +func (g *glogger) Fatalln(args ...interface{}) { + glog.FatalDepth(2, fmt.Sprintln(args...)) +} + +func (g *glogger) Fatalf(format string, args ...interface{}) { + glog.FatalDepth(2, fmt.Sprintf(format, args...)) +} + +func (g *glogger) V(l int) bool { + return bool(glog.V(glog.Level(l))) +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000..16a7d8886 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport package only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + logger.Error(args...) 
+} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calls os.Exit() with exit code 1. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// Deprecated: use Info. +func Print(args ...interface{}) { + logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// Deprecated: use Infoln. +func Println(args ...interface{}) { + logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 000000000..d03b2397b --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2015 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// Logger mimics golang's standard Logger as an interface. +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) 
+} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 000000000..d49325776 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. 
Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. 
+// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) 
+} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2_test.go b/vendor/google.golang.org/grpc/grpclog/loggerv2_test.go new file mode 100644 index 000000000..756f215f9 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2_test.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "bytes" + "fmt" + "regexp" + "testing" +) + +func TestLoggerV2Severity(t *testing.T) { + buffers := []*bytes.Buffer{new(bytes.Buffer), new(bytes.Buffer), new(bytes.Buffer)} + SetLoggerV2(NewLoggerV2(buffers[infoLog], buffers[warningLog], buffers[errorLog])) + + Info(severityName[infoLog]) + Warning(severityName[warningLog]) + Error(severityName[errorLog]) + + for i := 0; i < fatalLog; i++ { + buf := buffers[i] + // The content of info buffer should be something like: + // INFO: 2017/04/07 14:55:42 INFO + // WARNING: 2017/04/07 14:55:42 WARNING + // ERROR: 2017/04/07 14:55:42 ERROR + for j := i; j < fatalLog; j++ { + b, err := buf.ReadBytes('\n') + if err != nil { + t.Fatal(err) + } + if err := checkLogForSeverity(j, b); err != nil { + t.Fatal(err) + } + } + } +} + +// check if b is in the format of: +// WARNING: 2017/04/07 14:55:42 WARNING +func checkLogForSeverity(s int, b []byte) error { + expected := regexp.MustCompile(fmt.Sprintf(`^%s: [0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s\n$`, severityName[s], severityName[s])) + if m := expected.Match(b); !m { + return fmt.Errorf("got: %v, want string in format of: %v", string(b), severityName[s]+": 2016/10/05 17:09:26 "+severityName[s]) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 000000000..fdcbb9e0b --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_health_v1/health.proto + +/* +Package grpc_health_v1 is a generated protocol buffer package. 
+ +It is generated from these files: + grpc_health_v1/health.proto + +It has these top-level messages: + HealthCheckRequest + HealthCheckResponse +*/ +package grpc_health_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + 
+func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_health_v1/health.proto", +} + +func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, + 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, + 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, + 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, + 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 
+ 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, + 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, + 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, + 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, + 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, + 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, + 0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65, + 0x20, 0x60, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto new file mode 100644 index 000000000..6072fdc3b --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -0,0 +1,34 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package grpc.health.v1; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +service Health{ + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); +} diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go new file mode 100644 index 000000000..c6212f406 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/health.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto + +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" +) + +// Server implements `service Health`. +type Server struct { + mu sync.Mutex + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. 
+func NewServer() *Server { + return &Server{ + statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if in.Service == "" { + // check the server overall health status. + return &healthpb.HealthCheckResponse{ + Status: healthpb.HealthCheckResponse_SERVING, + }, nil + } + if status, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: status, + }, nil + } + return nil, grpc.Errorf(codes.NotFound, "unknown service") +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. +func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = status + s.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 000000000..06dc825b9 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. 
+type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. 
And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000..07083832c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,34 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. +package internal + +// TestingCloseConns closes all existing transports but keeps +// grpcServer.lis accepting new connections. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingCloseConns func(grpcServer interface{}) + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/google.golang.org/grpc/interop/client/client.go b/vendor/google.golang.org/grpc/interop/client/client.go new file mode 100644 index 000000000..da6bc2d20 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/client/client.go @@ -0,0 +1,190 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import ( + "flag" + "net" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/testdata" +) + +var ( + caFile = flag.String("ca_file", "", "The file containning the CA root cert file") + useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") + testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") + serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") + oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") + defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") + serverHost = flag.String("server_host", "localhost", "The server host name") + serverPort = flag.Int("server_port", 10000, "The server port number") + tlsServerName = flag.String("server_host_override", "", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") + testCase = flag.String("test_case", "large_unary", + `Configure different test cases. 
Valid options are: + empty_unary : empty (zero bytes) request and response; + large_unary : single request and (large) response; + client_streaming : request streaming with single response; + server_streaming : single request with response streaming; + ping_pong : full-duplex streaming; + empty_stream : full-duplex streaming with zero message; + timeout_on_sleeping_server: fullduplex streaming on a sleeping server; + compute_engine_creds: large_unary with compute engine auth; + service_account_creds: large_unary with service account auth; + jwt_token_creds: large_unary with jwt token auth; + per_rpc_creds: large_unary with per rpc token; + oauth2_auth_token: large_unary with oauth2 token auth; + cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; + cancel_after_first_response: cancellation after receiving 1st message from the server; + status_code_and_message: status code propagated back to client; + custom_metadata: server will echo custom metadata; + unimplemented_method: client attempts to call unimplemented method; + unimplemented_service: client attempts to call unimplemented service.`) +) + +func main() { + flag.Parse() + serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) + var opts []grpc.DialOption + if *useTLS { + var sn string + if *tlsServerName != "" { + sn = *tlsServerName + } + var creds credentials.TransportCredentials + if *testCA { + var err error + if *caFile == "" { + *caFile = testdata.Path("ca.pem") + } + creds, err = credentials.NewClientTLSFromFile(*caFile, sn) + if err != nil { + grpclog.Fatalf("Failed to create TLS credentials %v", err) + } + } else { + creds = credentials.NewClientTLSFromCert(nil, sn) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + if *testCase == "compute_engine_creds" { + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewComputeEngine())) + } else if *testCase == "service_account_creds" { + jwtCreds, err := 
oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) + if err != nil { + grpclog.Fatalf("Failed to create JWT credentials: %v", err) + } + opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "jwt_token_creds" { + jwtCreds, err := oauth.NewJWTAccessFromFile(*serviceAccountKeyFile) + if err != nil { + grpclog.Fatalf("Failed to create JWT credentials: %v", err) + } + opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) + } else if *testCase == "oauth2_auth_token" { + opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewOauthAccess(interop.GetToken(*serviceAccountKeyFile, *oauthScope)))) + } + } else { + opts = append(opts, grpc.WithInsecure()) + } + opts = append(opts, grpc.WithBlock()) + conn, err := grpc.Dial(serverAddr, opts...) + if err != nil { + grpclog.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + tc := testpb.NewTestServiceClient(conn) + switch *testCase { + case "empty_unary": + interop.DoEmptyUnaryCall(tc) + grpclog.Println("EmptyUnaryCall done") + case "large_unary": + interop.DoLargeUnaryCall(tc) + grpclog.Println("LargeUnaryCall done") + case "client_streaming": + interop.DoClientStreaming(tc) + grpclog.Println("ClientStreaming done") + case "server_streaming": + interop.DoServerStreaming(tc) + grpclog.Println("ServerStreaming done") + case "ping_pong": + interop.DoPingPong(tc) + grpclog.Println("Pingpong done") + case "empty_stream": + interop.DoEmptyStream(tc) + grpclog.Println("Emptystream done") + case "timeout_on_sleeping_server": + interop.DoTimeoutOnSleepingServer(tc) + grpclog.Println("TimeoutOnSleepingServer done") + case "compute_engine_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute compute_engine_creds test case.") + } + interop.DoComputeEngineCreds(tc, *defaultServiceAccount, *oauthScope) + grpclog.Println("ComputeEngineCreds done") + case "service_account_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. 
TLS is required to execute service_account_creds test case.") + } + interop.DoServiceAccountCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("ServiceAccountCreds done") + case "jwt_token_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute jwt_token_creds test case.") + } + interop.DoJWTTokenCreds(tc, *serviceAccountKeyFile) + grpclog.Println("JWTtokenCreds done") + case "per_rpc_creds": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute per_rpc_creds test case.") + } + interop.DoPerRPCCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("PerRPCCreds done") + case "oauth2_auth_token": + if !*useTLS { + grpclog.Fatalf("TLS is not enabled. TLS is required to execute oauth2_auth_token test case.") + } + interop.DoOauth2TokenCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("Oauth2TokenCreds done") + case "cancel_after_begin": + interop.DoCancelAfterBegin(tc) + grpclog.Println("CancelAfterBegin done") + case "cancel_after_first_response": + interop.DoCancelAfterFirstResponse(tc) + grpclog.Println("CancelAfterFirstResponse done") + case "status_code_and_message": + interop.DoStatusCodeAndMessage(tc) + grpclog.Println("StatusCodeAndMessage done") + case "custom_metadata": + interop.DoCustomMetadata(tc) + grpclog.Println("CustomMetadata done") + case "unimplemented_method": + interop.DoUnimplementedMethod(conn) + grpclog.Println("UnimplementedMethod done") + case "unimplemented_service": + interop.DoUnimplementedService(testpb.NewUnimplementedServiceClient(conn)) + grpclog.Println("UnimplementedService done") + default: + grpclog.Fatal("Unsupported test case: ", *testCase) + } +} diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go new file mode 100644 index 000000000..b03c728c3 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -0,0 +1,905 @@ +// 
Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_testing/test.proto + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + grpc_testing/test.proto + +It has these top-level messages: + Empty + Payload + EchoStatus + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The type of payload that should be returned. +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. 
+ PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) Enum() *PayloadType { + p := new(PayloadType) + *p = x + return p +} +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (x *PayloadType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") + if err != nil { + return err + } + *x = PayloadType(value) + return nil +} +func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. + Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. + Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Payload) GetType() PayloadType { + if m != nil && m.Type != nil { + return *m.Type + } + return PayloadType_COMPRESSABLE +} + +func (m *Payload) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +// A protobuf representation for grpc status. 
This is used by test +// clients to specify a status that the server should attempt to return. +type EchoStatus struct { + Code *int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EchoStatus) Reset() { *m = EchoStatus{} } +func (m *EchoStatus) String() string { return proto.CompactTextString(m) } +func (*EchoStatus) ProtoMessage() {} +func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *EchoStatus) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *EchoStatus) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername *bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. 
+ FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` + // Whether server should return a given status + ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SimpleRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *SimpleRequest) GetResponseSize() int32 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetFillUsername() bool { + if m != nil && m.FillUsername != nil { + return *m.FillUsername + } + return false +} + +func (m *SimpleRequest) GetFillOauthScope() bool { + if m != nil && m.FillOauthScope != nil { + return *m.FillOauthScope + } + return false +} + +func (m *SimpleRequest) GetResponseStatus() *EchoStatus { + if m != nil { + return m.ResponseStatus + } + return nil +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. 
+ OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleResponse) GetUsername() string { + if m != nil && m.Username != nil { + return *m.Username + } + return "" +} + +func (m *SimpleResponse) GetOauthScope() string { + if m != nil && m.OauthScope != nil { + return *m.OauthScope + } + return "" +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} +func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. 
+ AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} +func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { + if m != nil && m.AggregatedPayloadSize != nil { + return *m.AggregatedPayloadSize + } + return 0 +} + +// Configuration for a particular response. +type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} +func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ResponseParameters) GetSize() int32 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *ResponseParameters) GetIntervalUs() int32 { + if m != nil && m.IntervalUs != nil { + return *m.IntervalUs + } + return 0 +} + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. 
+ // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether server should return a given status + ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} +func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { + if m != nil { + return m.ResponseStatus + } + return nil +} + +// Server-streaming response, as configured by the request and parameters. +type StreamingOutputCallResponse struct { + // Payload to increase response size. 
+ Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} +func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterType((*Empty)(nil), "grpc.testing.Empty") + proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") + proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus") + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") + proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") + proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") + proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") + proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for TestService service + +type TestServiceClient interface { + // One empty request followed by one empty response. 
+ EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. + HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) +} + +type testServiceClient struct { + cc *grpc.ClientConn +} + +func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingOutputCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_StreamingOutputCallClient interface { + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingOutputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceStreamingInputCallClient{stream} + return x, nil +} + +type TestService_StreamingInputCallClient interface { + Send(*StreamingInputCallRequest) error + CloseAndRecv() (*StreamingInputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingInputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamingInputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceFullDuplexCallClient{stream} + return x, nil +} + +type TestService_FullDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceFullDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceHalfDuplexCallClient{stream} + return x, nil +} + +type TestService_HalfDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceHalfDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for TestService service + +type TestServiceServer interface { + // One empty request followed by one empty response. + EmptyCall(context.Context, *Empty) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. 
+ UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(TestService_StreamingInputCallServer) error + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(TestService_FullDuplexCallServer) error + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ HalfDuplexCall(TestService_HalfDuplexCallServer) error +} + +func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { + s.RegisterService(&_TestService_serviceDesc, srv) +} + +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).EmptyCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/EmptyCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamingOutputCallRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) +} + +type TestService_StreamingOutputCallServer interface { + Send(*StreamingOutputCallResponse) error + grpc.ServerStream +} + +type testServiceStreamingOutputCallServer struct { + grpc.ServerStream +} + +func (x 
*testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) +} + +type TestService_StreamingInputCallServer interface { + SendAndClose(*StreamingInputCallResponse) error + Recv() (*StreamingInputCallRequest, error) + grpc.ServerStream +} + +type testServiceStreamingInputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { + m := new(StreamingInputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) +} + +type TestService_FullDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceFullDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) +} + +type TestService_HalfDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + 
Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceHalfDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TestService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EmptyCall", + Handler: _TestService_EmptyCall_Handler, + }, + { + MethodName: "UnaryCall", + Handler: _TestService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingOutputCall", + Handler: _TestService_StreamingOutputCall_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamingInputCall", + Handler: _TestService_StreamingInputCall_Handler, + ClientStreams: true, + }, + { + StreamName: "FullDuplexCall", + Handler: _TestService_FullDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "HalfDuplexCall", + Handler: _TestService_HalfDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_testing/test.proto", +} + +// Client API for UnimplementedService service + +type UnimplementedServiceClient interface { + // A call that no server should implement + UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type unimplementedServiceClient struct { + cc *grpc.ClientConn +} + +func NewUnimplementedServiceClient(cc *grpc.ClientConn) UnimplementedServiceClient { + return &unimplementedServiceClient{cc} +} + +func (c *unimplementedServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) 
+ err := grpc.Invoke(ctx, "/grpc.testing.UnimplementedService/UnimplementedCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for UnimplementedService service + +type UnimplementedServiceServer interface { + // A call that no server should implement + UnimplementedCall(context.Context, *Empty) (*Empty, error) +} + +func RegisterUnimplementedServiceServer(s *grpc.Server, srv UnimplementedServiceServer) { + s.RegisterService(&_UnimplementedService_serviceDesc, srv) +} + +func _UnimplementedService_UnimplementedCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UnimplementedServiceServer).UnimplementedCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.UnimplementedService/UnimplementedCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UnimplementedServiceServer).UnimplementedCall(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _UnimplementedService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.UnimplementedService", + HandlerType: (*UnimplementedServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UnimplementedCall", + Handler: _UnimplementedService_UnimplementedCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_testing/test.proto", +} + +func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xc5, 0x69, 0x42, 0xda, 0x49, 0x6a, 0xc2, 0x94, 0xaa, 0x6e, 0x8a, 0x44, 0x64, 0x0e, 0x18, + 0x24, 0x52, 0x14, 0x09, 0x0e, 0x48, 
0x80, 0x4a, 0x9b, 0x8a, 0x4a, 0x6d, 0x53, 0xec, 0xe6, 0x1c, + 0x2d, 0xc9, 0xd4, 0xb5, 0xe4, 0x2f, 0xec, 0x75, 0x45, 0x7a, 0xe0, 0xcf, 0xf0, 0x23, 0x38, 0xf0, + 0xe7, 0xd0, 0xae, 0xed, 0xc4, 0x49, 0x53, 0xd1, 0xf2, 0x75, 0xca, 0xee, 0x9b, 0x37, 0xb3, 0xf3, + 0x66, 0x5e, 0x0c, 0x1b, 0x76, 0x14, 0x0e, 0x07, 0x9c, 0x62, 0xee, 0xf8, 0xf6, 0xb6, 0xf8, 0x6d, + 0x87, 0x51, 0xc0, 0x03, 0xac, 0x8b, 0x40, 0x3b, 0x0b, 0xe8, 0x55, 0xa8, 0x74, 0xbd, 0x90, 0x8f, + 0xf5, 0x43, 0xa8, 0x9e, 0xb0, 0xb1, 0x1b, 0xb0, 0x11, 0x3e, 0x87, 0x32, 0x1f, 0x87, 0xa4, 0x29, + 0x2d, 0xc5, 0x50, 0x3b, 0x9b, 0xed, 0x62, 0x42, 0x3b, 0x23, 0x9d, 0x8e, 0x43, 0x32, 0x25, 0x0d, + 0x11, 0xca, 0x9f, 0x82, 0xd1, 0x58, 0x2b, 0xb5, 0x14, 0xa3, 0x6e, 0xca, 0xb3, 0xfe, 0x1a, 0xa0, + 0x3b, 0x3c, 0x0f, 0x2c, 0xce, 0x78, 0x12, 0x0b, 0xc6, 0x30, 0x18, 0xa5, 0x05, 0x2b, 0xa6, 0x3c, + 0xa3, 0x06, 0x55, 0x8f, 0xe2, 0x98, 0xd9, 0x24, 0x13, 0x57, 0xcc, 0xfc, 0xaa, 0x7f, 0x2f, 0xc1, + 0xaa, 0xe5, 0x78, 0xa1, 0x4b, 0x26, 0x7d, 0x4e, 0x28, 0xe6, 0xf8, 0x16, 0x56, 0x23, 0x8a, 0xc3, + 0xc0, 0x8f, 0x69, 0x70, 0xb3, 0xce, 0xea, 0x39, 0x5f, 0xdc, 0xf0, 0x71, 0x21, 0x3f, 0x76, 0x2e, + 0xd3, 0x17, 0x2b, 0x53, 0x92, 0xe5, 0x5c, 0x12, 0x6e, 0x43, 0x35, 0x4c, 0x2b, 0x68, 0x4b, 0x2d, + 0xc5, 0xa8, 0x75, 0xd6, 0x17, 0x96, 0x37, 0x73, 0x96, 0xa8, 0x7a, 0xe6, 0xb8, 0xee, 0x20, 0x89, + 0x29, 0xf2, 0x99, 0x47, 0x5a, 0xb9, 0xa5, 0x18, 0xcb, 0x66, 0x5d, 0x80, 0xfd, 0x0c, 0x43, 0x03, + 0x1a, 0x92, 0x14, 0xb0, 0x84, 0x9f, 0x0f, 0xe2, 0x61, 0x10, 0x92, 0x56, 0x91, 0x3c, 0x55, 0xe0, + 0x3d, 0x01, 0x5b, 0x02, 0xc5, 0x1d, 0xb8, 0x37, 0x6d, 0x52, 0xce, 0x4d, 0xab, 0xca, 0x3e, 0xb4, + 0xd9, 0x3e, 0xa6, 0x73, 0x35, 0xd5, 0x89, 0x00, 0x79, 0xd7, 0xbf, 0x82, 0x9a, 0x0f, 0x2e, 0xc5, + 0x8b, 0xa2, 0x94, 0x1b, 0x89, 0x6a, 0xc2, 0xf2, 0x44, 0x4f, 0xba, 0x97, 0xc9, 0x1d, 0x1f, 0x41, + 0xad, 0x28, 0x63, 0x49, 0x86, 0x21, 0x98, 0x48, 0xd0, 0x0f, 0x61, 0xd3, 0xe2, 0x11, 0x31, 0xcf, + 0xf1, 0xed, 0x03, 0x3f, 0x4c, 0xf8, 0x2e, 0x73, 0xdd, 0x7c, 0x89, 0xb7, 
0x6d, 0x45, 0x3f, 0x85, + 0xe6, 0xa2, 0x6a, 0x99, 0xb2, 0x57, 0xb0, 0xc1, 0x6c, 0x3b, 0x22, 0x9b, 0x71, 0x1a, 0x0d, 0xb2, + 0x9c, 0x74, 0xbb, 0xa9, 0xcd, 0xd6, 0xa7, 0xe1, 0xac, 0xb4, 0x58, 0xb3, 0x7e, 0x00, 0x98, 0xd7, + 0x38, 0x61, 0x11, 0xf3, 0x88, 0x53, 0x24, 0x1d, 0x5a, 0x48, 0x95, 0x67, 0x21, 0xd7, 0xf1, 0x39, + 0x45, 0x17, 0x4c, 0xec, 0x38, 0xf3, 0x0c, 0xe4, 0x50, 0x3f, 0xd6, 0xbf, 0x95, 0x0a, 0x1d, 0xf6, + 0x12, 0x3e, 0x27, 0xf8, 0x4f, 0x5d, 0xfb, 0x11, 0xd6, 0x26, 0xf9, 0xe1, 0xa4, 0x55, 0xad, 0xd4, + 0x5a, 0x32, 0x6a, 0x9d, 0xd6, 0x6c, 0x95, 0xab, 0x92, 0x4c, 0x8c, 0xae, 0xca, 0xbc, 0xb5, 0xc7, + 0xff, 0x82, 0x29, 0x8f, 0x61, 0x6b, 0xe1, 0x90, 0x7e, 0xd3, 0xa1, 0xcf, 0xde, 0x41, 0xad, 0x30, + 0x33, 0x6c, 0x40, 0x7d, 0xb7, 0x77, 0x74, 0x62, 0x76, 0x2d, 0x6b, 0xe7, 0xfd, 0x61, 0xb7, 0x71, + 0x07, 0x11, 0xd4, 0xfe, 0xf1, 0x0c, 0xa6, 0x20, 0xc0, 0x5d, 0x73, 0xe7, 0x78, 0xaf, 0x77, 0xd4, + 0x28, 0x75, 0x7e, 0x94, 0xa1, 0x76, 0x4a, 0x31, 0xb7, 0x28, 0xba, 0x70, 0x86, 0x84, 0x2f, 0x61, + 0x45, 0x7e, 0x02, 0x45, 0x5b, 0xb8, 0x36, 0xa7, 0x4b, 0x04, 0x9a, 0x8b, 0x40, 0xdc, 0x87, 0x95, + 0xbe, 0xcf, 0xa2, 0x34, 0x6d, 0x6b, 0x96, 0x31, 0xf3, 0xf9, 0x6a, 0x3e, 0x5c, 0x1c, 0xcc, 0x06, + 0xe0, 0xc2, 0xda, 0x82, 0xf9, 0xa0, 0x31, 0x97, 0x74, 0xad, 0xcf, 0x9a, 0x4f, 0x6f, 0xc0, 0x4c, + 0xdf, 0x7a, 0xa1, 0xa0, 0x03, 0x78, 0xf5, 0x4f, 0x85, 0x4f, 0xae, 0x29, 0x31, 0xff, 0x27, 0x6e, + 0x1a, 0xbf, 0x26, 0xa6, 0x4f, 0x19, 0xe2, 0x29, 0x75, 0x3f, 0x71, 0xdd, 0xbd, 0x24, 0x74, 0xe9, + 0xcb, 0x3f, 0xd3, 0x64, 0x28, 0x52, 0x95, 0xfa, 0x81, 0xb9, 0x67, 0xff, 0xe1, 0xa9, 0x4e, 0x1f, + 0x1e, 0xf4, 0x7d, 0xb9, 0x41, 0x8f, 0x7c, 0x4e, 0xa3, 0xdc, 0x45, 0x6f, 0xe0, 0xfe, 0x0c, 0x7e, + 0x3b, 0x37, 0xfd, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x15, 0x62, 0x93, 0xba, 0xaf, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto b/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto new file mode 100644 index 000000000..20d4366b0 --- /dev/null +++ 
b/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto @@ -0,0 +1,174 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto2"; + +package grpc.testing; + +message Empty {} + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + optional PayloadType type = 1; + // Primary contents of payload. + optional bytes body = 2; +} + +// A protobuf representation for grpc status. This is used by test +// clients to specify a status that the server should attempt to return. +message EchoStatus { + optional int32 code = 1; + optional string message = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + optional PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. 
+ optional int32 response_size = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; + + // Whether SimpleResponse should include username. + optional bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. + optional bool fill_oauth_scope = 5; + + // Whether server should return a given status + optional EchoStatus response_status = 7; +} + +// Unary response, as configured by the request. +message SimpleResponse { + // Payload to increase message size. + optional Payload payload = 1; + + // The user the request came from, for verifying authentication was + // successful when the client expected it. + optional string username = 2; + + // OAuth scope. + optional string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. + optional Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. +message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + optional int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + optional int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + optional PayloadType response_type = 1; + + // Configuration for each expected response message. 
+ repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; + + // Whether server should return a given status + optional EchoStatus response_status = 7; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + optional Payload payload = 1; +} + +// A simple service to test the various types of RPCs and experiment with +// performance with various types of payload. +service TestService { + // One empty request followed by one empty response. + rpc EmptyCall(Empty) returns (Empty); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + rpc StreamingOutputCall(StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + rpc StreamingInputCall(stream StreamingInputCallRequest) + returns (StreamingInputCallResponse); + + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + rpc FullDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ rpc HalfDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); +} + +// A simple service NOT implemented at servers so clients can test for +// that case. +service UnimplementedService { + // A call that no server should implement + rpc UnimplementedCall(grpc.testing.Empty) returns (grpc.testing.Empty); +} diff --git a/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go b/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go new file mode 100644 index 000000000..5b42b76a9 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * + * Client used to test http2 error edge cases like GOAWAYs and RST_STREAMs + * + * Documentation: + * https://github.com/grpc/grpc/blob/master/doc/negative-http2-interop-test-descriptions.md + */ + +package main + +import ( + "flag" + "net" + "strconv" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + serverHost = flag.String("server_host", "127.0.0.1", "The server host name") + serverPort = flag.Int("server_port", 8080, "The server port number") + testCase = flag.String("test_case", "goaway", + `Configure different test cases. Valid options are: + goaway : client sends two requests, the server will send a goaway in between; + rst_after_header : server will send rst_stream after it sends headers; + rst_during_data : server will send rst_stream while sending data; + rst_after_data : server will send rst_stream after sending data; + ping : server will send pings between each http2 frame; + max_streams : server will ensure that the max_concurrent_streams limit is upheld;`) + largeReqSize = 271828 + largeRespSize = 314159 +) + +func largeSimpleRequest() *testpb.SimpleRequest { + pl := interop.ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + return &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + } +} + +// sends two unary calls. The server asserts that the calls use different connections. +func goaway(tc testpb.TestServiceClient) { + interop.DoLargeUnaryCall(tc) + // sleep to ensure that the client has time to recv the GOAWAY. + // TODO(ncteisen): make this less hacky. 
+ time.Sleep(1 * time.Second) + interop.DoLargeUnaryCall(tc) +} + +func rstAfterHeader(tc testpb.TestServiceClient) { + req := largeSimpleRequest() + reply, err := tc.UnaryCall(context.Background(), req) + if reply != nil { + grpclog.Fatalf("Client received reply despite server sending rst stream after header") + } + if grpc.Code(err) != codes.Internal { + grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, grpc.Code(err), codes.Internal) + } +} + +func rstDuringData(tc testpb.TestServiceClient) { + req := largeSimpleRequest() + reply, err := tc.UnaryCall(context.Background(), req) + if reply != nil { + grpclog.Fatalf("Client received reply despite server sending rst stream during data") + } + if grpc.Code(err) != codes.Unknown { + grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, grpc.Code(err), codes.Unknown) + } +} + +func rstAfterData(tc testpb.TestServiceClient) { + req := largeSimpleRequest() + reply, err := tc.UnaryCall(context.Background(), req) + if reply != nil { + grpclog.Fatalf("Client received reply despite server sending rst stream after data") + } + if grpc.Code(err) != codes.Internal { + grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, grpc.Code(err), codes.Internal) + } +} + +func ping(tc testpb.TestServiceClient) { + // The server will assert that every ping it sends was ACK-ed by the client. + interop.DoLargeUnaryCall(tc) +} + +func maxStreams(tc testpb.TestServiceClient) { + interop.DoLargeUnaryCall(tc) + var wg sync.WaitGroup + for i := 0; i < 15; i++ { + wg.Add(1) + go func() { + defer wg.Done() + interop.DoLargeUnaryCall(tc) + }() + } + wg.Wait() +} + +func main() { + flag.Parse() + serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) + var opts []grpc.DialOption + opts = append(opts, grpc.WithInsecure()) + conn, err := grpc.Dial(serverAddr, opts...) 
+ if err != nil { + grpclog.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + tc := testpb.NewTestServiceClient(conn) + switch *testCase { + case "goaway": + goaway(tc) + grpclog.Println("goaway done") + case "rst_after_header": + rstAfterHeader(tc) + grpclog.Println("rst_after_header done") + case "rst_during_data": + rstDuringData(tc) + grpclog.Println("rst_during_data done") + case "rst_after_data": + rstAfterData(tc) + grpclog.Println("rst_after_data done") + case "ping": + ping(tc) + grpclog.Println("ping done") + case "max_streams": + maxStreams(tc) + grpclog.Println("max_streams done") + default: + grpclog.Fatal("Unsupported test case: ", *testCase) + } +} diff --git a/vendor/google.golang.org/grpc/interop/server/server.go b/vendor/google.golang.org/grpc/interop/server/server.go new file mode 100644 index 000000000..b833c76f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/server/server.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import ( + "flag" + "net" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/testdata" +) + +var ( + useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") + certFile = flag.String("tls_cert_file", "", "The TLS cert file") + keyFile = flag.String("tls_key_file", "", "The TLS key file") + port = flag.Int("port", 10000, "The server port") +) + +func main() { + flag.Parse() + p := strconv.Itoa(*port) + lis, err := net.Listen("tcp", ":"+p) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + var opts []grpc.ServerOption + if *useTLS { + if *certFile == "" { + *certFile = testdata.Path("server1.pem") + } + if *keyFile == "" { + *keyFile = testdata.Path("server1.key") + } + creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) + if err != nil { + grpclog.Fatalf("Failed to generate credentials %v", err) + } + opts = []grpc.ServerOption{grpc.Creds(creds)} + } + server := grpc.NewServer(opts...) + testpb.RegisterTestServiceServer(server, interop.NewTestServer()) + server.Serve(lis) +} diff --git a/vendor/google.golang.org/grpc/interop/test_utils.go b/vendor/google.golang.org/grpc/interop/test_utils.go new file mode 100644 index 000000000..36285d362 --- /dev/null +++ b/vendor/google.golang.org/grpc/interop/test_utils.go @@ -0,0 +1,748 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +package interop + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/metadata" +) + +var ( + reqSizes = []int{27182, 8, 1828, 45904} + respSizes = []int{31415, 9, 2653, 58979} + largeReqSize = 271828 + largeRespSize = 314159 + initialMetadataKey = "x-grpc-test-echo-initial" + trailingMetadataKey = "x-grpc-test-echo-trailing-bin" +) + +// ClientNewPayload returns a payload of the given type and size. +func ClientNewPayload(t testpb.PayloadType, size int) *testpb.Payload { + if size < 0 { + grpclog.Fatalf("Requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") + default: + grpclog.Fatalf("Unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t.Enum(), + Body: body, + } +} + +// DoEmptyUnaryCall performs a unary RPC with empty request and response messages. +func DoEmptyUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) { + reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, args...) 
+ if err != nil { + grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err) + } + if !proto.Equal(&testpb.Empty{}, reply) { + grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{}) + } +} + +// DoLargeUnaryCall performs a unary RPC with large payload in the request and response. +func DoLargeUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) { + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + } + reply, err := tc.UnaryCall(context.Background(), req, args...) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + t := reply.GetPayload().GetType() + s := len(reply.GetPayload().GetBody()) + if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize { + grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) + } +} + +// DoClientStreaming performs a client streaming RPC. +func DoClientStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) { + stream, err := tc.StreamingInputCall(context.Background(), args...) 
+ if err != nil { + grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) + } + var sum int + for _, s := range reqSizes { + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, s) + req := &testpb.StreamingInputCallRequest{ + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) + } + sum += s + } + reply, err := stream.CloseAndRecv() + if err != nil { + grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + } + if reply.GetAggregatedPayloadSize() != int32(sum) { + grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) + } +} + +// DoServerStreaming performs a server streaming RPC. +func DoServerStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) { + respParam := make([]*testpb.ResponseParameters, len(respSizes)) + for i, s := range respSizes { + respParam[i] = &testpb.ResponseParameters{ + Size: proto.Int32(int32(s)), + } + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + } + stream, err := tc.StreamingOutputCall(context.Background(), req, args...) 
+ if err != nil { + grpclog.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err) + } + var rpcStatus error + var respCnt int + var index int + for { + reply, err := stream.Recv() + if err != nil { + rpcStatus = err + break + } + t := reply.GetPayload().GetType() + if t != testpb.PayloadType_COMPRESSABLE { + grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) + } + size := len(reply.GetPayload().GetBody()) + if size != int(respSizes[index]) { + grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) + } + index++ + respCnt++ + } + if rpcStatus != io.EOF { + grpclog.Fatalf("Failed to finish the server streaming rpc: %v", rpcStatus) + } + if respCnt != len(respSizes) { + grpclog.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) + } +} + +// DoPingPong performs ping-pong style bi-directional streaming RPC. +func DoPingPong(tc testpb.TestServiceClient, args ...grpc.CallOption) { + stream, err := tc.FullDuplexCall(context.Background(), args...) 
+ if err != nil { + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + var index int + for index < len(reqSizes) { + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(respSizes[index])), + }, + } + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index]) + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) + } + reply, err := stream.Recv() + if err != nil { + grpclog.Fatalf("%v.Recv() = %v", stream, err) + } + t := reply.GetPayload().GetType() + if t != testpb.PayloadType_COMPRESSABLE { + grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) + } + size := len(reply.GetPayload().GetBody()) + if size != int(respSizes[index]) { + grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) + } + index++ + } + if err := stream.CloseSend(); err != nil { + grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + } + if _, err := stream.Recv(); err != io.EOF { + grpclog.Fatalf("%v failed to complele the ping pong test: %v", stream, err) + } +} + +// DoEmptyStream sets up a bi-directional streaming with zero message. +func DoEmptyStream(tc testpb.TestServiceClient, args ...grpc.CallOption) { + stream, err := tc.FullDuplexCall(context.Background(), args...) + if err != nil { + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + if err := stream.CloseSend(); err != nil { + grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + } + if _, err := stream.Recv(); err != io.EOF { + grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err) + } +} + +// DoTimeoutOnSleepingServer performs an RPC on a sleep server which causes RPC timeout. 
+func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient, args ...grpc.CallOption) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + defer cancel() + stream, err := tc.FullDuplexCall(ctx, args...) + if err != nil { + if grpc.Code(err) == codes.DeadlineExceeded { + return + } + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182) + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + Payload: pl, + } + if err := stream.Send(req); err != nil { + if grpc.Code(err) != codes.DeadlineExceeded { + grpclog.Fatalf("%v.Send(_) = %v", stream, err) + } + } + if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded { + grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded) + } +} + +// DoComputeEngineCreds performs a unary RPC with compute engine auth. +func DoComputeEngineCreds(tc testpb.TestServiceClient, serviceAccount, oauthScope string) { + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + user := reply.GetUsername() + scope := reply.GetOauthScope() + if user != serviceAccount { + grpclog.Fatalf("Got user name %q, want %q.", user, serviceAccount) + } + if !strings.Contains(oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) + } +} + +func getServiceAccountJSONKey(keyFile string) []byte { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + grpclog.Fatalf("Failed to read the service account key file: %v", err) + } + return 
jsonKey +} + +// DoServiceAccountCreds performs a unary RPC with service account auth. +func DoServiceAccountCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) + } +} + +// DoJWTTokenCreds performs a unary RPC with JWT token auth. +func DoJWTTokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile string) { + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + user := reply.GetUsername() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } +} + +// GetToken obtains an OAUTH token from the input. 
+func GetToken(serviceAccountKeyFile string, oauthScope string) *oauth2.Token { + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + config, err := google.JWTConfigFromJSON(jsonKey, oauthScope) + if err != nil { + grpclog.Fatalf("Failed to get the config: %v", err) + } + token, err := config.TokenSource(context.Background()).Token() + if err != nil { + grpclog.Fatalf("Failed to get the token: %v", err) + } + return token +} + +// DoOauth2TokenCreds performs a unary RPC with OAUTH2 token auth. +func DoOauth2TokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) + } +} + +// DoPerRPCCreds performs a unary RPC with per RPC OAUTH2 token. 
+func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { + jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeRespSize)), + Payload: pl, + FillUsername: proto.Bool(true), + FillOauthScope: proto.Bool(true), + } + token := GetToken(serviceAccountKeyFile, oauthScope) + kv := map[string]string{"authorization": token.Type() + " " + token.AccessToken} + ctx := metadata.NewOutgoingContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) + reply, err := tc.UnaryCall(ctx, req) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + user := reply.GetUsername() + scope := reply.GetOauthScope() + if !strings.Contains(string(jsonKey), user) { + grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) + } + if !strings.Contains(oauthScope, scope) { + grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) + } +} + +var ( + testMetadata = metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + } +) + +// DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent. +func DoCancelAfterBegin(tc testpb.TestServiceClient, args ...grpc.CallOption) { + ctx, cancel := context.WithCancel(metadata.NewOutgoingContext(context.Background(), testMetadata)) + stream, err := tc.StreamingInputCall(ctx, args...) + if err != nil { + grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) + } + cancel() + _, err = stream.CloseAndRecv() + if grpc.Code(err) != codes.Canceled { + grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled) + } +} + +// DoCancelAfterFirstResponse cancels the RPC after receiving the first message from the server. 
+func DoCancelAfterFirstResponse(tc testpb.TestServiceClient, args ...grpc.CallOption) { + ctx, cancel := context.WithCancel(context.Background()) + stream, err := tc.FullDuplexCall(ctx, args...) + if err != nil { + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(31415), + }, + } + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182) + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: pl, + } + if err := stream.Send(req); err != nil { + grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) + } + if _, err := stream.Recv(); err != nil { + grpclog.Fatalf("%v.Recv() = %v", stream, err) + } + cancel() + if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled { + grpclog.Fatalf("%v compleled with error code %d, want %d", stream, grpc.Code(err), codes.Canceled) + } +} + +var ( + initialMetadataValue = "test_initial_metadata_value" + trailingMetadataValue = "\x0a\x0b\x0a\x0b\x0a\x0b" + customMetadata = metadata.Pairs( + initialMetadataKey, initialMetadataValue, + trailingMetadataKey, trailingMetadataValue, + ) +) + +func validateMetadata(header, trailer metadata.MD) { + if len(header[initialMetadataKey]) != 1 { + grpclog.Fatalf("Expected exactly one header from server. Received %d", len(header[initialMetadataKey])) + } + if header[initialMetadataKey][0] != initialMetadataValue { + grpclog.Fatalf("Got header %s; want %s", header[initialMetadataKey][0], initialMetadataValue) + } + if len(trailer[trailingMetadataKey]) != 1 { + grpclog.Fatalf("Expected exactly one trailer from server. 
Received %d", len(trailer[trailingMetadataKey])) + } + if trailer[trailingMetadataKey][0] != trailingMetadataValue { + grpclog.Fatalf("Got trailer %s; want %s", trailer[trailingMetadataKey][0], trailingMetadataValue) + } +} + +// DoCustomMetadata checks that metadata is echoed back to the client. +func DoCustomMetadata(tc testpb.TestServiceClient, args ...grpc.CallOption) { + // Testing with UnaryCall. + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 1) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(1)), + Payload: pl, + } + ctx := metadata.NewOutgoingContext(context.Background(), customMetadata) + var header, trailer metadata.MD + args = append(args, grpc.Header(&header), grpc.Trailer(&trailer)) + reply, err := tc.UnaryCall( + ctx, + req, + args..., + ) + if err != nil { + grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + } + t := reply.GetPayload().GetType() + s := len(reply.GetPayload().GetBody()) + if t != testpb.PayloadType_COMPRESSABLE || s != 1 { + grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, 1) + } + validateMetadata(header, trailer) + + // Testing with FullDuplex. + stream, err := tc.FullDuplexCall(ctx, args...) 
+ if err != nil { + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(1), + }, + } + streamReq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: pl, + } + if err := stream.Send(streamReq); err != nil { + grpclog.Fatalf("%v has error %v while sending %v", stream, err, streamReq) + } + streamHeader, err := stream.Header() + if err != nil { + grpclog.Fatalf("%v.Header() = %v", stream, err) + } + if _, err := stream.Recv(); err != nil { + grpclog.Fatalf("%v.Recv() = %v", stream, err) + } + if err := stream.CloseSend(); err != nil { + grpclog.Fatalf("%v.CloseSend() = %v, want ", stream, err) + } + if _, err := stream.Recv(); err != io.EOF { + grpclog.Fatalf("%v failed to complete the custom metadata test: %v", stream, err) + } + streamTrailer := stream.Trailer() + validateMetadata(streamHeader, streamTrailer) +} + +// DoStatusCodeAndMessage checks that the status code is propagated back to the client. +func DoStatusCodeAndMessage(tc testpb.TestServiceClient, args ...grpc.CallOption) { + var code int32 = 2 + msg := "test status message" + expectedErr := grpc.Errorf(codes.Code(code), msg) + respStatus := &testpb.EchoStatus{ + Code: proto.Int32(code), + Message: proto.String(msg), + } + // Test UnaryCall. + req := &testpb.SimpleRequest{ + ResponseStatus: respStatus, + } + if _, err := tc.UnaryCall(context.Background(), req, args...); err == nil || err.Error() != expectedErr.Error() { + grpclog.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr) + } + // Test FullDuplexCall. + stream, err := tc.FullDuplexCall(context.Background(), args...) 
+ if err != nil { + grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + streamReq := &testpb.StreamingOutputCallRequest{ + ResponseStatus: respStatus, + } + if err := stream.Send(streamReq); err != nil { + grpclog.Fatalf("%v has error %v while sending %v, want ", stream, err, streamReq) + } + if err := stream.CloseSend(); err != nil { + grpclog.Fatalf("%v.CloseSend() = %v, want ", stream, err) + } + if _, err = stream.Recv(); err.Error() != expectedErr.Error() { + grpclog.Fatalf("%v.Recv() returned error %v, want %v", stream, err, expectedErr) + } +} + +// DoUnimplementedService attempts to call a method from an unimplemented service. +func DoUnimplementedService(tc testpb.UnimplementedServiceClient) { + _, err := tc.UnimplementedCall(context.Background(), &testpb.Empty{}) + if grpc.Code(err) != codes.Unimplemented { + grpclog.Fatalf("%v.UnimplementedCall() = _, %v, want _, %v", tc, grpc.Code(err), codes.Unimplemented) + } +} + +// DoUnimplementedMethod attempts to call an unimplemented method. +func DoUnimplementedMethod(cc *grpc.ClientConn) { + var req, reply proto.Message + if err := grpc.Invoke(context.Background(), "/grpc.testing.TestService/UnimplementedCall", req, reply, cc); err == nil || grpc.Code(err) != codes.Unimplemented { + grpclog.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want error code %s", err, codes.Unimplemented) + } +} + +type testServer struct { +} + +// NewTestServer creates a test server for test service. 
+func NewTestServer() testpb.TestServiceServer { + return &testServer{} +} + +func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return new(testpb.Empty), nil +} + +func serverNewPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { + if size < 0 { + return nil, fmt.Errorf("requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + return nil, fmt.Errorf("payloadType UNCOMPRESSABLE is not supported") + default: + return nil, fmt.Errorf("unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t.Enum(), + Body: body, + }, nil +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + status := in.GetResponseStatus() + if md, ok := metadata.FromIncomingContext(ctx); ok { + if initialMetadata, ok := md[initialMetadataKey]; ok { + header := metadata.Pairs(initialMetadataKey, initialMetadata[0]) + grpc.SendHeader(ctx, header) + } + if trailingMetadata, ok := md[trailingMetadataKey]; ok { + trailer := metadata.Pairs(trailingMetadataKey, trailingMetadata[0]) + grpc.SetTrailer(ctx, trailer) + } + } + if status != nil && *status.Code != 0 { + return nil, grpc.Errorf(codes.Code(*status.Code), *status.Message) + } + pl, err := serverNewPayload(in.GetResponseType(), in.GetResponseSize()) + if err != nil { + return nil, err + } + return &testpb.SimpleResponse{ + Payload: pl, + }, nil +} + +func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { + cs := args.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + pl, err := serverNewPayload(args.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + if err := 
stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: pl, + }); err != nil { + return err + } + } + return nil +} + +func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { + var sum int + for { + in, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&testpb.StreamingInputCallResponse{ + AggregatedPayloadSize: proto.Int32(int32(sum)), + }) + } + if err != nil { + return err + } + p := in.GetPayload().GetBody() + sum += len(p) + } +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + if md, ok := metadata.FromIncomingContext(stream.Context()); ok { + if initialMetadata, ok := md[initialMetadataKey]; ok { + header := metadata.Pairs(initialMetadataKey, initialMetadata[0]) + stream.SendHeader(header) + } + if trailingMetadata, ok := md[trailingMetadataKey]; ok { + trailer := metadata.Pairs(trailingMetadataKey, trailingMetadata[0]) + stream.SetTrailer(trailer) + } + } + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + return nil + } + if err != nil { + return err + } + status := in.GetResponseStatus() + if status != nil && *status.Code != 0 { + return grpc.Errorf(codes.Code(*status.Code), *status.Message) + } + cs := in.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + pl, err := serverNewPayload(in.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: pl, + }); err != nil { + return err + } + } + } +} + +func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { + var msgBuf []*testpb.StreamingOutputCallRequest + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. 
+ break + } + if err != nil { + return err + } + msgBuf = append(msgBuf, in) + } + for _, m := range msgBuf { + cs := m.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + pl, err := serverNewPayload(m.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: pl, + }); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 000000000..f8adc7e6d --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a connection is broken +// and send pings so intermediaries will be aware of the liveness of the connection. +// Make sure these parameters are set in coordination with the keepalive policy on the server, +// as incompatible settings can result in closing of connection. 
+type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client runs keepalive checks even with no active RPCs. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway. + // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. + // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. + Timeout time.Duration // The current default value is 20 seconds. 
+} + +// EnforcementPolicy is used to set keepalive enforcement policy on the server-side. +// Server will close connection with a client that violates this policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server expects keepalive pings even when there are no active streams(RPCs). + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 000000000..ccfea5d45 --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,137 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. +// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" +) + +// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used. +func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. 
+type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var key string + for i, s := range kv { + if i%2 == 0 { + key = strings.ToLower(s) + continue + } + md[key] = append(md[key], s) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Join joins any number of mds into a single MD. +// The order of values for each key is determined by the order in which +// the mds containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) 
+ } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, md) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to the copies of the returned MD. +func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdOutgoingKey{}).(MD) + return +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata_test.go b/vendor/google.golang.org/grpc/metadata/metadata_test.go new file mode 100644 index 000000000..210cbb267 --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata_test.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package metadata + +import ( + "reflect" + "testing" +) + +func TestPairsMD(t *testing.T) { + for _, test := range []struct { + // input + kv []string + // output + md MD + }{ + {[]string{}, MD{}}, + {[]string{"k1", "v1", "k1", "v2"}, MD{"k1": []string{"v1", "v2"}}}, + } { + md := Pairs(test.kv...) + if !reflect.DeepEqual(md, test.md) { + t.Fatalf("Pairs(%v) = %v, want %v", test.kv, md, test.md) + } + } +} + +func TestCopy(t *testing.T) { + const key, val = "key", "val" + orig := Pairs(key, val) + copy := orig.Copy() + if !reflect.DeepEqual(orig, copy) { + t.Errorf("copied value not equal to the original, got %v, want %v", copy, orig) + } + orig[key][0] = "foo" + if v := copy[key][0]; v != val { + t.Errorf("change in original should not affect copy, got %q, want %q", v, val) + } +} + +func TestJoin(t *testing.T) { + for _, test := range []struct { + mds []MD + want MD + }{ + {[]MD{}, MD{}}, + {[]MD{Pairs("foo", "bar")}, Pairs("foo", "bar")}, + {[]MD{Pairs("foo", "bar"), Pairs("foo", "baz")}, Pairs("foo", "bar", "foo", "baz")}, + {[]MD{Pairs("foo", "bar"), Pairs("foo", "baz"), Pairs("zip", "zap")}, Pairs("foo", "bar", "foo", "baz", "zip", "zap")}, + } { + md := Join(test.mds...) + if !reflect.DeepEqual(md, test.want) { + t.Errorf("context's metadata is %v, want %v", md, test.want) + } + } +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 000000000..7e69a2ca0 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,290 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "errors" + "fmt" + "net" + "strconv" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. +func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. 
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string, returns formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
+// examples:
+// target: "www.google.com" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
+// target: ":80" returns host: "localhost", port: "80"
+// target: ":" returns host: "localhost", port: "443"
+func parseTarget(target string) (host, port string, err error) {
+	if target == "" {
+		return "", "", errMissingAddr
+	}
+
+	if ip := net.ParseIP(target); ip != nil {
+		// target is an IPv4 or IPv6(without brackets) address
+		return target, defaultPort, nil
+	}
+	if host, port, err := net.SplitHostPort(target); err == nil {
+		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
+		if host == "" {
+			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+			host = "localhost"
+		}
+		if port == "" {
+			// If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
+			port = defaultPort
+		}
+		return host, port, nil
+	}
+	if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
+		// target doesn't have port
+		return host, port, nil
+	}
+	return "", "", fmt.Errorf("invalid target address %v", target)
+}
+
+// Resolve creates a watcher that watches the name resolution of the target.
+func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the adrress resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unncessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exisits until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. 
The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. + ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + 
newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver_test.go b/vendor/google.golang.org/grpc/naming/dns_resolver_test.go new file mode 100644 index 000000000..be1ac1aec --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver_test.go @@ -0,0 +1,315 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package naming + +import ( + "fmt" + "net" + "reflect" + "sync" + "testing" + "time" +) + +func newUpdateWithMD(op Operation, addr, lb string) *Update { + return &Update{ + Op: op, + Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: lb}, + } +} + +func toMap(u []*Update) map[string]*Update { + m := make(map[string]*Update) + for _, v := range u { + m[v.Addr] = v + } + return m +} + +func TestCompileUpdate(t *testing.T) { + tests := []struct { + oldAddrs []string + newAddrs []string + want []*Update + }{ + { + []string{}, + []string{"1.0.0.1"}, + []*Update{{Op: Add, Addr: "1.0.0.1"}}, + }, + { + []string{"1.0.0.1"}, + []string{"1.0.0.1"}, + []*Update{}, + }, + { + []string{"1.0.0.0"}, + []string{"1.0.0.1"}, + []*Update{{Op: Delete, Addr: "1.0.0.0"}, {Op: Add, Addr: "1.0.0.1"}}, + }, + { + []string{"1.0.0.1"}, + []string{"1.0.0.0"}, + []*Update{{Op: Add, Addr: "1.0.0.0"}, {Op: Delete, Addr: "1.0.0.1"}}, + }, + { + []string{"1.0.0.1"}, + []string{"1.0.0.1", "1.0.0.2", "1.0.0.3"}, + []*Update{{Op: Add, Addr: "1.0.0.2"}, {Op: Add, Addr: "1.0.0.3"}}, + }, + { + []string{"1.0.0.1", "1.0.0.2", "1.0.0.3"}, + []string{"1.0.0.0"}, + []*Update{{Op: Add, Addr: "1.0.0.0"}, {Op: Delete, Addr: "1.0.0.1"}, {Op: Delete, Addr: "1.0.0.2"}, {Op: Delete, Addr: "1.0.0.3"}}, + }, + { + []string{"1.0.0.1", "1.0.0.3", "1.0.0.5"}, + []string{"1.0.0.2", "1.0.0.3", "1.0.0.6"}, + []*Update{{Op: Delete, Addr: "1.0.0.1"}, {Op: Add, Addr: "1.0.0.2"}, {Op: Delete, Addr: "1.0.0.5"}, {Op: Add, Addr: "1.0.0.6"}}, + }, + { + []string{"1.0.0.1", "1.0.0.1", "1.0.0.2"}, + []string{"1.0.0.1"}, + []*Update{{Op: Delete, Addr: "1.0.0.2"}}, + }, + } + + var w dnsWatcher + for _, c := range tests { + w.curAddrs = make(map[string]*Update) + newUpdates := make(map[string]*Update) + for _, a := range c.oldAddrs { + w.curAddrs[a] = &Update{Addr: a} + } + for _, a := range c.newAddrs { + newUpdates[a] = &Update{Addr: a} + } + r := w.compileUpdate(newUpdates) + if 
!reflect.DeepEqual(toMap(c.want), toMap(r)) { + t.Errorf("w(%+v).compileUpdate(%+v) = %+v, want %+v", c.oldAddrs, c.newAddrs, updatesToSlice(r), updatesToSlice(c.want)) + } + } +} + +func TestResolveFunc(t *testing.T) { + tests := []struct { + addr string + want error + }{ + // TODO(yuxuanli): More false cases? + {"www.google.com", nil}, + {"foo.bar:12345", nil}, + {"127.0.0.1", nil}, + {"127.0.0.1:12345", nil}, + {"[::1]:80", nil}, + {"[2001:db8:a0b:12f0::1]:21", nil}, + {":80", nil}, + {"127.0.0...1:12345", nil}, + {"[fe80::1%lo0]:80", nil}, + {"golang.org:http", nil}, + {"[2001:db8::1]:http", nil}, + {":", nil}, + {"", errMissingAddr}, + {"[2001:db8:a0b:12f0::1", fmt.Errorf("invalid target address %v", "[2001:db8:a0b:12f0::1")}, + } + + r, err := NewDNSResolver() + if err != nil { + t.Errorf("%v", err) + } + for _, v := range tests { + _, err := r.Resolve(v.addr) + if !reflect.DeepEqual(err, v.want) { + t.Errorf("Resolve(%q) = %v, want %v", v.addr, err, v.want) + } + } +} + +var hostLookupTbl = map[string][]string{ + "foo.bar.com": {"1.2.3.4", "5.6.7.8"}, + "ipv4.single.fake": {"1.2.3.4"}, + "ipv4.multi.fake": {"1.2.3.4", "5.6.7.8", "9.10.11.12"}, + "ipv6.single.fake": {"2607:f8b0:400a:801::1001"}, + "ipv6.multi.fake": {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"}, +} + +func hostLookup(host string) ([]string, error) { + if addrs, ok := hostLookupTbl[host]; ok { + return addrs, nil + } + return nil, fmt.Errorf("failed to lookup host:%s resolution in hostLookupTbl", host) +} + +var srvLookupTbl = map[string][]*net.SRV{ + "_grpclb._tcp.srv.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv4.multi.fake": {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv6.multi.fake": {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}}, +} + +func srvLookup(service, proto, name string) 
(string, []*net.SRV, error) { + cname := "_" + service + "._" + proto + "." + name + if srvs, ok := srvLookupTbl[cname]; ok { + return cname, srvs, nil + } + return "", nil, fmt.Errorf("failed to lookup srv record for %s in srvLookupTbl", cname) +} + +func updatesToSlice(updates []*Update) []Update { + res := make([]Update, len(updates)) + for i, u := range updates { + res[i] = *u + } + return res +} + +func testResolver(t *testing.T, freq time.Duration, slp time.Duration) { + tests := []struct { + target string + want []*Update + }{ + { + "foo.bar.com", + []*Update{{Op: Add, Addr: "1.2.3.4" + colonDefaultPort}, {Op: Add, Addr: "5.6.7.8" + colonDefaultPort}}, + }, + { + "foo.bar.com:1234", + []*Update{{Op: Add, Addr: "1.2.3.4:1234"}, {Op: Add, Addr: "5.6.7.8:1234"}}, + }, + { + "srv.ipv4.single.fake", + []*Update{newUpdateWithMD(Add, "1.2.3.4:1234", "ipv4.single.fake")}, + }, + { + "srv.ipv4.multi.fake", + []*Update{ + newUpdateWithMD(Add, "1.2.3.4:1234", "ipv4.multi.fake"), + newUpdateWithMD(Add, "5.6.7.8:1234", "ipv4.multi.fake"), + newUpdateWithMD(Add, "9.10.11.12:1234", "ipv4.multi.fake")}, + }, + { + "srv.ipv6.single.fake", + []*Update{newUpdateWithMD(Add, "[2607:f8b0:400a:801::1001]:1234", "ipv6.single.fake")}, + }, + { + "srv.ipv6.multi.fake", + []*Update{ + newUpdateWithMD(Add, "[2607:f8b0:400a:801::1001]:1234", "ipv6.multi.fake"), + newUpdateWithMD(Add, "[2607:f8b0:400a:801::1002]:1234", "ipv6.multi.fake"), + newUpdateWithMD(Add, "[2607:f8b0:400a:801::1003]:1234", "ipv6.multi.fake"), + }, + }, + } + + for _, a := range tests { + r, err := NewDNSResolverWithFreq(freq) + if err != nil { + t.Fatalf("%v\n", err) + } + w, err := r.Resolve(a.target) + if err != nil { + t.Fatalf("%v\n", err) + } + updates, err := w.Next() + if err != nil { + t.Fatalf("%v\n", err) + } + if !reflect.DeepEqual(toMap(a.want), toMap(updates)) { + t.Errorf("Resolve(%q) = %+v, want %+v\n", a.target, updatesToSlice(updates), updatesToSlice(a.want)) + } + var wg sync.WaitGroup + wg.Add(1) 
+ go func() { + defer wg.Done() + for { + _, err := w.Next() + if err != nil { + return + } + t.Error("Execution shouldn't reach here, since w.Next() should be blocked until close happen.") + } + }() + // Sleep for sometime to let watcher do more than one lookup + time.Sleep(slp) + w.Close() + wg.Wait() + } +} + +func TestResolve(t *testing.T) { + defer replaceNetFunc()() + testResolver(t, time.Millisecond*5, time.Millisecond*10) +} + +const colonDefaultPort = ":" + defaultPort + +func TestIPWatcher(t *testing.T) { + tests := []struct { + target string + want []*Update + }{ + {"127.0.0.1", []*Update{{Op: Add, Addr: "127.0.0.1" + colonDefaultPort}}}, + {"127.0.0.1:12345", []*Update{{Op: Add, Addr: "127.0.0.1:12345"}}}, + {"::1", []*Update{{Op: Add, Addr: "[::1]" + colonDefaultPort}}}, + {"[::1]:12345", []*Update{{Op: Add, Addr: "[::1]:12345"}}}, + {"[::1]:", []*Update{{Op: Add, Addr: "[::1]:443"}}}, + {"2001:db8:85a3::8a2e:370:7334", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, + {"[2001:db8:85a3::8a2e:370:7334]", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, + {"[2001:db8:85a3::8a2e:370:7334]:12345", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]:12345"}}}, + {"[2001:db8::1]:http", []*Update{{Op: Add, Addr: "[2001:db8::1]:http"}}}, + // TODO(yuxuanli): zone support? 
+ } + + for _, v := range tests { + r, err := NewDNSResolverWithFreq(time.Millisecond * 5) + if err != nil { + t.Fatalf("%v\n", err) + } + w, err := r.Resolve(v.target) + if err != nil { + t.Fatalf("%v\n", err) + } + var updates []*Update + var wg sync.WaitGroup + wg.Add(1) + count := 0 + go func() { + defer wg.Done() + for { + u, err := w.Next() + if err != nil { + return + } + updates = u + count++ + } + }() + // Sleep for sometime to let watcher do more than one lookup + time.Sleep(time.Millisecond * 10) + w.Close() + wg.Wait() + if !reflect.DeepEqual(v.want, updates) { + t.Errorf("Resolve(%q) = %v, want %+v\n", v.target, updatesToSlice(updates), updatesToSlice(v.want)) + } + if count != 1 { + t.Errorf("IPWatcher Next() should return only once, not %d times\n", count) + } + } +} diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go new file mode 100644 index 000000000..8bdf21e79 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17.go @@ -0,0 +1,34 @@ +// +build go1.7, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } +) diff --git a/vendor/google.golang.org/grpc/naming/go17_test.go b/vendor/google.golang.org/grpc/naming/go17_test.go new file mode 100644 index 000000000..d1de221a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17_test.go @@ -0,0 +1,42 @@ +// +build go1.7, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +func replaceNetFunc() func() { + oldLookupHost := lookupHost + oldLookupSRV := lookupSRV + lookupHost = func(ctx context.Context, host string) ([]string, error) { + return hostLookup(host) + } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return srvLookup(service, proto, name) + } + return func() { + lookupHost = oldLookupHost + lookupSRV = oldLookupSRV + } +} diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go new file mode 100644 index 000000000..b5a0f8427 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go18.go @@ -0,0 +1,28 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) diff --git a/vendor/google.golang.org/grpc/naming/go18_test.go b/vendor/google.golang.org/grpc/naming/go18_test.go new file mode 100644 index 000000000..5e297539b --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go18_test.go @@ -0,0 +1,41 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "context" + "net" +) + +func replaceNetFunc() func() { + oldLookupHost := lookupHost + oldLookupSRV := lookupSRV + lookupHost = func(ctx context.Context, host string) ([]string, error) { + return hostLookup(host) + } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return srvLookup(service, proto, name) + } + return func() { + lookupHost = oldLookupHost + lookupSRV = oldLookupSRV + } +} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 000000000..1af7e32f8 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,59 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// The interface is EXPERIMENTAL and may be suject to change. 
+package naming
+
+// Operation defines the corresponding operations for a name resolution change.
+type Operation uint8
+
+const (
+	// Add indicates a new address is added.
+	Add Operation = iota
+	// Delete indicates an existing address is deleted.
+	Delete
+)
+
+// Update defines a name resolution update. Notice that it is not valid having both
+// empty string Addr and nil Metadata in an Update.
+type Update struct {
+	// Op indicates the operation of the update.
+	Op Operation
+	// Addr is the updated address. It is empty string if there is no address update.
+	Addr string
+	// Metadata is the updated metadata. It is nil if there is no metadata update.
+	// Metadata is not required for a custom naming implementation.
+	Metadata interface{}
+}
+
+// Resolver creates a Watcher for a target to track its resolution changes.
+type Resolver interface {
+	// Resolve creates a Watcher for target.
+	Resolve(target string) (Watcher, error)
+}
+
+// Watcher watches for the updates on the specified target.
+type Watcher interface {
+	// Next blocks until an update or error happens. It may return one or more
+	// updates. The first call should get the full set of the results. It should
+	// return an error if and only if Watcher cannot recover.
+	Next() ([]*Update, error)
+	// Close closes the Watcher.
+	Close()
+}
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 000000000..317b8b9d0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 000000000..9085dbc9c --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{blockingCh: make(chan struct{})} + return bp +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. + close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. 
+func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ( + p balancer.Picker + ch chan struct{} + ) + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. + bp.mu.Unlock() + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p = bp.picker + bp.mu.Unlock() + + subConn, put, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v", err) + default: + // err is some other error. + return nil, nil, toRPCErr(err) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + return t, put, nil + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. 
+ } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper_test.go b/vendor/google.golang.org/grpc/picker_wrapper_test.go new file mode 100644 index 000000000..23bc8f243 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper_test.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "fmt" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + _ "google.golang.org/grpc/grpclog/glogger" + "google.golang.org/grpc/test/leakcheck" + "google.golang.org/grpc/transport" +) + +const goroutineCount = 5 + +var ( + testT = &testTransport{} + testSC = &acBalancerWrapper{ac: &addrConn{ + state: connectivity.Ready, + transport: testT, + }} + testSCNotReady = &acBalancerWrapper{ac: &addrConn{ + state: connectivity.TransientFailure, + }} +) + +type testTransport struct { + transport.ClientTransport +} + +type testingPicker struct { + err error + sc balancer.SubConn + maxCalled int64 +} + +func (p *testingPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if atomic.AddInt64(&p.maxCalled, -1) < 0 { + return nil, nil, fmt.Errorf("Pick called to many times (> goroutineCount)") + } + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} + +func TestBlockingPickTimeout(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, _, err := bp.pick(ctx, true, balancer.PickOptions{}); err != context.DeadlineExceeded { + t.Errorf("bp.pick returned error %v, want DeadlineExceeded", err) + } +} + +func TestBlockingPick(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + // All goroutines should block because picker is nil in bp. 
+ var finishedCount uint64 + for i := goroutineCount; i > 0; i-- { + go func() { + if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT { + t.Errorf("bp.pick returned non-nil error: %v", err) + } + atomic.AddUint64(&finishedCount, 1) + }() + } + time.Sleep(50 * time.Millisecond) + if c := atomic.LoadUint64(&finishedCount); c != 0 { + t.Errorf("finished goroutines count: %v, want 0", c) + } + bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) +} + +func TestBlockingPickNoSubAvailable(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + var finishedCount uint64 + bp.updatePicker(&testingPicker{err: balancer.ErrNoSubConnAvailable, maxCalled: goroutineCount}) + // All goroutines should block because picker returns no sc avilable. + for i := goroutineCount; i > 0; i-- { + go func() { + if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT { + t.Errorf("bp.pick returned non-nil error: %v", err) + } + atomic.AddUint64(&finishedCount, 1) + }() + } + time.Sleep(50 * time.Millisecond) + if c := atomic.LoadUint64(&finishedCount); c != 0 { + t.Errorf("finished goroutines count: %v, want 0", c) + } + bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) +} + +func TestBlockingPickTransientWaitforready(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + bp.updatePicker(&testingPicker{err: balancer.ErrTransientFailure, maxCalled: goroutineCount}) + var finishedCount uint64 + // All goroutines should block because picker returns transientFailure and + // picks are not failfast. 
+ for i := goroutineCount; i > 0; i-- { + go func() { + if tr, _, err := bp.pick(context.Background(), false, balancer.PickOptions{}); err != nil || tr != testT { + t.Errorf("bp.pick returned non-nil error: %v", err) + } + atomic.AddUint64(&finishedCount, 1) + }() + } + time.Sleep(time.Millisecond) + if c := atomic.LoadUint64(&finishedCount); c != 0 { + t.Errorf("finished goroutines count: %v, want 0", c) + } + bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) +} + +func TestBlockingPickSCNotReady(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + bp.updatePicker(&testingPicker{sc: testSCNotReady, maxCalled: goroutineCount}) + var finishedCount uint64 + // All goroutines should block because sc is not ready. + for i := goroutineCount; i > 0; i-- { + go func() { + if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT { + t.Errorf("bp.pick returned non-nil error: %v", err) + } + atomic.AddUint64(&finishedCount, 1) + }() + } + time.Sleep(time.Millisecond) + if c := atomic.LoadUint64(&finishedCount); c != 0 { + t.Errorf("finished goroutines count: %v, want 0", c) + } + bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 000000000..7f993ef5a --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return "pickfirst" +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + } else { + b.sc.UpdateAddresses(addrs) + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc || s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts 
balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} diff --git a/vendor/google.golang.org/grpc/pickfirst_test.go b/vendor/google.golang.org/grpc/pickfirst_test.go new file mode 100644 index 000000000..e58b3422c --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst_test.go @@ -0,0 +1,352 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "math" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/test/leakcheck" +) + +func TestOneBackendPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 1 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}}) + // The second RPC should succeed. + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) +} + +func TestBackendsPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 2 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}) + // The second RPC should succeed with the first server. 
+ for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) +} + +func TestNewAddressWhileBlockingPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 1 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This RPC blocks until NewAddress is called. + Invoke(context.Background(), "/foo/bar", &req, &reply, cc) + }() + } + time.Sleep(50 * time.Millisecond) + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}}) + wg.Wait() +} + +func TestCloseWithPendingRPCPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 1 + _, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This RPC blocks until NewAddress is called. + Invoke(context.Background(), "/foo/bar", &req, &reply, cc) + }() + } + time.Sleep(50 * time.Millisecond) + cc.Close() + wg.Wait() +} + +func TestOneServerDownPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 2 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}) + // The second RPC should succeed with the first server. 
+ for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(time.Millisecond) + } + + servers[0].stop() + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) +} + +func TestAllServersDownPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 2 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}) + // The second RPC should succeed with the first server. 
+ for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(time.Millisecond) + } + + for i := 0; i < numServers; i++ { + servers[i].stop() + } + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); Code(err) == codes.Unavailable { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("EmptyCall() = _, %v, want _, error with code unavailable", err) +} + +func TestAddressesRemovedPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 3 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}, {Addr: servers[2].addr}}) + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Remove server[0]. 
+ r.NewAddress([]resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}}) + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Append server[0], nothing should change. + r.NewAddress([]resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}, {Addr: servers[0].addr}}) + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Remove server[1]. + r.NewAddress([]resolver.Address{{Addr: servers[2].addr}, {Addr: servers[0].addr}}) + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[2].port { + break + } + time.Sleep(time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[2].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Remove server[2]. 
+ r.NewAddress([]resolver.Address{{Addr: servers[0].addr}}) + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 000000000..3e17efec6 --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + + "golang.org/x/net/context" +) + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests. 
+ httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (string, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return "", err + } + if url == nil { + return "", errDisabled + } + return url.Host, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := (&http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: addr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + }) + + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// newProxyDialer returns a dialer that connects to proxy first if necessary. 
+// The returned dialer checks if a proxy is necessary, dial to the proxy with the +// provided dialer, does HTTP CONNECT handshake and returns the connection. +func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (conn net.Conn, err error) { + var skipHandshake bool + newAddr, err := mapAddress(ctx, addr) + if err != nil { + if err != errDisabled { + return nil, err + } + skipHandshake = true + newAddr = addr + } + + conn, err = dialer(ctx, newAddr) + if err != nil { + return + } + if !skipHandshake { + conn, err = doHTTPConnectHandshake(ctx, conn, addr) + } + return + } +} diff --git a/vendor/google.golang.org/grpc/proxy_test.go b/vendor/google.golang.org/grpc/proxy_test.go new file mode 100644 index 000000000..39ee123cc --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy_test.go @@ -0,0 +1,182 @@ +// +build !race + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/test/leakcheck" +) + +const ( + envTestAddr = "1.2.3.4:8080" + envProxyAddr = "2.3.4.5:7687" +) + +// overwriteAndRestore overwrite function httpProxyFromEnvironment and +// returns a function to restore the default values. 
+func overwrite(hpfe func(req *http.Request) (*url.URL, error)) func() { + backHPFE := httpProxyFromEnvironment + httpProxyFromEnvironment = hpfe + return func() { + httpProxyFromEnvironment = backHPFE + } +} + +type proxyServer struct { + t *testing.T + lis net.Listener + in net.Conn + out net.Conn +} + +func (p *proxyServer) run() { + in, err := p.lis.Accept() + if err != nil { + return + } + p.in = in + + req, err := http.ReadRequest(bufio.NewReader(in)) + if err != nil { + p.t.Errorf("failed to read CONNECT req: %v", err) + return + } + if req.Method != http.MethodConnect || req.UserAgent() != grpcUA { + resp := http.Response{StatusCode: http.StatusMethodNotAllowed} + resp.Write(p.in) + p.in.Close() + p.t.Errorf("get wrong CONNECT req: %+v", req) + return + } + + out, err := net.Dial("tcp", req.URL.Host) + if err != nil { + p.t.Errorf("failed to dial to server: %v", err) + return + } + resp := http.Response{StatusCode: http.StatusOK, Proto: "HTTP/1.0"} + resp.Write(p.in) + p.out = out + go io.Copy(p.in, p.out) + go io.Copy(p.out, p.in) +} + +func (p *proxyServer) stop() { + p.lis.Close() + if p.in != nil { + p.in.Close() + } + if p.out != nil { + p.out.Close() + } +} + +func TestHTTPConnect(t *testing.T) { + defer leakcheck.Check(t) + plis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to listen: %v", err) + } + p := &proxyServer{t: t, lis: plis} + go p.run() + defer p.stop() + + blis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to listen: %v", err) + } + + msg := []byte{4, 3, 5, 2} + recvBuf := make([]byte, len(msg), len(msg)) + done := make(chan struct{}) + go func() { + in, err := blis.Accept() + if err != nil { + t.Errorf("failed to accept: %v", err) + return + } + defer in.Close() + in.Read(recvBuf) + close(done) + }() + + // Overwrite the function in the test and restore them in defer. 
+ hpfe := func(req *http.Request) (*url.URL, error) { + return &url.URL{Host: plis.Addr().String()}, nil + } + defer overwrite(hpfe)() + + // Dial to proxy server. + dialer := newProxyDialer(func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return net.DialTimeout("tcp", addr, deadline.Sub(time.Now())) + } + return net.Dial("tcp", addr) + }) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + c, err := dialer(ctx, blis.Addr().String()) + if err != nil { + t.Fatalf("http connect Dial failed: %v", err) + } + defer c.Close() + + // Send msg on the connection. + c.Write(msg) + <-done + + // Check received msg. + if string(recvBuf) != string(msg) { + t.Fatalf("received msg: %v, want %v", recvBuf, msg) + } +} + +func TestMapAddressEnv(t *testing.T) { + defer leakcheck.Check(t) + // Overwrite the function in the test and restore them in defer. + hpfe := func(req *http.Request) (*url.URL, error) { + if req.URL.Host == envTestAddr { + return &url.URL{ + Scheme: "https", + Host: envProxyAddr, + }, nil + } + return nil, nil + } + defer overwrite(hpfe)() + + // envTestAddr should be handled by ProxyFromEnvironment. + got, err := mapAddress(context.Background(), envTestAddr) + if err != nil { + t.Error(err) + } + if got != envProxyAddr { + t.Errorf("want %v, got %v", envProxyAddr, got) + } +} diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 000000000..04b6371af --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. 
+ +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. +reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 000000000..b40725327 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,763 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_reflection_v1alpha/reflection.proto + +/* +Package grpc_reflection_v1alpha is a generated protocol buffer package. + +It is generated from these files: + grpc_reflection_v1alpha/reflection.proto + +It has these top-level messages: + ServerReflectionRequest + ExtensionRequest + ServerReflectionResponse + FileDescriptorResponse + ExtensionNumberResponse + ListServiceResponse + ServiceResponse + ErrorResponse +*/ +package grpc_reflection_v1alpha + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The message sent by the client when calling ServerReflectionInfo method. 
+type ServerReflectionRequest struct { + Host string `protobuf:"bytes,1,opt,name=host" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are valid to be assigned to MessageRequest: + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} } +func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionRequest) ProtoMessage() {} +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,oneof"` +} +type ServerReflectionRequest_FileContainingSymbol struct { + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,oneof"` +} +type ServerReflectionRequest_FileContainingExtension struct { + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,oneof"` +} +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,oneof"` +} +type ServerReflectionRequest_ListServices struct { + ListServices string 
`protobuf:"bytes,7,opt,name=list_services,json=listServices,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (m *ServerReflectionRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (m *ServerReflectionRequest) GetListServices() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServerReflectionRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerReflectionRequest_OneofMarshaler, _ServerReflectionRequest_OneofUnmarshaler, _ServerReflectionRequest_OneofSizer, []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } +} + +func _ServerReflectionRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerReflectionRequest) + // message_request + switch x := m.MessageRequest.(type) { + case *ServerReflectionRequest_FileByFilename: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FileByFilename) + case *ServerReflectionRequest_FileContainingSymbol: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FileContainingSymbol) + case *ServerReflectionRequest_FileContainingExtension: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileContainingExtension); err != nil { + return err + } + case *ServerReflectionRequest_AllExtensionNumbersOfType: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AllExtensionNumbersOfType) + case *ServerReflectionRequest_ListServices: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ListServices) + case nil: + default: + return fmt.Errorf("ServerReflectionRequest.MessageRequest has unexpected type %T", x) + } + return nil +} + +func _ServerReflectionRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerReflectionRequest) + switch tag { + case 3: // message_request.file_by_filename + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, 
err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_FileByFilename{x} + return true, err + case 4: // message_request.file_containing_symbol + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_FileContainingSymbol{x} + return true, err + case 5: // message_request.file_containing_extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExtensionRequest) + err := b.DecodeMessage(msg) + m.MessageRequest = &ServerReflectionRequest_FileContainingExtension{msg} + return true, err + case 6: // message_request.all_extension_numbers_of_type + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_AllExtensionNumbersOfType{x} + return true, err + case 7: // message_request.list_services + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_ListServices{x} + return true, err + default: + return false, nil + } +} + +func _ServerReflectionRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerReflectionRequest) + // message_request + switch x := m.MessageRequest.(type) { + case *ServerReflectionRequest_FileByFilename: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.FileByFilename))) + n += len(x.FileByFilename) + case *ServerReflectionRequest_FileContainingSymbol: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.FileContainingSymbol))) + n += len(x.FileContainingSymbol) + case *ServerReflectionRequest_FileContainingExtension: + s := proto.Size(x.FileContainingExtension) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionRequest_AllExtensionNumbersOfType: 
+ n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AllExtensionNumbersOfType))) + n += len(x.AllExtensionNumbersOfType) + case *ServerReflectionRequest_ListServices: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ListServices))) + n += len(x.ListServices) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"` +} + +func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} } +func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionRequest) ProtoMessage() {} +func (*ExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtensionRequest) GetContainingType() string { + if m != nil { + return m.ContainingType + } + return "" +} + +func (m *ExtensionRequest) GetExtensionNumber() int32 { + if m != nil { + return m.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"` + // The server set one of the following fields according to the message_request + // in the request. 
+ // + // Types that are valid to be assigned to MessageResponse: + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} } +func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionResponse) ProtoMessage() {} +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,oneof"` +} +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,oneof"` +} +type ServerReflectionResponse_ListServicesResponse struct { + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,oneof"` +} +type ServerReflectionResponse_ErrorResponse struct { + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {} +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} +func (*ServerReflectionResponse_ErrorResponse) 
isServerReflectionResponse_MessageResponse() {} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetValidHost() string { + if m != nil { + return m.ValidHost + } + return "" +} + +func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if m != nil { + return m.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServerReflectionResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerReflectionResponse_OneofMarshaler, _ServerReflectionResponse_OneofUnmarshaler, _ServerReflectionResponse_OneofSizer, []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } +} + +func _ServerReflectionResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerReflectionResponse) + // message_response + switch x := m.MessageResponse.(type) { + case *ServerReflectionResponse_FileDescriptorResponse: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileDescriptorResponse); err != nil { + return err + } + case *ServerReflectionResponse_AllExtensionNumbersResponse: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AllExtensionNumbersResponse); err != nil { + return err + } + case *ServerReflectionResponse_ListServicesResponse: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListServicesResponse); err != nil { + return err + } + case *ServerReflectionResponse_ErrorResponse: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ErrorResponse); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServerReflectionResponse.MessageResponse has unexpected type %T", x) + } + return nil +} + +func _ServerReflectionResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerReflectionResponse) + switch tag { + case 4: // message_response.file_descriptor_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileDescriptorResponse) + 
err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_FileDescriptorResponse{msg} + return true, err + case 5: // message_response.all_extension_numbers_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExtensionNumberResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_AllExtensionNumbersResponse{msg} + return true, err + case 6: // message_response.list_services_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListServiceResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_ListServicesResponse{msg} + return true, err + case 7: // message_response.error_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ErrorResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_ErrorResponse{msg} + return true, err + default: + return false, nil + } +} + +func _ServerReflectionResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerReflectionResponse) + // message_response + switch x := m.MessageResponse.(type) { + case *ServerReflectionResponse_FileDescriptorResponse: + s := proto.Size(x.FileDescriptorResponse) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_AllExtensionNumbersResponse: + s := proto.Size(x.AllExtensionNumbersResponse) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_ListServicesResponse: + s := proto.Size(x.ListServicesResponse) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_ErrorResponse: + s := proto.Size(x.ErrorResponse) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + 
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} } +func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorResponse) ProtoMessage() {} +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if m != nil { + return m.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"` +} + +func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} } +func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionNumberResponse) ProtoMessage() {} +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ExtensionNumberResponse) GetBaseTypeName() string { + if m != nil { + return m.BaseTypeName + } + return "" +} + +func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if m != nil { + return m.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service" json:"service,omitempty"` +} + +func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} } +func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceResponse) ProtoMessage() {} +func (*ListServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListServiceResponse) GetService() []*ServiceResponse { + if m != nil { + return m.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ServiceResponse) Reset() { *m = ServiceResponse{} } +func (m *ServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ServiceResponse) ProtoMessage() {} +func (*ServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ServiceResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage" json:"error_message,omitempty"` +} + +func (m *ErrorResponse) Reset() { *m = ErrorResponse{} } +func (m *ErrorResponse) String() string { return proto.CompactTextString(m) } +func (*ErrorResponse) ProtoMessage() {} +func (*ErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ErrorResponse) GetErrorCode() int32 { + if m != nil { + return m.ErrorCode + } + return 0 +} + +func (m *ErrorResponse) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func init() { + proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest") + proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest") + proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse") + proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse") + proto.RegisterType((*ExtensionNumberResponse)(nil), "grpc.reflection.v1alpha.ExtensionNumberResponse") + proto.RegisterType((*ListServiceResponse)(nil), "grpc.reflection.v1alpha.ListServiceResponse") + 
proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse") + proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ServerReflection service + +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc *grpc.ClientConn +} + +func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := grpc.NewClientStream(ctx, &_ServerReflection_serviceDesc.Streams[0], c.cc, "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for ServerReflection service + +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { + s.RegisterService(&_ServerReflection_serviceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + 
return m, nil +} + +var _ServerReflection_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_reflection_v1alpha/reflection.proto", +} + +func init() { proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40, + 0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75, + 0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e, + 0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e, + 0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7, + 0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7, + 0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e, + 0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94, + 0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43, + 0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36, + 0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d, + 0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3, + 0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd, + 0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 
0x8a, 0xde, + 0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd, + 0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc, + 0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8, + 0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f, + 0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33, + 0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb, + 0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58, + 0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a, + 0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce, + 0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e, + 0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25, + 0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d, + 0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8, + 0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62, + 0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77, + 0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97, + 0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0, + 0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39, + 0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2, + 0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad, + 0xd9, 0x26, 0xe9, 0xc2, 
0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29, + 0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0, + 0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73, + 0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d, + 0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6, + 0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5, + 0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto new file mode 100644 index 000000000..c52ccc6ab --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -0,0 +1,136 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +syntax = "proto3"; + +package grpc.reflection.v1alpha; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. 
+ rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be . + string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. 
+message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server set one of the following fields according to the message_request + // in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requst. + ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services request. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is . + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. 
+message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is . + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. + int32 error_code = 1; + string error_message = 2; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go new file mode 100644 index 000000000..5b0161885 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go @@ -0,0 +1,77 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: proto2.proto + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + proto2.proto + proto2_ext.proto + proto2_ext2.proto + test.proto + +It has these top-level messages: + ToBeExtended + Extension + AnotherExtension + SearchResponse + SearchRequest +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ToBeExtended struct { + Foo *int32 `protobuf:"varint,1,req,name=foo" json:"foo,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ToBeExtended) Reset() { *m = ToBeExtended{} } +func (m *ToBeExtended) String() string { return proto.CompactTextString(m) } +func (*ToBeExtended) ProtoMessage() {} +func (*ToBeExtended) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +var extRange_ToBeExtended = []proto.ExtensionRange{ + {10, 30}, +} + +func (*ToBeExtended) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ToBeExtended +} + +func (m *ToBeExtended) GetFoo() int32 { + if m != nil && m.Foo != nil { + return *m.Foo + } + return 0 +} + +func init() { + proto.RegisterType((*ToBeExtended)(nil), "grpc.testing.ToBeExtended") +} + +func init() { proto.RegisterFile("proto2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 86 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0xca, 0x2f, + 0xc9, 0x37, 0xd2, 0x03, 0x53, 0x42, 0x3c, 0xe9, 0x45, 0x05, 0xc9, 0x7a, 0x25, 0xa9, 0xc5, 0x25, + 0x99, 0x79, 0xe9, 0x4a, 0x6a, 0x5c, 0x3c, 0x21, 0xf9, 0x4e, 0xa9, 0xae, 0x15, 0x25, 0xa9, 0x79, + 0x29, 0xa9, 0x29, 0x42, 0x02, 0x5c, 0xcc, 0x69, 0xf9, 0xf9, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0xac, + 0x41, 0x20, 0xa6, 0x16, 0x0b, 0x07, 0x97, 0x80, 0x3c, 0x20, 0x00, 0x00, 0xff, 0xff, 0x74, 0x86, + 0x9c, 0x08, 0x44, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto new file mode 100644 index 000000000..a675d143d --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto @@ -0,0 +1,22 @@ +// Copyright 2017 gRPC authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package grpc.testing; + +message ToBeExtended { + required int32 foo = 1; + extensions 10 to 30; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go new file mode 100644 index 000000000..9ae4f07af --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go @@ -0,0 +1,82 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: proto2_ext.proto + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Extension struct { + Whatzit *int32 `protobuf:"varint,1,opt,name=whatzit" json:"whatzit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Extension) Reset() { *m = Extension{} } +func (m *Extension) String() string { return proto.CompactTextString(m) } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Extension) GetWhatzit() int32 { + if m != nil && m.Whatzit != nil { + return *m.Whatzit + } + return 0 +} + +var E_Foo = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), + ExtensionType: (*int32)(nil), + Field: 13, + Name: "grpc.testing.foo", + Tag: "varint,13,opt,name=foo", + Filename: "proto2_ext.proto", +} + +var E_Bar = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), + ExtensionType: (*Extension)(nil), + Field: 17, + Name: "grpc.testing.bar", + Tag: "bytes,17,opt,name=bar", + Filename: "proto2_ext.proto", +} + +var E_Baz = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), + ExtensionType: (*SearchRequest)(nil), + Field: 19, + Name: "grpc.testing.baz", + Tag: "bytes,19,opt,name=baz", + Filename: "proto2_ext.proto", +} + +func init() { + proto.RegisterType((*Extension)(nil), "grpc.testing.Extension") + proto.RegisterExtension(E_Foo) + proto.RegisterExtension(E_Bar) + proto.RegisterExtension(E_Baz) +} + +func init() { proto.RegisterFile("proto2_ext.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 179 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x28, 0xca, 0x2f, + 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0xd1, 0x03, 0x33, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, + 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0xf2, 0x10, 0x39, 0x29, 0x2e, 0x90, + 0x30, 0x84, 0xad, 0xa4, 0xca, 0xc5, 0xe9, 0x5a, 0x51, 0x92, 0x9a, 0x57, 0x9c, 0x99, 0x9f, 0x27, + 0x24, 0xc1, 0xc5, 0x5e, 0x9e, 
0x91, 0x58, 0x52, 0x95, 0x59, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, + 0x1a, 0x04, 0xe3, 0x5a, 0xe9, 0x70, 0x31, 0xa7, 0xe5, 0xe7, 0x0b, 0x49, 0xe9, 0x21, 0x1b, 0xab, + 0x17, 0x92, 0xef, 0x94, 0x0a, 0xd6, 0x9d, 0x92, 0x9a, 0x22, 0xc1, 0x0b, 0xd6, 0x01, 0x52, 0x66, + 0xe5, 0xca, 0xc5, 0x9c, 0x94, 0x58, 0x84, 0x57, 0xb5, 0xa0, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x38, + 0xaa, 0x0a, 0xb8, 0x4b, 0x82, 0x40, 0xfa, 0xad, 0x3c, 0x41, 0xc6, 0x54, 0xe1, 0x35, 0x46, 0x18, + 0x6c, 0x8c, 0x34, 0xaa, 0x8a, 0xe0, 0xd4, 0xc4, 0xa2, 0xe4, 0x8c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, + 0xe2, 0x12, 0x90, 0x51, 0x55, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x71, 0x6b, 0x94, 0x9f, 0x21, + 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto new file mode 100644 index 000000000..a4942e481 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto @@ -0,0 +1,30 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto2"; + +package grpc.testing; + +import "proto2.proto"; +import "test.proto"; + +extend ToBeExtended { + optional int32 foo = 13; + optional Extension bar = 17; + optional SearchRequest baz = 19; +} + +message Extension { + optional int32 whatzit = 1; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go new file mode 100644 index 000000000..26ec982aa --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go @@ -0,0 +1,71 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: proto2_ext2.proto + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AnotherExtension struct { + Whatchamacallit *int32 `protobuf:"varint,1,opt,name=whatchamacallit" json:"whatchamacallit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AnotherExtension) Reset() { *m = AnotherExtension{} } +func (m *AnotherExtension) String() string { return proto.CompactTextString(m) } +func (*AnotherExtension) ProtoMessage() {} +func (*AnotherExtension) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *AnotherExtension) GetWhatchamacallit() int32 { + if m != nil && m.Whatchamacallit != nil { + return *m.Whatchamacallit + } + return 0 +} + +var E_Frob = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), + ExtensionType: (*string)(nil), + Field: 23, + Name: "grpc.testing.frob", + Tag: "bytes,23,opt,name=frob", + Filename: "proto2_ext2.proto", +} + +var E_Nitz = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtended)(nil), + ExtensionType: (*AnotherExtension)(nil), + Field: 29, + Name: "grpc.testing.nitz", + Tag: "bytes,29,opt,name=nitz", + Filename: "proto2_ext2.proto", +} + +func init() { + 
proto.RegisterType((*AnotherExtension)(nil), "grpc.testing.AnotherExtension") + proto.RegisterExtension(E_Frob) + proto.RegisterExtension(E_Nitz) +} + +func init() { proto.RegisterFile("proto2_ext2.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 165 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x28, 0xca, 0x2f, + 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0x31, 0xd2, 0x03, 0xb3, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, + 0xf5, 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0x0a, 0x20, 0x72, 0x4a, 0x36, + 0x5c, 0x02, 0x8e, 0x79, 0xf9, 0x25, 0x19, 0xa9, 0x45, 0xae, 0x15, 0x25, 0xa9, 0x79, 0xc5, 0x99, + 0xf9, 0x79, 0x42, 0x1a, 0x5c, 0xfc, 0xe5, 0x19, 0x89, 0x25, 0xc9, 0x19, 0x89, 0xb9, 0x89, 0xc9, + 0x89, 0x39, 0x39, 0x99, 0x25, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0xe8, 0xc2, 0x56, 0x7a, + 0x5c, 0x2c, 0x69, 0x45, 0xf9, 0x49, 0x42, 0x52, 0x7a, 0xc8, 0x56, 0xe8, 0x85, 0xe4, 0x3b, 0xa5, + 0x82, 0x8d, 0x4b, 0x49, 0x4d, 0x91, 0x10, 0x57, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xab, 0xb3, 0xf2, + 0xe3, 0x62, 0xc9, 0xcb, 0x2c, 0xa9, 0xc2, 0xab, 0x5e, 0x56, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x0e, + 0x55, 0x05, 0xba, 0x1b, 0x83, 0xc0, 0xe6, 0x00, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x7e, 0x0d, + 0x26, 0xed, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto new file mode 100644 index 000000000..d91ba0061 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto @@ -0,0 +1,28 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package grpc.testing; + +import "proto2.proto"; + +extend ToBeExtended { + optional string frob = 23; + optional AnotherExtension nitz = 29; +} + +message AnotherExtension { + optional int32 whatchamacallit = 1; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go new file mode 100644 index 000000000..62f71ff15 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go @@ -0,0 +1,247 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: test.proto + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type SearchResponse struct { + Results []*SearchResponse_Result `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *SearchResponse) GetResults() []*SearchResponse_Result { + if m != nil { + return m.Results + } + return nil +} + +type SearchResponse_Result struct { + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Snippets []string `protobuf:"bytes,3,rep,name=snippets" json:"snippets,omitempty"` +} + +func (m *SearchResponse_Result) Reset() { *m = SearchResponse_Result{} } +func (m *SearchResponse_Result) String() string { return proto.CompactTextString(m) } +func (*SearchResponse_Result) ProtoMessage() {} +func (*SearchResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } + +func (m *SearchResponse_Result) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *SearchResponse_Result) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *SearchResponse_Result) GetSnippets() []string { + if m != nil { + return m.Snippets + } + return nil +} + +type SearchRequest struct { + Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +func (m *SearchRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func init() { + 
proto.RegisterType((*SearchResponse)(nil), "grpc.testing.SearchResponse") + proto.RegisterType((*SearchResponse_Result)(nil), "grpc.testing.SearchResponse.Result") + proto.RegisterType((*SearchRequest)(nil), "grpc.testing.SearchRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for SearchService service + +type SearchServiceClient interface { + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) + StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) +} + +type searchServiceClient struct { + cc *grpc.ClientConn +} + +func NewSearchServiceClient(cc *grpc.ClientConn) SearchServiceClient { + return &searchServiceClient{cc} +} + +func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { + out := new(SearchResponse) + err := grpc.Invoke(ctx, "/grpc.testing.SearchService/Search", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *searchServiceClient) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SearchService_serviceDesc.Streams[0], c.cc, "/grpc.testing.SearchService/StreamingSearch", opts...) 
+ if err != nil { + return nil, err + } + x := &searchServiceStreamingSearchClient{stream} + return x, nil +} + +type SearchService_StreamingSearchClient interface { + Send(*SearchRequest) error + Recv() (*SearchResponse, error) + grpc.ClientStream +} + +type searchServiceStreamingSearchClient struct { + grpc.ClientStream +} + +func (x *searchServiceStreamingSearchClient) Send(m *SearchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *searchServiceStreamingSearchClient) Recv() (*SearchResponse, error) { + m := new(SearchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for SearchService service + +type SearchServiceServer interface { + Search(context.Context, *SearchRequest) (*SearchResponse, error) + StreamingSearch(SearchService_StreamingSearchServer) error +} + +func RegisterSearchServiceServer(s *grpc.Server, srv SearchServiceServer) { + s.RegisterService(&_SearchService_serviceDesc, srv) +} + +func _SearchService_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServiceServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.SearchService/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServiceServer).Search(ctx, req.(*SearchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SearchService_StreamingSearch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SearchServiceServer).StreamingSearch(&searchServiceStreamingSearchServer{stream}) +} + +type SearchService_StreamingSearchServer interface { + Send(*SearchResponse) error + Recv() (*SearchRequest, error) + grpc.ServerStream +} + +type 
searchServiceStreamingSearchServer struct { + grpc.ServerStream +} + +func (x *searchServiceStreamingSearchServer) Send(m *SearchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *searchServiceStreamingSearchServer) Recv() (*SearchRequest, error) { + m := new(SearchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SearchService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.SearchService", + HandlerType: (*SearchServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Search", + Handler: _SearchService_Search_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingSearch", + Handler: _SearchService_StreamingSearch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "test.proto", +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 231 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xbd, 0x4a, 0xc5, 0x40, + 0x10, 0x85, 0x59, 0x83, 0xd1, 0x3b, 0xfe, 0x32, 0x58, 0x84, 0x68, 0x11, 0xae, 0x08, 0xa9, 0x16, + 0xb9, 0xd6, 0x56, 0xb6, 0x16, 0xb2, 0x79, 0x82, 0x6b, 0x18, 0xe2, 0x42, 0x4c, 0x36, 0x33, 0x13, + 0xc1, 0x87, 0xb1, 0xf5, 0x39, 0x25, 0x59, 0x23, 0x0a, 0x62, 0x63, 0xb7, 0xe7, 0xe3, 0xcc, 0xb7, + 0xbb, 0x0c, 0x80, 0x92, 0xa8, 0x0d, 0xdc, 0x6b, 0x8f, 0x87, 0x0d, 0x87, 0xda, 0x4e, 0xc0, 0x77, + 0xcd, 0xfa, 0xcd, 0xc0, 0x71, 0x45, 0x5b, 0xae, 0x9f, 0x1c, 0x49, 0xe8, 0x3b, 0x21, 0xbc, 0x85, + 0x3d, 0x26, 0x19, 0x5b, 0x95, 0xcc, 0x14, 0x49, 0x79, 0xb0, 0xb9, 0xb4, 0xdf, 0x47, 0xec, 0xcf, + 0xba, 0x75, 0x73, 0xd7, 0x2d, 0x33, 0xf9, 0x3d, 0xa4, 0x11, 0xe1, 0x29, 0x24, 0x23, 0xb7, 0x99, + 0x29, 0x4c, 0xb9, 0x72, 0xd3, 0x11, 0xcf, 0x60, 0x57, 0xbd, 0xb6, 0x94, 0xed, 0xcc, 0x2c, 0x06, + 0xcc, 0x61, 0x5f, 0x3a, 0x1f, 0x02, 0xa9, 0x64, 0x49, 0x91, 0x94, 0x2b, 0xf7, 0x95, 0xd7, 0x57, + 0x70, 0xb4, 0xdc, 0x37, 
0x8c, 0x24, 0x3a, 0x29, 0x86, 0x91, 0xf8, 0xf5, 0x53, 0x1b, 0xc3, 0xe6, + 0xdd, 0x2c, 0xbd, 0x8a, 0xf8, 0xc5, 0xd7, 0x84, 0x77, 0x90, 0x46, 0x80, 0xe7, 0xbf, 0x3f, 0x7f, + 0xd6, 0xe5, 0x17, 0x7f, 0xfd, 0x0d, 0x1f, 0xe0, 0xa4, 0x52, 0xa6, 0xed, 0xb3, 0xef, 0x9a, 0x7f, + 0xdb, 0x4a, 0x73, 0x6d, 0x1e, 0xd3, 0x79, 0x09, 0x37, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x20, + 0xd6, 0x09, 0xb8, 0x92, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto new file mode 100644 index 000000000..cae3f01a0 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto @@ -0,0 +1,35 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package grpc.testing; + +message SearchResponse { + message Result { + string url = 1; + string title = 2; + repeated string snippets = 3; + } + repeated Result results = 1; +} + +message SearchRequest { + string query = 1; +} + +service SearchService { + rpc Search(SearchRequest) returns (SearchResponse); + rpc StreamingSearch(stream SearchRequest) returns (stream SearchResponse); +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.pb.go new file mode 100644 index 000000000..78876aae7 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. +// source: testv3.proto +// DO NOT EDIT! + +/* +Package grpc_testingv3 is a generated protocol buffer package. + +It is generated from these files: + testv3.proto + +It has these top-level messages: + SearchResponseV3 + SearchRequestV3 +*/ +package grpc_testingv3 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type SearchResponseV3 struct { + Results []*SearchResponseV3_Result `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *SearchResponseV3) Reset() { *m = SearchResponseV3{} } +func (m *SearchResponseV3) String() string { return proto.CompactTextString(m) } +func (*SearchResponseV3) ProtoMessage() {} +func (*SearchResponseV3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *SearchResponseV3) GetResults() []*SearchResponseV3_Result { + if m != nil { + return m.Results + } + return nil +} + +type SearchResponseV3_Result struct { + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Snippets []string `protobuf:"bytes,3,rep,name=snippets" json:"snippets,omitempty"` +} + +func (m *SearchResponseV3_Result) Reset() { *m = SearchResponseV3_Result{} } +func (m *SearchResponseV3_Result) String() string { return proto.CompactTextString(m) } +func (*SearchResponseV3_Result) ProtoMessage() {} +func (*SearchResponseV3_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +type SearchRequestV3 struct { + Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` +} + +func (m *SearchRequestV3) Reset() { *m = SearchRequestV3{} } +func (m *SearchRequestV3) String() string { return proto.CompactTextString(m) } +func (*SearchRequestV3) ProtoMessage() {} +func (*SearchRequestV3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*SearchResponseV3)(nil), "grpc.testingv3.SearchResponseV3") + proto.RegisterType((*SearchResponseV3_Result)(nil), "grpc.testingv3.SearchResponseV3.Result") + proto.RegisterType((*SearchRequestV3)(nil), "grpc.testingv3.SearchRequestV3") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for SearchServiceV3 service + +type SearchServiceV3Client interface { + Search(ctx context.Context, in *SearchRequestV3, opts ...grpc.CallOption) (*SearchResponseV3, error) + StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchServiceV3_StreamingSearchClient, error) +} + +type searchServiceV3Client struct { + cc *grpc.ClientConn +} + +func NewSearchServiceV3Client(cc *grpc.ClientConn) SearchServiceV3Client { + return &searchServiceV3Client{cc} +} + +func (c *searchServiceV3Client) Search(ctx context.Context, in *SearchRequestV3, opts ...grpc.CallOption) (*SearchResponseV3, error) { + out := new(SearchResponseV3) + err := grpc.Invoke(ctx, "/grpc.testingv3.SearchServiceV3/Search", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *searchServiceV3Client) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchServiceV3_StreamingSearchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SearchServiceV3_serviceDesc.Streams[0], c.cc, "/grpc.testingv3.SearchServiceV3/StreamingSearch", opts...) 
+ if err != nil { + return nil, err + } + x := &searchServiceV3StreamingSearchClient{stream} + return x, nil +} + +type SearchServiceV3_StreamingSearchClient interface { + Send(*SearchRequestV3) error + Recv() (*SearchResponseV3, error) + grpc.ClientStream +} + +type searchServiceV3StreamingSearchClient struct { + grpc.ClientStream +} + +func (x *searchServiceV3StreamingSearchClient) Send(m *SearchRequestV3) error { + return x.ClientStream.SendMsg(m) +} + +func (x *searchServiceV3StreamingSearchClient) Recv() (*SearchResponseV3, error) { + m := new(SearchResponseV3) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for SearchServiceV3 service + +type SearchServiceV3Server interface { + Search(context.Context, *SearchRequestV3) (*SearchResponseV3, error) + StreamingSearch(SearchServiceV3_StreamingSearchServer) error +} + +func RegisterSearchServiceV3Server(s *grpc.Server, srv SearchServiceV3Server) { + s.RegisterService(&_SearchServiceV3_serviceDesc, srv) +} + +func _SearchServiceV3_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequestV3) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServiceV3Server).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testingv3.SearchServiceV3/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServiceV3Server).Search(ctx, req.(*SearchRequestV3)) + } + return interceptor(ctx, in, info, handler) +} + +func _SearchServiceV3_StreamingSearch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SearchServiceV3Server).StreamingSearch(&searchServiceV3StreamingSearchServer{stream}) +} + +type SearchServiceV3_StreamingSearchServer interface { + Send(*SearchResponseV3) error + Recv() 
(*SearchRequestV3, error) + grpc.ServerStream +} + +type searchServiceV3StreamingSearchServer struct { + grpc.ServerStream +} + +func (x *searchServiceV3StreamingSearchServer) Send(m *SearchResponseV3) error { + return x.ServerStream.SendMsg(m) +} + +func (x *searchServiceV3StreamingSearchServer) Recv() (*SearchRequestV3, error) { + m := new(SearchRequestV3) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SearchServiceV3_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testingv3.SearchServiceV3", + HandlerType: (*SearchServiceV3Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Search", + Handler: _SearchServiceV3_Search_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingSearch", + Handler: _SearchServiceV3_StreamingSearch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("testv3.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 240 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x91, 0x41, 0x4b, 0xc3, 0x40, + 0x10, 0x85, 0x59, 0x83, 0xd1, 0x8e, 0x62, 0xcb, 0xe2, 0x21, 0xe4, 0x62, 0xe8, 0xa5, 0x39, 0x2d, + 0xd2, 0xfd, 0x05, 0x9e, 0xf5, 0xb4, 0x81, 0xe2, 0xb5, 0x86, 0x21, 0x2e, 0xc4, 0x64, 0x3b, 0x33, + 0x09, 0xf8, 0x7b, 0xfc, 0x13, 0xfe, 0x3c, 0x49, 0xd2, 0x08, 0x0a, 0xe2, 0xa5, 0xb7, 0x7d, 0x8f, + 0xf7, 0xbe, 0xe5, 0x31, 0x70, 0x2d, 0xc8, 0xd2, 0x5b, 0x13, 0xa8, 0x95, 0x56, 0xdf, 0x54, 0x14, + 0x4a, 0x33, 0x58, 0xbe, 0xa9, 0x7a, 0xbb, 0xfe, 0x50, 0xb0, 0x2a, 0x70, 0x4f, 0xe5, 0xab, 0x43, + 0x0e, 0x6d, 0xc3, 0xb8, 0xb3, 0xfa, 0x01, 0x2e, 0x08, 0xb9, 0xab, 0x85, 0x13, 0x95, 0x45, 0xf9, + 0xd5, 0x76, 0x63, 0x7e, 0xd6, 0xcc, 0xef, 0x8a, 0x71, 0x63, 0xde, 0xcd, 0xbd, 0xf4, 0x09, 0xe2, + 0xc9, 0xd2, 0x2b, 0x88, 0x3a, 0xaa, 0x13, 0x95, 0xa9, 0x7c, 0xe1, 0x86, 0xa7, 0xbe, 0x85, 0x73, + 0xf1, 0x52, 0x63, 0x72, 0x36, 0x7a, 
0x93, 0xd0, 0x29, 0x5c, 0x72, 0xe3, 0x43, 0x40, 0xe1, 0x24, + 0xca, 0xa2, 0x7c, 0xe1, 0xbe, 0xf5, 0x7a, 0x03, 0xcb, 0xf9, 0xc7, 0x43, 0x87, 0x2c, 0x3b, 0x3b, + 0x40, 0x0e, 0x1d, 0xd2, 0xfb, 0x11, 0x3c, 0x89, 0xed, 0xa7, 0x9a, 0x93, 0x05, 0x52, 0xef, 0xcb, + 0x61, 0xcd, 0x23, 0xc4, 0x93, 0xa5, 0xef, 0xfe, 0x9a, 0x71, 0x84, 0xa6, 0xd9, 0x7f, 0x3b, 0xf5, + 0x33, 0x2c, 0x0b, 0x21, 0xdc, 0xbf, 0xf9, 0xa6, 0x3a, 0x19, 0x35, 0x57, 0xf7, 0xea, 0x25, 0x1e, + 0x0f, 0x64, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd4, 0xe6, 0xa0, 0xf9, 0xb0, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto new file mode 100644 index 000000000..1e175193e --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package grpc.testingv3; + +message SearchResponseV3 { + message Result { + string url = 1; + string title = 2; + repeated string snippets = 3; + } + repeated Result results = 1; +} + +message SearchRequestV3 { + string query = 1; +} + +service SearchServiceV3 { + rpc Search(SearchRequestV3) returns (SearchResponseV3); + rpc StreamingSearch(stream SearchRequestV3) returns (stream SearchResponseV3); +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 000000000..19bfc51e5 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,398 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. + reflection.Register(s) + + s.Serve(lis) + +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +type serverReflectionServer struct { + s *grpc.Server + // TODO add more cache if necessary + serviceInfo map[string]grpc.ServiceInfo // cache for s.GetServiceInfo() +} + +// Register registers the server reflection service on the given gRPC server. +func Register(s *grpc.Server) { + rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ + s: s, + }) +} + +// protoMessage is used for type assertion on proto messages. +// Generated proto message implements function Descriptor(), but Descriptor() +// is not part of interface proto.Message. This interface is needed to +// call Descriptor(). 
+type protoMessage interface { + Descriptor() ([]byte, []int) +} + +// fileDescForType gets the file descriptor for the given type. +// The given type should be a proto message. +func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + enc, _ := m.Descriptor() + + return s.decodeFileDesc(enc) +} + +// decodeFileDesc does decompression and unmarshalling on the given +// file descriptor byte slice. +func (s *serverReflectionServer) decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { + raw, err := decompress(enc) + if err != nil { + return nil, fmt.Errorf("failed to decompress enc: %v", err) + } + + fd := new(dpb.FileDescriptorProto) + if err := proto.Unmarshal(raw, fd); err != nil { + return nil, fmt.Errorf("bad descriptor: %v", err) + } + return fd, nil +} + +// decompress does gzip decompression. 
+func decompress(b []byte) ([]byte, error) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + out, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + return out, nil +} + +func (s *serverReflectionServer) typeForName(name string) (reflect.Type, error) { + pt := proto.MessageType(name) + if pt == nil { + return nil, fmt.Errorf("unknown type: %q", name) + } + st := pt.Elem() + + return st, nil +} + +func (s *serverReflectionServer) fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + var extDesc *proto.ExtensionDesc + for id, desc := range proto.RegisteredExtensions(m) { + if id == ext { + extDesc = desc + break + } + } + + if extDesc == nil { + return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) + } + + return s.decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) +} + +func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + exts := proto.RegisteredExtensions(m) + out := make([]int32, 0, len(exts)) + for id := range exts { + out = append(out, id) + } + return out, nil +} + +// fileDescEncodingByFilename finds the file descriptor for given filename, +// does marshalling on it and returns the marshalled result. 
+func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) { + enc := proto.FileDescriptor(name) + if enc == nil { + return nil, fmt.Errorf("unknown file: %v", name) + } + fd, err := s.decodeFileDesc(enc) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// serviceMetadataForSymbol finds the metadata for name in s.serviceInfo. +// name should be a service name or a method name. +func (s *serverReflectionServer) serviceMetadataForSymbol(name string) (interface{}, error) { + if s.serviceInfo == nil { + s.serviceInfo = s.s.GetServiceInfo() + } + + // Check if it's a service name. + if info, ok := s.serviceInfo[name]; ok { + return info.Metadata, nil + } + + // Check if it's a method name. + pos := strings.LastIndex(name, ".") + // Not a valid method name. + if pos == -1 { + return nil, fmt.Errorf("unknown symbol: %v", name) + } + + info, ok := s.serviceInfo[name[:pos]] + // Substring before last "." is not a service name. + if !ok { + return nil, fmt.Errorf("unknown symbol: %v", name) + } + + // Search the method name in info.Methods. + var found bool + for _, m := range info.Methods { + if m.Name == name[pos+1:] { + found = true + break + } + } + if found { + return info.Metadata, nil + } + + return nil, fmt.Errorf("unknown symbol: %v", name) +} + +// parseMetadata finds the file descriptor bytes specified meta. +// For SupportPackageIsVersion4, m is the name of the proto file, we +// call proto.FileDescriptor to get the byte slice. +// For SupportPackageIsVersion3, m is a byte slice itself. +func parseMetadata(meta interface{}) ([]byte, bool) { + // Check if meta is the file name. + if fileNameForMeta, ok := meta.(string); ok { + return proto.FileDescriptor(fileNameForMeta), true + } + + // Check if meta is the byte slice. 
+ if enc, ok := meta.([]byte); ok { + return enc, true + } + + return nil, false +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol, +// does marshalling on it and returns the marshalled result. +// The given symbol can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) { + var ( + fd *dpb.FileDescriptorProto + ) + // Check if it's a type name. + if st, err := s.typeForName(name); err == nil { + fd, err = s.fileDescForType(st) + if err != nil { + return nil, err + } + } else { // Check if it's a service name or a method name. + meta, err := s.serviceMetadataForSymbol(name) + + // Metadata not found. + if err != nil { + return nil, err + } + + // Metadata not valid. + enc, ok := parseMetadata(meta) + if !ok { + return nil, fmt.Errorf("invalid file descriptor for symbol: %v", name) + } + + fd, err = s.decodeFileDesc(enc) + if err != nil { + return nil, err + } + } + + return proto.Marshal(fd) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing given extension, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) { + st, err := s.typeForName(typeName) + if err != nil { + return nil, err + } + fd, err := s.fileDescContainingExtension(st, extNum) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. +func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + st, err := s.typeForName(name) + if err != nil { + return nil, err + } + extNums, err := s.allExtensionNumbersForType(st) + if err != nil { + return nil, err + } + return extNums, nil +} + +// ServerReflectionInfo is the reflection service handler. 
+func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &rpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *rpb.ServerReflectionRequest_FileByFilename: + b, err := s.fileDescEncodingByFilename(req.FileByFilename) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } 
+ case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *rpb.ServerReflectionRequest_ListServices: + if s.serviceInfo == nil { + s.serviceInfo = s.s.GetServiceInfo() + } + serviceResponses := make([]*rpb.ServiceResponse, 0, len(s.serviceInfo)) + for n := range s.serviceInfo { + serviceResponses = append(serviceResponses, &rpb.ServiceResponse{ + Name: n, + }) + } + out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &rpb.ListServiceResponse{ + Service: serviceResponses, + }, + } + default: + return grpc.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection_test.go b/vendor/google.golang.org/grpc/reflection/serverreflection_test.go new file mode 100644 index 000000000..908589085 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection_test.go @@ -0,0 +1,520 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc -I grpc_testing --go_out=plugins=grpc:grpc_testing/ grpc_testing/proto2.proto grpc_testing/proto2_ext.proto grpc_testing/proto2_ext2.proto grpc_testing/test.proto + +// Note: grpc_testingv3/testv3.pb.go is not re-generated because it was +// intentionally generated by an older version of protoc-gen-go. + +package reflection + +import ( + "fmt" + "net" + "reflect" + "sort" + "testing" + + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "golang.org/x/net/context" + "google.golang.org/grpc" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + pb "google.golang.org/grpc/reflection/grpc_testing" + pbv3 "google.golang.org/grpc/reflection/grpc_testingv3" +) + +var ( + s = &serverReflectionServer{} + // fileDescriptor of each test proto file. + fdTest *dpb.FileDescriptorProto + fdTestv3 *dpb.FileDescriptorProto + fdProto2 *dpb.FileDescriptorProto + fdProto2Ext *dpb.FileDescriptorProto + fdProto2Ext2 *dpb.FileDescriptorProto + // fileDescriptor marshalled. 
+ fdTestByte []byte + fdTestv3Byte []byte + fdProto2Byte []byte + fdProto2ExtByte []byte + fdProto2Ext2Byte []byte +) + +func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) { + enc := proto.FileDescriptor(filename) + if enc == nil { + panic(fmt.Sprintf("failed to find fd for file: %v", filename)) + } + fd, err := s.decodeFileDesc(enc) + if err != nil { + panic(fmt.Sprintf("failed to decode enc: %v", err)) + } + b, err := proto.Marshal(fd) + if err != nil { + panic(fmt.Sprintf("failed to marshal fd: %v", err)) + } + return fd, b +} + +func init() { + fdTest, fdTestByte = loadFileDesc("test.proto") + fdTestv3, fdTestv3Byte = loadFileDesc("testv3.proto") + fdProto2, fdProto2Byte = loadFileDesc("proto2.proto") + fdProto2Ext, fdProto2ExtByte = loadFileDesc("proto2_ext.proto") + fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("proto2_ext2.proto") +} + +func TestFileDescForType(t *testing.T) { + for _, test := range []struct { + st reflect.Type + wantFd *dpb.FileDescriptorProto + }{ + {reflect.TypeOf(pb.SearchResponse_Result{}), fdTest}, + {reflect.TypeOf(pb.ToBeExtended{}), fdProto2}, + } { + fd, err := s.fileDescForType(test.st) + if err != nil || !proto.Equal(fd, test.wantFd) { + t.Errorf("fileDescForType(%q) = %q, %v, want %q, ", test.st, fd, err, test.wantFd) + } + } +} + +func TestTypeForName(t *testing.T) { + for _, test := range []struct { + name string + want reflect.Type + }{ + {"grpc.testing.SearchResponse", reflect.TypeOf(pb.SearchResponse{})}, + } { + r, err := s.typeForName(test.name) + if err != nil || r != test.want { + t.Errorf("typeForName(%q) = %q, %v, want %q, ", test.name, r, err, test.want) + } + } +} + +func TestTypeForNameNotFound(t *testing.T) { + for _, test := range []string{ + "grpc.testing.not_exiting", + } { + _, err := s.typeForName(test) + if err == nil { + t.Errorf("typeForName(%q) = _, %v, want _, ", test, err) + } + } +} + +func TestFileDescContainingExtension(t *testing.T) { + for _, test := range []struct { + st 
reflect.Type + extNum int32 + want *dpb.FileDescriptorProto + }{ + {reflect.TypeOf(pb.ToBeExtended{}), 13, fdProto2Ext}, + {reflect.TypeOf(pb.ToBeExtended{}), 17, fdProto2Ext}, + {reflect.TypeOf(pb.ToBeExtended{}), 19, fdProto2Ext}, + {reflect.TypeOf(pb.ToBeExtended{}), 23, fdProto2Ext2}, + {reflect.TypeOf(pb.ToBeExtended{}), 29, fdProto2Ext2}, + } { + fd, err := s.fileDescContainingExtension(test.st, test.extNum) + if err != nil || !proto.Equal(fd, test.want) { + t.Errorf("fileDescContainingExtension(%q) = %q, %v, want %q, ", test.st, fd, err, test.want) + } + } +} + +// intArray is used to sort []int32 +type intArray []int32 + +func (s intArray) Len() int { return len(s) } +func (s intArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s intArray) Less(i, j int) bool { return s[i] < s[j] } + +func TestAllExtensionNumbersForType(t *testing.T) { + for _, test := range []struct { + st reflect.Type + want []int32 + }{ + {reflect.TypeOf(pb.ToBeExtended{}), []int32{13, 17, 19, 23, 29}}, + } { + r, err := s.allExtensionNumbersForType(test.st) + sort.Sort(intArray(r)) + if err != nil || !reflect.DeepEqual(r, test.want) { + t.Errorf("allExtensionNumbersForType(%q) = %v, %v, want %v, ", test.st, r, err, test.want) + } + } +} + +// Do end2end tests. + +type server struct{} + +func (s *server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.SearchResponse, error) { + return &pb.SearchResponse{}, nil +} + +func (s *server) StreamingSearch(stream pb.SearchService_StreamingSearchServer) error { + return nil +} + +type serverV3 struct{} + +func (s *serverV3) Search(ctx context.Context, in *pbv3.SearchRequestV3) (*pbv3.SearchResponseV3, error) { + return &pbv3.SearchResponseV3{}, nil +} + +func (s *serverV3) StreamingSearch(stream pbv3.SearchServiceV3_StreamingSearchServer) error { + return nil +} + +func TestReflectionEnd2end(t *testing.T) { + // Start server. 
+ lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + pb.RegisterSearchServiceServer(s, &server{}) + pbv3.RegisterSearchServiceV3Server(s, &serverV3{}) + // Register reflection service on s. + Register(s) + go s.Serve(lis) + + // Create client. + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("cannot connect to server: %v", err) + } + defer conn.Close() + + c := rpb.NewServerReflectionClient(conn) + stream, err := c.ServerReflectionInfo(context.Background(), grpc.FailFast(false)) + if err != nil { + t.Fatalf("cannot get ServerReflectionInfo: %v", err) + } + + testFileByFilename(t, stream) + testFileByFilenameError(t, stream) + testFileContainingSymbol(t, stream) + testFileContainingSymbolError(t, stream) + testFileContainingExtension(t, stream) + testFileContainingExtensionError(t, stream) + testAllExtensionNumbersOfType(t, stream) + testAllExtensionNumbersOfTypeError(t, stream) + testListServices(t, stream) + + s.Stop() +} + +func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + filename string + want []byte + }{ + {"test.proto", fdTestByte}, + {"proto2.proto", fdProto2Byte}, + {"proto2_ext.proto", fdProto2ExtByte}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: test.filename, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. 
+ t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_FileDescriptorResponse: + if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { + t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", test.filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) + } + default: + t.Errorf("FileByFilename(%v) = %v, want type ", test.filename, r.MessageResponse) + } + } +} + +func testFileByFilenameError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []string{ + "test.poto", + "proo2.proto", + "proto2_et.proto", + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: test, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ErrorResponse: + default: + t.Errorf("FileByFilename(%v) = %v, want type ", test, r.MessageResponse) + } + } +} + +func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + symbol string + want []byte + }{ + {"grpc.testing.SearchService", fdTestByte}, + {"grpc.testing.SearchService.Search", fdTestByte}, + {"grpc.testing.SearchService.StreamingSearch", fdTestByte}, + {"grpc.testing.SearchResponse", fdTestByte}, + {"grpc.testing.ToBeExtended", fdProto2Byte}, + // Test support package v3. 
+ {"grpc.testingv3.SearchServiceV3", fdTestv3Byte}, + {"grpc.testingv3.SearchServiceV3.Search", fdTestv3Byte}, + {"grpc.testingv3.SearchServiceV3.StreamingSearch", fdTestv3Byte}, + {"grpc.testingv3.SearchResponseV3", fdTestv3Byte}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: test.symbol, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_FileDescriptorResponse: + if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { + t.Errorf("FileContainingSymbol(%v)\nreceived: %q,\nwant: %q", test.symbol, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) + } + default: + t.Errorf("FileContainingSymbol(%v) = %v, want type ", test.symbol, r.MessageResponse) + } + } +} + +func testFileContainingSymbolError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []string{ + "grpc.testing.SerchService", + "grpc.testing.SearchService.SearchE", + "grpc.tesing.SearchResponse", + "gpc.testing.ToBeExtended", + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: test, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. 
+ t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ErrorResponse: + default: + t.Errorf("FileContainingSymbol(%v) = %v, want type ", test, r.MessageResponse) + } + } +} + +func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + typeName string + extNum int32 + want []byte + }{ + {"grpc.testing.ToBeExtended", 13, fdProto2ExtByte}, + {"grpc.testing.ToBeExtended", 17, fdProto2ExtByte}, + {"grpc.testing.ToBeExtended", 19, fdProto2ExtByte}, + {"grpc.testing.ToBeExtended", 23, fdProto2Ext2Byte}, + {"grpc.testing.ToBeExtended", 29, fdProto2Ext2Byte}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &rpb.ExtensionRequest{ + ContainingType: test.typeName, + ExtensionNumber: test.extNum, + }, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. 
+ t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_FileDescriptorResponse: + if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { + t.Errorf("FileContainingExtension(%v, %v)\nreceived: %q,\nwant: %q", test.typeName, test.extNum, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) + } + default: + t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) + } + } +} + +func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + typeName string + extNum int32 + }{ + {"grpc.testing.ToBExtended", 17}, + {"grpc.testing.ToBeExtended", 15}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &rpb.ExtensionRequest{ + ContainingType: test.typeName, + ExtensionNumber: test.extNum, + }, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. 
+ t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ErrorResponse: + default: + t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) + } + } +} + +func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []struct { + typeName string + want []int32 + }{ + {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: test.typeName, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_AllExtensionNumbersResponse: + extNum := r.GetAllExtensionNumbersResponse().ExtensionNumber + sort.Sort(intArray(extNum)) + if r.GetAllExtensionNumbersResponse().BaseTypeName != test.typeName || + !reflect.DeepEqual(extNum, test.want) { + t.Errorf("AllExtensionNumbersOfType(%v)\nreceived: %v,\nwant: {%q %v}", r.GetAllExtensionNumbersResponse(), test.typeName, test.typeName, test.want) + } + default: + t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test.typeName, r.MessageResponse) + } + } +} + +func testAllExtensionNumbersOfTypeError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + for _, test := range []string{ + "grpc.testing.ToBeExtendedE", + } { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: test, + }, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. 
+ t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ErrorResponse: + default: + t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test, r.MessageResponse) + } + } +} + +func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { + if err := stream.Send(&rpb.ServerReflectionRequest{ + MessageRequest: &rpb.ServerReflectionRequest_ListServices{}, + }); err != nil { + t.Fatalf("failed to send request: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + + switch r.MessageResponse.(type) { + case *rpb.ServerReflectionResponse_ListServicesResponse: + services := r.GetListServicesResponse().Service + want := []string{ + "grpc.testingv3.SearchServiceV3", + "grpc.testing.SearchService", + "grpc.reflection.v1alpha.ServerReflection", + } + // Compare service names in response with want. + if len(services) != len(want) { + t.Errorf("= %v, want service names: %v", services, want) + } + m := make(map[string]int) + for _, e := range services { + m[e.Name]++ + } + for _, e := range want { + if m[e] > 0 { + m[e]-- + continue + } + t.Errorf("ListService\nreceived: %v,\nwant: %q", services, want) + } + default: + t.Errorf("ListServices = %v, want type ", r.MessageResponse) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..f31bbb60f --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,379 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "encoding/json" + "errors" + "fmt" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 + golang = "GO" + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("missing address") +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{freq: defaultFreq} +} + +type dnsBuilder struct { + // frequency of polling the DNS server. + freq time.Duration +} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint) + if err != nil { + return nil, err + } + + // IP address. 
+ if net.ParseIP(host) != nil { + host, _ = formatIP(host) + addr := []resolver.Address{{Addr: host + ":" + port}} + i := &ipResolver{ + cc: cc, + ip: addr, + rn: make(chan struct{}, 1), + q: make(chan struct{}), + } + cc.NewAddress(addr) + go i.watcher() + return i, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + freq: b.freq, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + t: time.NewTimer(0), + rn: make(chan struct{}, 1), + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +// ipResolver watches for the name resolution update for an IP address. +type ipResolver struct { + cc resolver.ClientConn + ip []resolver.Address + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + q chan struct{} +} + +// ResolveNow resend the address it stores, no resolution is needed. +func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case i.rn <- struct{}{}: + default: + } +} + +// Close closes the ipResolver. +func (i *ipResolver) Close() { + close(i.q) +} + +func (i *ipResolver) watcher() { + for { + select { + case <-i.rn: + i.cc.NewAddress(i.ip) + case <-i.q: + return + } + } +} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + freq time.Duration + host string + port string + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + t *time.Timer + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. 
[Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() + d.t.Stop() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + for { + select { + case <-d.ctx.Done(): + return + case <-d.t.C: + case <-d.rn: + } + result, sc := d.lookup() + // Next lookup should happen after an interval defined by d.freq. + d.t.Reset(d.freq) + d.cc.NewServiceConfig(string(sc)) + d.cc.NewAddress(result) + } +} + +func (d *dnsResolver) lookupSRV() []resolver.Address { + var newAddrs []resolver.Address + _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(d.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs +} + +func (d *dnsResolver) lookupTXT() string { + ss, err := lookupTXT(d.ctx, d.host) + if err != nil { + grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err) + 
return "" + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) + return "" + } + return strings.TrimPrefix(res, txtAttribute) +} + +func (d *dnsResolver) lookupHost() []resolver.Address { + var newAddrs []resolver.Address + addrs, err := lookupHost(d.ctx, d.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs +} + +func (d *dnsResolver) lookup() ([]resolver.Address, string) { + var newAddrs []resolver.Address + newAddrs = d.lookupSRV() + // Support fallback to non-balancer address. + newAddrs = append(newAddrs, d.lookupHost()...) + sc := d.lookupTXT() + return newAddrs, canaryingSC(sc) +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. 
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + s := rand.NewSource(time.Now().UnixNano()) + r := rand.New(s) + if r.Intn(100)+1 > *a { + return false + } + return true +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + 
err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver_test.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver_test.go new file mode 100644 index 000000000..590791168 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver_test.go @@ -0,0 +1,898 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import ( + "fmt" + "net" + "os" + "reflect" + "sync" + "testing" + "time" + + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/test/leakcheck" +) + +func TestMain(m *testing.M) { + cleanup := replaceNetFunc() + code := m.Run() + cleanup() + os.Exit(code) +} + +const ( + txtBytesLimit = 255 +) + +type testClientConn struct { + target string + m1 sync.Mutex + addrs []resolver.Address + a int + m2 sync.Mutex + sc string + s int +} + +func (t *testClientConn) NewAddress(addresses []resolver.Address) { + t.m1.Lock() + defer t.m1.Unlock() + t.addrs = addresses + t.a++ +} + +func (t *testClientConn) getAddress() ([]resolver.Address, int) { + t.m1.Lock() + defer t.m1.Unlock() + return t.addrs, t.a +} + +func (t *testClientConn) NewServiceConfig(serviceConfig string) { + t.m2.Lock() + defer t.m2.Unlock() + t.sc = serviceConfig + t.s++ +} + +func (t *testClientConn) getSc() (string, int) { + t.m2.Lock() + defer t.m2.Unlock() + return t.sc, t.s +} + +var hostLookupTbl = struct { + sync.Mutex + tbl map[string][]string +}{ + tbl: map[string][]string{ + "foo.bar.com": {"1.2.3.4", "5.6.7.8"}, + "ipv4.single.fake": {"1.2.3.4"}, + "srv.ipv4.single.fake": {"2.4.6.8"}, + "ipv4.multi.fake": {"1.2.3.4", "5.6.7.8", "9.10.11.12"}, + "ipv6.single.fake": {"2607:f8b0:400a:801::1001"}, + "ipv6.multi.fake": {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"}, + }, +} + +func hostLookup(host string) ([]string, error) { + hostLookupTbl.Lock() + defer hostLookupTbl.Unlock() + if addrs, cnt := hostLookupTbl.tbl[host]; cnt { + return addrs, nil + } + return nil, fmt.Errorf("failed to lookup host:%s resolution in hostLookupTbl", host) +} + +var srvLookupTbl = struct { + sync.Mutex + tbl map[string][]*net.SRV +}{ + tbl: map[string][]*net.SRV{ + "_grpclb._tcp.srv.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv4.multi.fake": {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}}, + 
"_grpclb._tcp.srv.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv6.multi.fake": {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}}, + }, +} + +func srvLookup(service, proto, name string) (string, []*net.SRV, error) { + cname := "_" + service + "._" + proto + "." + name + srvLookupTbl.Lock() + defer srvLookupTbl.Unlock() + if srvs, cnt := srvLookupTbl.tbl[cname]; cnt { + return cname, srvs, nil + } + return "", nil, fmt.Errorf("failed to lookup srv record for %s in srvLookupTbl", cname) +} + +// div divides a byte slice into a slice of strings, each of which is of maximum +// 255 bytes length, which is the length limit per TXT record in DNS. +func div(b []byte) []string { + var r []string + for i := 0; i < len(b); i += txtBytesLimit { + if i+txtBytesLimit > len(b) { + r = append(r, string(b[i:])) + } else { + r = append(r, string(b[i:i+txtBytesLimit])) + } + } + return r +} + +// scfs contains an array of service config file string in JSON format. +// Notes about the scfs contents and usage: +// scfs contains 4 service config file JSON strings for testing. Inside each +// service config file, there are multiple choices. scfs[0:3] each contains 5 +// choices, and first 3 choices are nonmatching choices based on canarying rule, +// while the last two are matched choices. scfs[3] only contains 3 choices, and +// all of them are nonmatching based on canarying rule. For each of scfs[0:3], +// the eventually returned service config, which is from the first of the two +// matched choices, is stored in the corresponding scs element (e.g. +// scfs[0]->scs[0]). scfs and scs elements are used in pair to test the dns +// resolver functionality, with scfs as the input and scs used for validation of +// the output. For scfs[3], it corresponds to empty service config, since there +// isn't a matched choice. 
+var ( + scfs = []string{ + `[ + { + "clientLanguage": [ + "CPP", + "JAVA" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "percentage": 0, + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientHostName": [ + "localhost" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientLanguage": [ + "GO" + ], + "percentage": 100, + "serviceConfig": { + "methodConfig": [ + { + "name": [ + { + "method": "bar" + } + ], + "maxRequestMessageBytes": 1024, + "maxResponseMessageBytes": 1024 + } + ] + } + }, + { + "serviceConfig": { + "loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true + } + ] + } + } +]`, + `[ + { + "clientLanguage": [ + "CPP", + "JAVA" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "percentage": 0, + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientHostName": [ + "localhost" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientLanguage": [ + "GO" + ], + "percentage": 100, + "serviceConfig": { + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true, + "timeout": "1s", + "maxRequestMessageBytes": 1024, + "maxResponseMessageBytes": 1024 + } + ] + } + }, + { + "serviceConfig": { + "loadBalancingPolicy": 
"round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true + } + ] + } + } +]`, + `[ + { + "clientLanguage": [ + "CPP", + "JAVA" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "percentage": 0, + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientHostName": [ + "localhost" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientLanguage": [ + "GO" + ], + "percentage": 100, + "serviceConfig": { + "loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo" + } + ], + "waitForReady": true, + "timeout": "1s" + }, + { + "name": [ + { + "service": "bar" + } + ], + "waitForReady": false + } + ] + } + }, + { + "serviceConfig": { + "loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true + } + ] + } + } +]`, + `[ + { + "clientLanguage": [ + "CPP", + "JAVA" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "percentage": 0, + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientHostName": [ + "localhost" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + } +]`, + } +) + +// scs contains an array of service config string in JSON format. 
+var ( + scs = []string{ + `{ + "methodConfig": [ + { + "name": [ + { + "method": "bar" + } + ], + "maxRequestMessageBytes": 1024, + "maxResponseMessageBytes": 1024 + } + ] + }`, + `{ + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true, + "timeout": "1s", + "maxRequestMessageBytes": 1024, + "maxResponseMessageBytes": 1024 + } + ] + }`, + `{ + "loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo" + } + ], + "waitForReady": true, + "timeout": "1s" + }, + { + "name": [ + { + "service": "bar" + } + ], + "waitForReady": false + } + ] + }`, + } +) + +// scLookupTbl is a set, which contains targets that have service config. Target +// not in this set should not have service config. +var scLookupTbl = map[string]bool{ + "foo.bar.com": true, + "srv.ipv4.single.fake": true, + "srv.ipv4.multi.fake": true, + "no.attribute": true, +} + +// generateSCF generates a slice of strings (aggregately representing a single +// service config file) for the input name, which mocks the result from a real +// DNS TXT record lookup. +func generateSCF(name string) []string { + var b []byte + switch name { + case "foo.bar.com": + b = []byte(scfs[0]) + case "srv.ipv4.single.fake": + b = []byte(scfs[1]) + case "srv.ipv4.multi.fake": + b = []byte(scfs[2]) + default: + b = []byte(scfs[3]) + } + if name == "no.attribute" { + return div(b) + } + return div(append([]byte(txtAttribute), b...)) +} + +// generateSC returns a service config string in JSON format for the input name. 
+func generateSC(name string) string { + _, cnt := scLookupTbl[name] + if !cnt || name == "no.attribute" { + return "" + } + switch name { + case "foo.bar.com": + return scs[0] + case "srv.ipv4.single.fake": + return scs[1] + case "srv.ipv4.multi.fake": + return scs[2] + default: + return "" + } +} + +var txtLookupTbl = struct { + sync.Mutex + tbl map[string][]string +}{ + tbl: map[string][]string{ + "foo.bar.com": generateSCF("foo.bar.com"), + "srv.ipv4.single.fake": generateSCF("srv.ipv4.single.fake"), + "srv.ipv4.multi.fake": generateSCF("srv.ipv4.multi.fake"), + "srv.ipv6.single.fake": generateSCF("srv.ipv6.single.fake"), + "srv.ipv6.multi.fake": generateSCF("srv.ipv6.multi.fake"), + "no.attribute": generateSCF("no.attribute"), + }, +} + +func txtLookup(host string) ([]string, error) { + txtLookupTbl.Lock() + defer txtLookupTbl.Unlock() + if scs, cnt := txtLookupTbl.tbl[host]; cnt { + return scs, nil + } + return nil, fmt.Errorf("failed to lookup TXT:%s resolution in txtLookupTbl", host) +} + +func TestResolve(t *testing.T) { + testDNSResolver(t) + testDNSResolveNow(t) + testIPResolver(t) +} + +func testDNSResolver(t *testing.T) { + defer leakcheck.Check(t) + tests := []struct { + target string + addrWant []resolver.Address + scWant string + }{ + { + "foo.bar.com", + []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, + generateSC("foo.bar.com"), + }, + { + "foo.bar.com:1234", + []resolver.Address{{Addr: "1.2.3.4:1234"}, {Addr: "5.6.7.8:1234"}}, + generateSC("foo.bar.com"), + }, + { + "srv.ipv4.single.fake", + []resolver.Address{{Addr: "1.2.3.4:1234", Type: resolver.GRPCLB, ServerName: "ipv4.single.fake"}, {Addr: "2.4.6.8" + colonDefaultPort}}, + generateSC("srv.ipv4.single.fake"), + }, + { + "srv.ipv4.multi.fake", + []resolver.Address{ + {Addr: "1.2.3.4:1234", Type: resolver.GRPCLB, ServerName: "ipv4.multi.fake"}, + {Addr: "5.6.7.8:1234", Type: resolver.GRPCLB, ServerName: "ipv4.multi.fake"}, + {Addr: 
"9.10.11.12:1234", Type: resolver.GRPCLB, ServerName: "ipv4.multi.fake"}, + }, + generateSC("srv.ipv4.multi.fake"), + }, + { + "srv.ipv6.single.fake", + []resolver.Address{{Addr: "[2607:f8b0:400a:801::1001]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.single.fake"}}, + generateSC("srv.ipv6.single.fake"), + }, + { + "srv.ipv6.multi.fake", + []resolver.Address{ + {Addr: "[2607:f8b0:400a:801::1001]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, + {Addr: "[2607:f8b0:400a:801::1002]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, + {Addr: "[2607:f8b0:400a:801::1003]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, + }, + generateSC("srv.ipv6.multi.fake"), + }, + { + "no.attribute", + nil, + generateSC("no.attribute"), + }, + } + + for _, a := range tests { + b := NewBuilder() + cc := &testClientConn{target: a.target} + r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOption{}) + if err != nil { + t.Fatalf("%v\n", err) + } + var addrs []resolver.Address + var cnt int + for { + addrs, cnt = cc.getAddress() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + var sc string + for { + sc, cnt = cc.getSc() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(a.addrWant, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, addrs, a.addrWant) + } + if !reflect.DeepEqual(a.scWant, sc) { + t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scWant) + } + r.Close() + } +} + +func mutateTbl(target string) func() { + hostLookupTbl.Lock() + oldHostTblEntry := hostLookupTbl.tbl[target] + hostLookupTbl.tbl[target] = hostLookupTbl.tbl[target][:len(oldHostTblEntry)-1] + hostLookupTbl.Unlock() + txtLookupTbl.Lock() + oldTxtTblEntry := txtLookupTbl.tbl[target] + txtLookupTbl.tbl[target] = []string{""} + txtLookupTbl.Unlock() + + return func() { + hostLookupTbl.Lock() + hostLookupTbl.tbl[target] = oldHostTblEntry + 
hostLookupTbl.Unlock() + txtLookupTbl.Lock() + txtLookupTbl.tbl[target] = oldTxtTblEntry + txtLookupTbl.Unlock() + } +} + +func testDNSResolveNow(t *testing.T) { + defer leakcheck.Check(t) + tests := []struct { + target string + addrWant []resolver.Address + addrNext []resolver.Address + scWant string + scNext string + }{ + { + "foo.bar.com", + []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, + []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}}, + generateSC("foo.bar.com"), + "", + }, + } + + for _, a := range tests { + b := NewBuilder() + cc := &testClientConn{target: a.target} + r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOption{}) + if err != nil { + t.Fatalf("%v\n", err) + } + var addrs []resolver.Address + var cnt int + for { + addrs, cnt = cc.getAddress() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + var sc string + for { + sc, cnt = cc.getSc() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(a.addrWant, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, addrs, a.addrWant) + } + if !reflect.DeepEqual(a.scWant, sc) { + t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scWant) + } + revertTbl := mutateTbl(a.target) + r.ResolveNow(resolver.ResolveNowOption{}) + for { + addrs, cnt = cc.getAddress() + if cnt == 2 { + break + } + time.Sleep(time.Millisecond) + } + for { + sc, cnt = cc.getSc() + if cnt == 2 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(a.addrNext, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, addrs, a.addrNext) + } + if !reflect.DeepEqual(a.scNext, sc) { + t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scNext) + } + revertTbl() + r.Close() + } +} + +const colonDefaultPort = ":" + defaultPort + +func testIPResolver(t *testing.T) { + defer 
leakcheck.Check(t) + tests := []struct { + target string + want []resolver.Address + }{ + {"127.0.0.1", []resolver.Address{{Addr: "127.0.0.1" + colonDefaultPort}}}, + {"127.0.0.1:12345", []resolver.Address{{Addr: "127.0.0.1:12345"}}}, + {"::1", []resolver.Address{{Addr: "[::1]" + colonDefaultPort}}}, + {"[::1]:12345", []resolver.Address{{Addr: "[::1]:12345"}}}, + {"[::1]:", []resolver.Address{{Addr: "[::1]:443"}}}, + {"2001:db8:85a3::8a2e:370:7334", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, + {"[2001:db8:85a3::8a2e:370:7334]", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, + {"[2001:db8:85a3::8a2e:370:7334]:12345", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]:12345"}}}, + {"[2001:db8::1]:http", []resolver.Address{{Addr: "[2001:db8::1]:http"}}}, + // TODO(yuxuanli): zone support? + } + + for _, v := range tests { + b := NewBuilder() + cc := &testClientConn{target: v.target} + r, err := b.Build(resolver.Target{Endpoint: v.target}, cc, resolver.BuildOption{}) + if err != nil { + t.Fatalf("%v\n", err) + } + var addrs []resolver.Address + var cnt int + for { + addrs, cnt = cc.getAddress() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(v.want, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", v.target, addrs, v.want) + } + r.ResolveNow(resolver.ResolveNowOption{}) + for { + addrs, cnt = cc.getAddress() + if cnt == 2 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(v.want, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", v.target, addrs, v.want) + } + r.Close() + } +} + +func TestResolveFunc(t *testing.T) { + defer leakcheck.Check(t) + tests := []struct { + addr string + want error + }{ + // TODO(yuxuanli): More false cases? 
+ {"www.google.com", nil}, + {"foo.bar:12345", nil}, + {"127.0.0.1", nil}, + {"127.0.0.1:12345", nil}, + {"[::1]:80", nil}, + {"[2001:db8:a0b:12f0::1]:21", nil}, + {":80", nil}, + {"127.0.0...1:12345", nil}, + {"[fe80::1%lo0]:80", nil}, + {"golang.org:http", nil}, + {"[2001:db8::1]:http", nil}, + {":", nil}, + {"", errMissingAddr}, + {"[2001:db8:a0b:12f0::1", errForInvalidTarget}, + } + + b := NewBuilder() + for _, v := range tests { + cc := &testClientConn{target: v.addr} + r, err := b.Build(resolver.Target{Endpoint: v.addr}, cc, resolver.BuildOption{}) + if err == nil { + r.Close() + } + if !reflect.DeepEqual(err, v.want) { + t.Errorf("Build(%q, cc, resolver.BuildOption{}) = %v, want %v", v.addr, err, v.want) + } + } +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go new file mode 100644 index 000000000..b466bc8f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go17.go @@ -0,0 +1,35 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } + lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) } +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17_test.go b/vendor/google.golang.org/grpc/resolver/dns/go17_test.go new file mode 100644 index 000000000..07fdcb03f --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go17_test.go @@ -0,0 +1,52 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import ( + "fmt" + "net" + + "golang.org/x/net/context" +) + +var ( + errForInvalidTarget = fmt.Errorf("invalid target address [2001:db8:a0b:12f0::1, error info: missing ']' in address [2001:db8:a0b:12f0::1:443") +) + +func replaceNetFunc() func() { + oldLookupHost := lookupHost + oldLookupSRV := lookupSRV + oldLookupTXT := lookupTXT + lookupHost = func(ctx context.Context, host string) ([]string, error) { + return hostLookup(host) + } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return srvLookup(service, proto, name) + } + lookupTXT = func(ctx context.Context, host string) ([]string, error) { + return txtLookup(host) + } + return func() { + lookupHost = oldLookupHost + lookupSRV = oldLookupSRV + lookupTXT = oldLookupTXT + } +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go new file mode 100644 index 000000000..fa34f14ca --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go18.go @@ -0,0 +1,29 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV + lookupTXT = net.DefaultResolver.LookupTXT +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18_test.go b/vendor/google.golang.org/grpc/resolver/dns/go18_test.go new file mode 100644 index 000000000..8e016709c --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go18_test.go @@ -0,0 +1,51 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import ( + "context" + "fmt" + "net" +) + +var ( + errForInvalidTarget = fmt.Errorf("invalid target address [2001:db8:a0b:12f0::1, error info: address [2001:db8:a0b:12f0::1:443: missing ']' in address") +) + +func replaceNetFunc() func() { + oldLookupHost := lookupHost + oldLookupSRV := lookupSRV + oldLookupTXT := lookupTXT + lookupHost = func(ctx context.Context, host string) ([]string, error) { + return hostLookup(host) + } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return srvLookup(service, proto, name) + } + lookupTXT = func(ctx context.Context, host string) ([]string, error) { + return txtLookup(host) + } + return func() { + lookupHost = oldLookupHost + lookupSRV = oldLookupSRV + lookupTXT = oldLookupTXT + } +} diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go new file mode 100644 index 000000000..de2fb25b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -0,0 +1,80 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package manual contains a resolver for testing purpose only. +package manual + +import ( + "strconv" + "time" + + "google.golang.org/grpc/resolver" +) + +// NewBuilderWithScheme creates a new test resolver builder with the given scheme. 
+func NewBuilderWithScheme(scheme string) *Resolver { + return &Resolver{ + scheme: scheme, + } +} + +// Resolver is also a resolver builder. +// It's build() function always returns itself. +type Resolver struct { + scheme string + + // Fields actually belong to the resolver. + cc resolver.ClientConn +} + +// Build returns itself for Resolver, because it's both a builder and a resolver. +func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + r.cc = cc + return r, nil +} + +// Scheme returns the test scheme. +func (r *Resolver) Scheme() string { + return r.scheme +} + +// ResolveNow is a noop for Resolver. +func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {} + +// Close is a noop for Resolver. +func (*Resolver) Close() {} + +// NewAddress calls cc.NewAddress. +func (r *Resolver) NewAddress(addrs []resolver.Address) { + r.cc.NewAddress(addrs) +} + +// NewServiceConfig calls cc.NewServiceConfig. +func (r *Resolver) NewServiceConfig(sc string) { + r.cc.NewServiceConfig(sc) +} + +// GenerateAndRegisterManualResolver generates a random scheme and a Resolver +// with it. It also regieter this Resolver. +// It returns the Resolver and a cleanup function to unregister it. +func GenerateAndRegisterManualResolver() (*Resolver, func()) { + scheme := strconv.FormatInt(time.Now().UnixNano(), 36) + r := NewBuilderWithScheme(scheme) + resolver.Register(r) + return r, func() { resolver.UnregisterForTesting(scheme) } +} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 000000000..4d4420aa4 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. It's for gRPC internal +// test only. +package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 000000000..49307e8fe --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme string
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map.
+// b.Scheme will be used as the scheme registered with this builder.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+// If no builder is registered with the scheme, the default scheme will
+// be used.
+// If the default scheme is not modified, "dns" will be the default
+// scheme, and the preinstalled dns resolver will be used.
+// If the default scheme is modified, and a resolver is registered with
+// the scheme, that resolver will be returned.
+// If the default scheme is modified, and no resolver is registered with
+// the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	if b, ok := m[defaultScheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used.
+// The default default scheme is "dns".
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+type AddressType uint8
+
+const (
+	// Backend indicates the address is for a backend server.
+ Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // It's the name of the grpc load balancer, which will be used for authentication. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +type ClientConn interface { + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + // Scheme returns the scheme supported by this resolver. 
+	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+	Scheme() string
+}
+
+// ResolveNowOption includes additional information for ResolveNow.
+type ResolveNowOption struct{}
+
+// Resolver watches for the updates on the specified target.
+// Updates include address updates and service config updates.
+type Resolver interface {
+	// ResolveNow will be called by gRPC to try to resolve the target name again.
+	// It's just a hint, resolver can ignore this if it's not necessary.
+	ResolveNow(ResolveNowOption)
+	// Close closes the resolver.
+	Close()
+}
+
+// UnregisterForTesting removes the resolver builder with the given scheme from the
+// resolver map.
+// This function is for testing only.
+func UnregisterForTesting(scheme string) {
+	delete(m, scheme)
+}
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
new file mode 100644
index 000000000..7d53964d0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -0,0 +1,139 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"strings"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements the resolver.ClientConn interface.
+type ccResolverWrapper struct {
+	cc       *ClientConn
+	resolver resolver.Resolver
+	addrCh   chan []resolver.Address
+	scCh     chan string
+	done     chan struct{}
+}
+
+// split2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it returns "", s instead.
+func split2(s, sep string) (string, string) {
+	spl := strings.SplitN(s, sep, 2)
+	if len(spl) < 2 {
+		return "", s
+	}
+	return spl[0], spl[1]
+}
+
+// parseTarget splits target into a struct containing scheme, authority and
+// endpoint.
+func parseTarget(target string) (ret resolver.Target) {
+	ret.Scheme, ret.Endpoint = split2(target, "://")
+	ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/")
+	return ret
+}
+
+// newCCResolverWrapper parses cc.target for scheme and gets the resolver
+// builder for this scheme. It then builds the resolver and starts the
+// monitoring goroutine for it.
+//
+// This function could return nil, nil, in tests for old behaviors.
+// TODO(bar) never return nil, nil when DNS becomes the default resolver.
+func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
+	target := parseTarget(cc.target)
+	grpclog.Infof("dialing to target with scheme: %q", target.Scheme)
+
+	rb := resolver.Get(target.Scheme)
+	if rb == nil {
+		// TODO(bar) return error when DNS becomes the default (implemented and
+		// registered by DNS package).
+		grpclog.Infof("could not get resolver for scheme: %q", target.Scheme)
+		return nil, nil
+	}
+
+	ccr := &ccResolverWrapper{
+		cc:     cc,
+		addrCh: make(chan []resolver.Address, 1),
+		scCh:   make(chan string, 1),
+		done:   make(chan struct{}),
+	}
+
+	var err error
+	ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{})
+	if err != nil {
+		return nil, err
+	}
+	go ccr.watcher()
+	return ccr, nil
+}
+
+// watcher processes address updates and service config updates sequentially.
+// Otherwise, we need to resolve possible races between address and service
+// config (e.g. they specify different balancer types).
+func (ccr *ccResolverWrapper) watcher() {
+	for {
+		select {
+		case <-ccr.done:
+			return
+		default:
+		}
+
+		select {
+		case addrs := <-ccr.addrCh:
+			grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs)
+			// TODO(bar switching) this should never be nil. Pickfirst should be default.
+			if ccr.cc.balancerWrapper != nil {
+				// TODO(bar switching) create balancer if it's nil?
+				ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
+			}
+		case sc := <-ccr.scCh:
+			grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
+		case <-ccr.done:
+			return
+		}
+	}
+}
+
+func (ccr *ccResolverWrapper) close() {
+	ccr.resolver.Close()
+	close(ccr.done)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+	select {
+	case <-ccr.addrCh:
+	default:
+	}
+	ccr.addrCh <- addrs
+}
+
+// NewServiceConfig is called by the resolver implementation to send service
+// configs to gRPC.
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
+	select {
+	case <-ccr.scCh:
+	default:
+	}
+	ccr.scCh <- sc
+}
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go
new file mode 100644
index 000000000..024942301
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go
@@ -0,0 +1,47 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "testing" + + "google.golang.org/grpc/resolver" +) + +func TestParseTarget(t *testing.T) { + for _, test := range []resolver.Target{ + {"", "", ""}, + {"a", "", ""}, + {"", "a", ""}, + {"", "", "a"}, + {"a", "b", ""}, + {"a", "", "b"}, + {"", "a", "b"}, + {"a", "b", "c"}, + {"dns", "a.server.com", "google.com"}, + {"dns", "a.server.com", "google.com"}, + {"dns", "a.server.com", "google.com/?a=b"}, + } { + str := test.Scheme + "://" + test.Authority + "/" + test.Endpoint + got := parseTarget(str) + if got != test { + t.Errorf("parseTarget(%q) = %v, want %v", str, got, test) + } + } +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..d5f142d82 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,572 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + stdctx "context" + "encoding/binary" + "io" + "io/ioutil" + "math" + "os" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// Compressor defines the interface gRPC uses to compress a message. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(ioutil.Discard) + }, + }, + } +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. 
+func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + peer *peer.Peer + traceInfo traceInfo // in trace.go + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials +} + +func defaultCallInfo() *callInfo { + return &callInfo{failFast: true} +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. 
+type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + +type beforeCall func(c *callInfo) error + +func (o beforeCall) before(c *callInfo) error { return o(c) } +func (o beforeCall) after(c *callInfo) {} + +type afterCall func(c *callInfo) + +func (o afterCall) before(c *callInfo) error { return nil } +func (o afterCall) after(c *callInfo) { o(c) } + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return afterCall(func(c *callInfo) { + *md = c.headerMD + }) +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return afterCall(func(c *callInfo) { + *md = c.trailerMD + }) +} + +// Peer returns a CallOption that retrieves peer information for a +// unary RPC. +func Peer(peer *peer.Peer) CallOption { + return afterCall(func(c *callInfo) { + if c.peer != nil { + *peer = *c.peer + } + }) +} + +// FailFast configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If failfast is true, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will retry +// the call if it fails due to a transient error. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// Note: failFast is default to true. +func FailFast(failFast bool) CallOption { + return beforeCall(func(c *callInfo) error { + c.failFast = failFast + return nil + }) +} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. 
+func MaxCallRecvMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxReceiveMessageSize = &s + return nil + }) +} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxSendMessageSize = &s + return nil + }) +} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return beforeCall(func(c *callInfo) error { + c.creds = creds + return nil + }) +} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = iota // no compression + compressionMade +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail + // at https://grpc.io/docs/guides/wire.html. + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. 
+func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if length > uint32(maxReceiveMessageSize) { + return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer of message header and a buffer of msg. +// If msg is nil, it generates the message header and an empty msg buffer. +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) { + var b []byte + const ( + payloadLen = 1 + sizeLen = 4 + ) + + if msg != nil { + var err error + b, err = c.Marshal(msg) + if err != nil { + return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if outPayload != nil { + outPayload.Payload = msg + // TODO truncate large payload. 
+ outPayload.Data = b + outPayload.Length = len(b) + } + if cp != nil { + if err := cp.Do(cbuf, b); err != nil { + return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + b = cbuf.Bytes() + } + } + + if uint(len(b)) > math.MaxUint32 { + return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + + bufHeader := make([]byte, payloadLen+sizeLen) + if cp == nil { + bufHeader[0] = byte(compressionNone) + } else { + bufHeader[0] = byte(compressionMade) + } + // Write length of b into buf + binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) + if outPayload != nil { + outPayload.WireLength = payloadLen + sizeLen + len(b) + } + return bufHeader, b, nil +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { + switch pf { + case compressionNone: + case compressionMade: + if dc == nil || recvCompress != dc.Type() { + return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return err + } + if inPayload != nil { + inPayload.WireLength = len(d) + } + if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { + return err + } + if pf == compressionMade { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + if len(d) > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", len(d), maxReceiveMessageSize) + } + if err := c.Unmarshal(d, m); err != nil { + return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if inPayload != nil { + inPayload.RecvTime = time.Now() + inPayload.Payload = m + // TODO truncate large payload. + inPayload.Data = d + inPayload.Length = len(d) + } + return nil +} + +type rpcInfo struct { + failfast bool + bytesSent bool + bytesReceived bool +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +func updateRPCInfoInContext(ctx context.Context, s rpcInfo) { + if ss, ok := rpcInfoFromContext(ctx); ok { + ss.bytesReceived = s.bytesReceived + ss.bytesSent = s.bytesSent + } + return +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, stdctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + case ErrClientConnClosing: + return status.Error(codes.FailedPrecondition, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. 
+func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled, stdctx.Canceled: + return codes.Canceled + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated; use status.FromError and Code method instead. +func Code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated; use status.FromError and Message method instead. +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated; use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// This is EXPERIMENTAL and subject to change. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). 
The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// This is EXPERIMENTAL and subject to change. +type ServiceConfig struct { + // LB is the load balancer the service providers recommends. The balancer specified + // via grpc.WithBalancer will override this. + LB Balancer + // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. 
+ Methods map[string]MethodConfig +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +// SupportPackageIsVersion3 is referenced from generated protocol buffer files. +// The latest support package version is 4. +// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the +// next support package version update. +const SupportPackageIsVersion3 = true + +// SupportPackageIsVersion4 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the grpc package. +// +// This constant may be renamed in the future if a change in the generated code +// requires a synchronised update of grpc-go and protoc-gen-go. This constant +// should not be referenced from any other code. +const SupportPackageIsVersion4 = true + +// Version is the current grpc version. +const Version = "1.7.1" + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/rpc_util_test.go b/vendor/google.golang.org/grpc/rpc_util_test.go new file mode 100644 index 000000000..23c471e2e --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util_test.go @@ -0,0 +1,237 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "io" + "math" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + perfpb "google.golang.org/grpc/test/codec_perf" + "google.golang.org/grpc/transport" +) + +type fullReader struct { + reader io.Reader +} + +func (f fullReader) Read(p []byte) (int, error) { + return io.ReadFull(f.reader, p) +} + +var _ CallOption = EmptyCallOption{} // ensure EmptyCallOption implements the interface + +func TestSimpleParsing(t *testing.T) { + bigMsg := bytes.Repeat([]byte{'x'}, 1<<24) + for _, test := range []struct { + // input + p []byte + // outputs + err error + b []byte + pt payloadFormat + }{ + {nil, io.EOF, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 0}, nil, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 1, 'a'}, nil, []byte{'a'}, compressionNone}, + {[]byte{1, 0}, io.ErrUnexpectedEOF, nil, compressionNone}, + {[]byte{0, 0, 0, 0, 10, 'a'}, io.ErrUnexpectedEOF, nil, compressionNone}, + // Check that messages with length >= 2^24 are parsed. + {append([]byte{0, 1, 0, 0, 0}, bigMsg...), nil, bigMsg, compressionNone}, + } { + buf := fullReader{bytes.NewReader(test.p)} + parser := &parser{r: buf} + pt, b, err := parser.recvMsg(math.MaxInt32) + if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt { + t.Fatalf("parser{%v}.recvMsg(_) = %v, %v, %v\nwant %v, %v, %v", test.p, pt, b, err, test.pt, test.b, test.err) + } + } +} + +func TestMultipleParsing(t *testing.T) { + // Set a byte stream consists of 3 messages with their headers. 
+ p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'} + b := fullReader{bytes.NewReader(p)} + parser := &parser{r: b} + + wantRecvs := []struct { + pt payloadFormat + data []byte + }{ + {compressionNone, []byte("a")}, + {compressionNone, []byte("bc")}, + {compressionNone, []byte("d")}, + } + for i, want := range wantRecvs { + pt, data, err := parser.recvMsg(math.MaxInt32) + if err != nil || pt != want.pt || !reflect.DeepEqual(data, want.data) { + t.Fatalf("after %d calls, parser{%v}.recvMsg(_) = %v, %v, %v\nwant %v, %v, ", + i, p, pt, data, err, want.pt, want.data) + } + } + + pt, data, err := parser.recvMsg(math.MaxInt32) + if err != io.EOF { + t.Fatalf("after %d recvMsgs calls, parser{%v}.recvMsg(_) = %v, %v, %v\nwant _, _, %v", + len(wantRecvs), p, pt, data, err, io.EOF) + } +} + +func TestEncode(t *testing.T) { + for _, test := range []struct { + // input + msg proto.Message + cp Compressor + // outputs + hdr []byte + data []byte + err error + }{ + {nil, nil, []byte{0, 0, 0, 0, 0}, []byte{}, nil}, + } { + hdr, data, err := encode(protoCodec{}, test.msg, nil, nil, nil) + if err != test.err || !bytes.Equal(hdr, test.hdr) || !bytes.Equal(data, test.data) { + t.Fatalf("encode(_, _, %v, _) = %v, %v, %v\nwant %v, %v, %v", test.cp, hdr, data, err, test.hdr, test.data, test.err) + } + } +} + +func TestCompress(t *testing.T) { + for _, test := range []struct { + // input + data []byte + cp Compressor + dc Decompressor + // outputs + err error + }{ + {make([]byte, 1024), NewGZIPCompressor(), NewGZIPDecompressor(), nil}, + } { + b := new(bytes.Buffer) + if err := test.cp.Do(b, test.data); err != test.err { + t.Fatalf("Compressor.Do(_, %v) = %v, want %v", test.data, err, test.err) + } + if b.Len() >= len(test.data) { + t.Fatalf("The compressor fails to compress data.") + } + if p, err := test.dc.Do(b); err != nil || !bytes.Equal(test.data, p) { + t.Fatalf("Decompressor.Do(%v) = %v, %v, want %v, ", b, p, err, test.data) + } + } +} + +func 
TestToRPCErr(t *testing.T) { + for _, test := range []struct { + // input + errIn error + // outputs + errOut error + }{ + {transport.StreamError{Code: codes.Unknown, Desc: ""}, status.Error(codes.Unknown, "")}, + {transport.ErrConnClosing, status.Error(codes.Unavailable, transport.ErrConnClosing.Desc)}, + } { + err := toRPCErr(test.errIn) + if _, ok := status.FromError(err); !ok { + t.Fatalf("toRPCErr{%v} returned type %T, want %T", test.errIn, err, status.Error(codes.Unknown, "")) + } + if !reflect.DeepEqual(err, test.errOut) { + t.Fatalf("toRPCErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) + } + } +} + +// bmEncode benchmarks encoding a Protocol Buffer message containing mSize +// bytes. +func bmEncode(b *testing.B, mSize int) { + msg := &perfpb.Buffer{Body: make([]byte, mSize)} + encodeHdr, encodeData, _ := encode(protoCodec{}, msg, nil, nil, nil) + encodedSz := int64(len(encodeHdr) + len(encodeData)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + encode(protoCodec{}, msg, nil, nil, nil) + } + b.SetBytes(encodedSz) +} + +func BenchmarkEncode1B(b *testing.B) { + bmEncode(b, 1) +} + +func BenchmarkEncode1KiB(b *testing.B) { + bmEncode(b, 1024) +} + +func BenchmarkEncode8KiB(b *testing.B) { + bmEncode(b, 8*1024) +} + +func BenchmarkEncode64KiB(b *testing.B) { + bmEncode(b, 64*1024) +} + +func BenchmarkEncode512KiB(b *testing.B) { + bmEncode(b, 512*1024) +} + +func BenchmarkEncode1MiB(b *testing.B) { + bmEncode(b, 1024*1024) +} + +// bmCompressor benchmarks a compressor of a Protocol Buffer message containing +// mSize bytes. 
+func bmCompressor(b *testing.B, mSize int, cp Compressor) { + payload := make([]byte, mSize) + cBuf := bytes.NewBuffer(make([]byte, mSize)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + cp.Do(cBuf, payload) + cBuf.Reset() + } +} + +func BenchmarkGZIPCompressor1B(b *testing.B) { + bmCompressor(b, 1, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor1KiB(b *testing.B) { + bmCompressor(b, 1024, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor8KiB(b *testing.B) { + bmCompressor(b, 8*1024, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor64KiB(b *testing.B) { + bmCompressor(b, 64*1024, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor512KiB(b *testing.B) { + bmCompressor(b, 512*1024, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor1MiB(b *testing.B) { + bmCompressor(b, 1024*1024, NewGZIPCompressor()) +} diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 000000000..7c882dbe6 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1177 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" + "google.golang.org/grpc/transport" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. 
+type Server struct { + opts options + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[io.Closer]bool + serve bool + drain bool + ctx context.Context + cancel context.CancelFunc + // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished + // and all the transport goes away. + cv *sync.Cond + m map[string]*service // service name -> service info + events trace.EventLog +} + +type options struct { + creds credentials.TransportCredentials + codec Codec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int +} + +var defaultServerOptions = options{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption func(*options) + +// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WriteBufferSize(s int) ServerOption { + return func(o *options) { + o.writeBufferSize = s + } +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +func ReadBufferSize(s int) ServerOption { + return func(o *options) { + o.readBufferSize = s + } +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. 
+func InitialWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialWindowSize = s + } +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialConnWindowSize = s + } +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + return func(o *options) { + o.keepaliveParams = kp + } +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return func(o *options) { + o.keepalivePolicy = kep + } +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +func CustomCodec(codec Codec) ServerOption { + return func(o *options) { + o.codec = codec + } +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound messages. +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages. +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. 
+func MaxRecvMsgSize(m int) ServerOption { + return func(o *options) { + o.maxReceiveMessageSize = m + } +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default 4MB. +func MaxSendMsgSize(m int) ServerOption { + return func(o *options) { + o.maxSendMessageSize = m + } +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return func(o *options) { + o.maxConcurrentStreams = n + } +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return func(o *options) { + o.creds = c + } +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return func(o *options) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + } +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return func(o *options) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + } +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. 
+func InTapHandle(h tap.ServerInHandle) ServerOption { + return func(o *options) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + } +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return func(o *options) { + o.statsHandler = h + } +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function has full access to the Context of the request and the +// stream, and the invocation bypasses interceptors. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return func(o *options) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o(&opts) + } + if opts.codec == nil { + // Set the default codec. + opts.codec = protoCodec{} + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[io.Closer]bool), + m: make(map[string]*service), + } + s.cv = sync.NewCond(&s.mu) + s.ctx, s.cancel = context.WithCancel(context.Background()) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. 
+// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + srv := &service{ + server: ss, + md: make(map[string]*MethodDesc), + sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.StreamName] = d + } + s.m[sd.ServiceName] = srv +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. 
+ IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +var ( + // ErrServerStopped indicates that the operation is now illegal because of + // the server being stopped. + ErrServerStopped = errors.New("grpc: the server has been stopped") +) + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve always returns non-nil error. 
+func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + s.lis[lis] = true + s.mu.Unlock() + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[lis] { + lis.Close() + delete(s.lis, lis) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.ctx.Done(): + } + timer.Stop() + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn + // so we don't stall this Accept loop goroutine. + go s.handleRawConn(rawConn) + } +} + +// handleRawConn is run in its own goroutine and handles a just-accepted +// connection that has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandShake returns ErrConnDispatched, keep rawConn open. 
+ if err != credentials.ErrConnDispatched { + rawConn.Close() + } + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + if s.opts.useHandlerImpl { + s.serveUsingHandler(conn) + } else { + s.serveHTTP2Transport(conn, authInfo) + } +} + +// serveHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go) and +// serves streams on it. +// This is run in its own goroutine (it does network I/O in +// transport.NewServerTransport). +func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return + } + if !s.addConn(st) { + st.Close() + return + } + s.serveStreams(st) +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer s.removeConn(st) + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// 
serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + if !s.addConn(conn) { + conn.Close() + return + } + defer s.removeConn(conn) + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, + } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) +} + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. 
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + st.Close() + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil || s.drain { + return false + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + s.cv.Broadcast() + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { + var ( + cbuf *bytes.Buffer + outPayload *stats.OutPayload + ) + if cp != nil { + cbuf = new(bytes.Buffer) + } + if s.opts.statsHandler != nil { + outPayload = &stats.OutPayload{} + } + hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) + if err != nil { + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + if len(data) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(data), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) + if err == nil && outPayload != nil { + outPayload.SentTime = time.Now() + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) + } + return err +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil { + begin := &stats.Begin{ + BeginTime: time.Now(), + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.firstLine.client = false + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + if s.opts.cp != nil { + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + stream.SetSendCompress(s.opts.cp.Type()) + } + p := &parser{r: stream} + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + } + } + return err + } + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + + // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } + } + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + return Errorf(codes.Internal, err.Error()) + } + } + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(convertCode(appErr), appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + return t.WriteStatus(stream, status.New(codes.OK, "")) +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil { + begin := &stats.Begin{ + BeginTime: time.Now(), + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } + ss := &serverStream{ + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + }() + } + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), 
+ IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + switch err := appErr.(type) { + case transport.StreamError: + appStatus = status.New(err.Code, err.Desc) + default: + appStatus = status.New(convertCode(appErr), appErr.Error()) + } + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + return t.WriteStatus(ss.s, status.New(codes.OK, "")) + +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + srv, ok := s.m[service] + if !ok { + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + 
trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("unknown service %v", service) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + // Unary RPC or Streaming RPC? + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) + trInfo.tr.SetError() + } + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + errDesc := fmt.Sprintf("unknown method %v", method) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. 
+ s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + s.cancel() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + return + } + for lis := range s.lis { + lis.Close() + } + s.lis = nil + s.cancel() + if !s.drain { + for c := range s.conns { + c.(transport.ServerTransport).Drain() + } + s.drain = true + } + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } +} + +func init() { + internal.TestingCloseConns = func(arg interface{}) { + arg.(*Server).testingCloseConns() + } + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + +// testingCloseConns closes all existing transports but keeps s.lis +// accepting new connections. +func (s *Server) testingCloseConns() { + s.mu.Lock() + for c := range s.conns { + c.Close() + delete(s.conns, c) + } + s.mu.Unlock() +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. 
+// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + t := stream.ServerTransport() + if t == nil { + grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) + } + if err := t.WriteHeader(stream, md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} diff --git a/vendor/google.golang.org/grpc/server_test.go b/vendor/google.golang.org/grpc/server_test.go new file mode 100644 index 000000000..6438b5f8a --- /dev/null +++ b/vendor/google.golang.org/grpc/server_test.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "net" + "reflect" + "strings" + "testing" + + "google.golang.org/grpc/test/leakcheck" +) + +type emptyServiceServer interface{} + +type testServer struct{} + +func TestStopBeforeServe(t *testing.T) { + defer leakcheck.Check(t) + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to create listener: %v", err) + } + + server := NewServer() + server.Stop() + err = server.Serve(lis) + if err != ErrServerStopped { + t.Fatalf("server.Serve() error = %v, want %v", err, ErrServerStopped) + } + + // server.Serve is responsible for closing the listener, even if the + // server was already stopped. + err = lis.Close() + if got, want := ErrorDesc(err), "use of closed"; !strings.Contains(got, want) { + t.Errorf("Close() error = %q, want %q", got, want) + } +} + +func TestGetServiceInfo(t *testing.T) { + defer leakcheck.Check(t) + testSd := ServiceDesc{ + ServiceName: "grpc.testing.EmptyService", + HandlerType: (*emptyServiceServer)(nil), + Methods: []MethodDesc{ + { + MethodName: "EmptyCall", + Handler: nil, + }, + }, + Streams: []StreamDesc{ + { + StreamName: "EmptyStream", + Handler: nil, + ServerStreams: false, + ClientStreams: true, + }, + }, + Metadata: []int{0, 2, 1, 3}, + } + + server := NewServer() + server.RegisterService(&testSd, &testServer{}) + + info := server.GetServiceInfo() + want := map[string]ServiceInfo{ + "grpc.testing.EmptyService": { + Methods: []MethodInfo{ + { + Name: "EmptyCall", + IsClientStream: false, + IsServerStream: false, + }, + { + Name: "EmptyStream", + IsClientStream: true, + IsServerStream: false, + }}, + Metadata: []int{0, 2, 1, 3}, + }, + } + + if !reflect.DeepEqual(info, want) { + t.Errorf("GetServiceInfo() = %+v, want %+v", info, want) + } +} diff --git a/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go new file mode 100644 index 000000000..c0c14a24b --- /dev/null +++ 
b/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go @@ -0,0 +1,369 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_testing/test.proto + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + grpc_testing/test.proto + +It has these top-level messages: + SimpleRequest + SimpleResponse +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type SimpleRequest struct { + Id int32 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *SimpleRequest) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +type SimpleResponse struct { + Id int32 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *SimpleResponse) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +func init() { + 
proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for TestService service + +type TestServiceClient interface { + // One request followed by one response. + // The server returns the client id as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // Client stream + ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) + // Server stream + ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) +} + +type testServiceClient struct { + cc *grpc.ClientConn +} + +func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceFullDuplexCallClient{stream} + return x, nil +} + +type TestService_FullDuplexCallClient interface { + Send(*SimpleRequest) error + Recv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testServiceFullDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceFullDuplexCallClient) Send(m *SimpleRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/ClientStreamCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceClientStreamCallClient{stream} + return x, nil +} + +type TestService_ClientStreamCallClient interface { + Send(*SimpleRequest) error + CloseAndRecv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testServiceClientStreamCallClient struct { + grpc.ClientStream +} + +func (x *testServiceClientStreamCallClient) Send(m *SimpleRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceClientStreamCallClient) CloseAndRecv() (*SimpleResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/ServerStreamCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceServerStreamCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_ServerStreamCallClient interface { + Recv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testServiceServerStreamCallClient struct { + grpc.ClientStream +} + +func (x *testServiceServerStreamCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for TestService service + +type TestServiceServer interface { + // One request followed by one response. + // The server returns the client id as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // A sequence of requests with each request served by the server immediately. 
+ // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(TestService_FullDuplexCallServer) error + // Client stream + ClientStreamCall(TestService_ClientStreamCallServer) error + // Server stream + ServerStreamCall(*SimpleRequest, TestService_ServerStreamCallServer) error +} + +func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { + s.RegisterService(&_TestService_serviceDesc, srv) +} + +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) +} + +type TestService_FullDuplexCallServer interface { + Send(*SimpleResponse) error + Recv() (*SimpleRequest, error) + grpc.ServerStream +} + +type testServiceFullDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceFullDuplexCallServer) Send(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_ClientStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return 
srv.(TestServiceServer).ClientStreamCall(&testServiceClientStreamCallServer{stream}) +} + +type TestService_ClientStreamCallServer interface { + SendAndClose(*SimpleResponse) error + Recv() (*SimpleRequest, error) + grpc.ServerStream +} + +type testServiceClientStreamCallServer struct { + grpc.ServerStream +} + +func (x *testServiceClientStreamCallServer) SendAndClose(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceClientStreamCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_ServerStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SimpleRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).ServerStreamCall(m, &testServiceServerStreamCallServer{stream}) +} + +type TestService_ServerStreamCallServer interface { + Send(*SimpleResponse) error + grpc.ServerStream +} + +type testServiceServerStreamCallServer struct { + grpc.ServerStream +} + +func (x *testServiceServerStreamCallServer) Send(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _TestService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UnaryCall", + Handler: _TestService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "FullDuplexCall", + Handler: _TestService_FullDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "ClientStreamCall", + Handler: _TestService_ClientStreamCall_Handler, + ClientStreams: true, + }, + { + StreamName: "ServerStreamCall", + Handler: _TestService_ServerStreamCall_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc_testing/test.proto", +} + +func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } + 
+var fileDescriptor0 = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2f, 0x2a, 0x48, + 0x8e, 0x2f, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0xd7, 0x07, 0xd1, 0x7a, 0x05, 0x45, 0xf9, 0x25, + 0xf9, 0x42, 0x3c, 0x20, 0x09, 0x3d, 0xa8, 0x84, 0x92, 0x3c, 0x17, 0x6f, 0x70, 0x66, 0x6e, 0x41, + 0x4e, 0x6a, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x10, 0x1f, 0x17, 0x53, 0x66, 0x8a, 0x04, + 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x53, 0x66, 0x8a, 0x92, 0x02, 0x17, 0x1f, 0x4c, 0x41, 0x71, + 0x41, 0x7e, 0x5e, 0x71, 0x2a, 0x54, 0x05, 0x33, 0x4c, 0x85, 0xd1, 0x09, 0x26, 0x2e, 0xee, 0x90, + 0xd4, 0xe2, 0x92, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0x54, 0x21, 0x37, 0x2e, 0xce, 0xd0, 0xbc, + 0xc4, 0xa2, 0x4a, 0xe7, 0xc4, 0x9c, 0x1c, 0x21, 0x69, 0x3d, 0x64, 0xeb, 0xf4, 0x50, 0xec, 0x92, + 0x92, 0xc1, 0x2e, 0x09, 0xb5, 0xc7, 0x9f, 0x8b, 0xcf, 0xad, 0x34, 0x27, 0xc7, 0xa5, 0xb4, 0x20, + 0x27, 0xb5, 0x82, 0x42, 0xc3, 0x34, 0x18, 0x0d, 0x18, 0x85, 0xfc, 0xb9, 0x04, 0x9c, 0x73, 0x32, + 0x53, 0xf3, 0x4a, 0x82, 0x4b, 0x8a, 0x52, 0x13, 0x73, 0x29, 0x36, 0x12, 0x64, 0x20, 0xc8, 0xd3, + 0xa9, 0x45, 0x54, 0x31, 0xd0, 0x80, 0x31, 0x89, 0x0d, 0x1c, 0x45, 0xc6, 0x80, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x4c, 0x43, 0x27, 0x67, 0xbd, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto b/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto new file mode 100644 index 000000000..b49a0d5a7 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto @@ -0,0 +1,43 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.testing; + +message SimpleRequest { + int32 id = 2; +} + +message SimpleResponse { + int32 id = 3; +} + +// A simple test service. +service TestService { + // One request followed by one response. + // The server returns the client id as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + rpc FullDuplexCall(stream SimpleRequest) returns (stream SimpleResponse); + + // Client stream + rpc ClientStreamCall(stream SimpleRequest) returns (SimpleResponse); + + // Server stream + rpc ServerStreamCall(SimpleRequest) returns (stream SimpleResponse); +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 000000000..05b384c69 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "net" + + "golang.org/x/net/context" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. 
+ HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 000000000..d5aa2f793 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,294 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. 
+func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if the stats information is from client side. 
+func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. 
+type End struct { + // Client is true if this End is from client side. + Client bool + // EndTime is the time when the RPC ends. + EndTime time.Time + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. 
+func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. 
+// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/stats/stats_test.go b/vendor/google.golang.org/grpc/stats/stats_test.go new file mode 100644 index 000000000..141324c0d --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats_test.go @@ -0,0 +1,1265 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package stats_test + +import ( + "fmt" + "io" + "net" + "reflect" + "sync" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + testpb "google.golang.org/grpc/stats/grpc_testing" +) + +func init() { + grpc.EnableTracing = false +} + +type connCtxKey struct{} +type rpcCtxKey struct{} + +var ( + // For headers: + testMetadata = metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + } + // For trailers: + testTrailerMetadata = metadata.MD{ + "tkey1": []string{"trailerValue1"}, + "tkey2": []string{"trailerValue2"}, + } + // The id for which the service handler should return error. + errorID int32 = 32202 +) + +type testServer struct{} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + md, ok := metadata.FromIncomingContext(ctx) + if ok { + if err := grpc.SendHeader(ctx, md); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SendHeader(_, %v) = %v, want ", md, err) + } + if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SetTrailer(_, %v) = %v, want ", testTrailerMetadata, err) + } + } + + if in.Id == errorID { + return nil, fmt.Errorf("got error id: %v", in.Id) + } + + return &testpb.SimpleResponse{Id: in.Id}, nil +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + md, ok := metadata.FromIncomingContext(stream.Context()) + if ok { + if err := stream.SendHeader(md); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) + } + stream.SetTrailer(testTrailerMetadata) + } + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. 
+ return nil + } + if err != nil { + return err + } + + if in.Id == errorID { + return fmt.Errorf("got error id: %v", in.Id) + } + + if err := stream.Send(&testpb.SimpleResponse{Id: in.Id}); err != nil { + return err + } + } +} + +func (s *testServer) ClientStreamCall(stream testpb.TestService_ClientStreamCallServer) error { + md, ok := metadata.FromIncomingContext(stream.Context()) + if ok { + if err := stream.SendHeader(md); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) + } + stream.SetTrailer(testTrailerMetadata) + } + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + return stream.SendAndClose(&testpb.SimpleResponse{Id: int32(0)}) + } + if err != nil { + return err + } + + if in.Id == errorID { + return fmt.Errorf("got error id: %v", in.Id) + } + } +} + +func (s *testServer) ServerStreamCall(in *testpb.SimpleRequest, stream testpb.TestService_ServerStreamCallServer) error { + md, ok := metadata.FromIncomingContext(stream.Context()) + if ok { + if err := stream.SendHeader(md); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) + } + stream.SetTrailer(testTrailerMetadata) + } + + if in.Id == errorID { + return fmt.Errorf("got error id: %v", in.Id) + } + + for i := 0; i < 5; i++ { + if err := stream.Send(&testpb.SimpleResponse{Id: in.Id}); err != nil { + return err + } + } + return nil +} + +// test is an end-to-end test. It should be created with the newTest +// func, modified as needed, and then started with its startServer method. +// It should be cleaned up with the tearDown method. +type test struct { + t *testing.T + compress string + clientStatsHandler stats.Handler + serverStatsHandler stats.Handler + + testServer testpb.TestServiceServer // nil means none + // srv and srvAddr are set once startServer is called. 
+ srv *grpc.Server + srvAddr string + + cc *grpc.ClientConn // nil until requested via clientConn +} + +func (te *test) tearDown() { + if te.cc != nil { + te.cc.Close() + te.cc = nil + } + te.srv.Stop() +} + +type testConfig struct { + compress string +} + +// newTest returns a new test using the provided testing.T and +// environment. It is returned with default values. Tests should +// modify it before calling its startServer and clientConn methods. +func newTest(t *testing.T, tc *testConfig, ch stats.Handler, sh stats.Handler) *test { + te := &test{ + t: t, + compress: tc.compress, + clientStatsHandler: ch, + serverStatsHandler: sh, + } + return te +} + +// startServer starts a gRPC server listening. Callers should defer a +// call to te.tearDown to clean up. +func (te *test) startServer(ts testpb.TestServiceServer) { + te.testServer = ts + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + te.t.Fatalf("Failed to listen: %v", err) + } + var opts []grpc.ServerOption + if te.compress == "gzip" { + opts = append(opts, + grpc.RPCCompressor(grpc.NewGZIPCompressor()), + grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), + ) + } + if te.serverStatsHandler != nil { + opts = append(opts, grpc.StatsHandler(te.serverStatsHandler)) + } + s := grpc.NewServer(opts...) + te.srv = s + if te.testServer != nil { + testpb.RegisterTestServiceServer(s, te.testServer) + } + + go s.Serve(lis) + te.srvAddr = lis.Addr().String() +} + +func (te *test) clientConn() *grpc.ClientConn { + if te.cc != nil { + return te.cc + } + opts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()} + if te.compress == "gzip" { + opts = append(opts, + grpc.WithCompressor(grpc.NewGZIPCompressor()), + grpc.WithDecompressor(grpc.NewGZIPDecompressor()), + ) + } + if te.clientStatsHandler != nil { + opts = append(opts, grpc.WithStatsHandler(te.clientStatsHandler)) + } + + var err error + te.cc, err = grpc.Dial(te.srvAddr, opts...) 
+ if err != nil { + te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err) + } + return te.cc +} + +type rpcType int + +const ( + unaryRPC rpcType = iota + clientStreamRPC + serverStreamRPC + fullDuplexStreamRPC +) + +type rpcConfig struct { + count int // Number of requests and responses for streaming RPCs. + success bool // Whether the RPC should succeed or return error. + failfast bool + callType rpcType // Type of RPC. + noLastRecv bool // Whether to call recv for io.EOF. When true, last recv won't be called. Only valid for streaming RPCs. +} + +func (te *test) doUnaryCall(c *rpcConfig) (*testpb.SimpleRequest, *testpb.SimpleResponse, error) { + var ( + resp *testpb.SimpleResponse + req *testpb.SimpleRequest + err error + ) + tc := testpb.NewTestServiceClient(te.clientConn()) + if c.success { + req = &testpb.SimpleRequest{Id: errorID + 1} + } else { + req = &testpb.SimpleRequest{Id: errorID} + } + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + + resp, err = tc.UnaryCall(ctx, req, grpc.FailFast(c.failfast)) + return req, resp, err +} + +func (te *test) doFullDuplexCallRoundtrip(c *rpcConfig) ([]*testpb.SimpleRequest, []*testpb.SimpleResponse, error) { + var ( + reqs []*testpb.SimpleRequest + resps []*testpb.SimpleResponse + err error + ) + tc := testpb.NewTestServiceClient(te.clientConn()) + stream, err := tc.FullDuplexCall(metadata.NewOutgoingContext(context.Background(), testMetadata), grpc.FailFast(c.failfast)) + if err != nil { + return reqs, resps, err + } + var startID int32 + if !c.success { + startID = errorID + } + for i := 0; i < c.count; i++ { + req := &testpb.SimpleRequest{ + Id: int32(i) + startID, + } + reqs = append(reqs, req) + if err = stream.Send(req); err != nil { + return reqs, resps, err + } + var resp *testpb.SimpleResponse + if resp, err = stream.Recv(); err != nil { + return reqs, resps, err + } + resps = append(resps, resp) + } + if err = stream.CloseSend(); err != nil && err != io.EOF { + return reqs, resps, err + } + if 
!c.noLastRecv { + if _, err = stream.Recv(); err != io.EOF { + return reqs, resps, err + } + } else { + // In the case of not calling the last recv, sleep to avoid + // returning too fast to miss the remaining stats (InTrailer and End). + time.Sleep(time.Second) + } + + return reqs, resps, nil +} + +func (te *test) doClientStreamCall(c *rpcConfig) ([]*testpb.SimpleRequest, *testpb.SimpleResponse, error) { + var ( + reqs []*testpb.SimpleRequest + resp *testpb.SimpleResponse + err error + ) + tc := testpb.NewTestServiceClient(te.clientConn()) + stream, err := tc.ClientStreamCall(metadata.NewOutgoingContext(context.Background(), testMetadata), grpc.FailFast(c.failfast)) + if err != nil { + return reqs, resp, err + } + var startID int32 + if !c.success { + startID = errorID + } + for i := 0; i < c.count; i++ { + req := &testpb.SimpleRequest{ + Id: int32(i) + startID, + } + reqs = append(reqs, req) + if err = stream.Send(req); err != nil { + return reqs, resp, err + } + } + resp, err = stream.CloseAndRecv() + return reqs, resp, err +} + +func (te *test) doServerStreamCall(c *rpcConfig) (*testpb.SimpleRequest, []*testpb.SimpleResponse, error) { + var ( + req *testpb.SimpleRequest + resps []*testpb.SimpleResponse + err error + ) + + tc := testpb.NewTestServiceClient(te.clientConn()) + + var startID int32 + if !c.success { + startID = errorID + } + req = &testpb.SimpleRequest{Id: startID} + stream, err := tc.ServerStreamCall(metadata.NewOutgoingContext(context.Background(), testMetadata), req, grpc.FailFast(c.failfast)) + if err != nil { + return req, resps, err + } + for { + var resp *testpb.SimpleResponse + resp, err := stream.Recv() + if err == io.EOF { + return req, resps, nil + } else if err != nil { + return req, resps, err + } + resps = append(resps, resp) + } +} + +type expectedData struct { + method string + serverAddr string + compression string + reqIdx int + requests []*testpb.SimpleRequest + respIdx int + responses []*testpb.SimpleResponse + err error + 
failfast bool +} + +type gotData struct { + ctx context.Context + client bool + s interface{} // This could be RPCStats or ConnStats. +} + +const ( + begin int = iota + end + inPayload + inHeader + inTrailer + outPayload + outHeader + outTrailer + connbegin + connend +) + +func checkBegin(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.Begin + ) + if st, ok = d.s.(*stats.Begin); !ok { + t.Fatalf("got %T, want Begin", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + if st.BeginTime.IsZero() { + t.Fatalf("st.BeginTime = %v, want ", st.BeginTime) + } + if d.client { + if st.FailFast != e.failfast { + t.Fatalf("st.FailFast = %v, want %v", st.FailFast, e.failfast) + } + } +} + +func checkInHeader(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.InHeader + ) + if st, ok = d.s.(*stats.InHeader); !ok { + t.Fatalf("got %T, want InHeader", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + if !d.client { + if st.FullMethod != e.method { + t.Fatalf("st.FullMethod = %s, want %v", st.FullMethod, e.method) + } + if st.LocalAddr.String() != e.serverAddr { + t.Fatalf("st.LocalAddr = %v, want %v", st.LocalAddr, e.serverAddr) + } + if st.Compression != e.compression { + t.Fatalf("st.Compression = %v, want %v", st.Compression, e.compression) + } + + if connInfo, ok := d.ctx.Value(connCtxKey{}).(*stats.ConnTagInfo); ok { + if connInfo.RemoteAddr != st.RemoteAddr { + t.Fatalf("connInfo.RemoteAddr = %v, want %v", connInfo.RemoteAddr, st.RemoteAddr) + } + if connInfo.LocalAddr != st.LocalAddr { + t.Fatalf("connInfo.LocalAddr = %v, want %v", connInfo.LocalAddr, st.LocalAddr) + } + } else { + t.Fatalf("got context %v, want one with connCtxKey", d.ctx) + } + if rpcInfo, ok := d.ctx.Value(rpcCtxKey{}).(*stats.RPCTagInfo); ok { + if rpcInfo.FullMethodName != st.FullMethod { + t.Fatalf("rpcInfo.FullMethod = %s, want %v", rpcInfo.FullMethodName, st.FullMethod) + } + } else { + t.Fatalf("got context %v, want 
one with rpcCtxKey", d.ctx) + } + } +} + +func checkInPayload(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.InPayload + ) + if st, ok = d.s.(*stats.InPayload); !ok { + t.Fatalf("got %T, want InPayload", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + if d.client { + b, err := proto.Marshal(e.responses[e.respIdx]) + if err != nil { + t.Fatalf("failed to marshal message: %v", err) + } + if reflect.TypeOf(st.Payload) != reflect.TypeOf(e.responses[e.respIdx]) { + t.Fatalf("st.Payload = %T, want %T", st.Payload, e.responses[e.respIdx]) + } + e.respIdx++ + if string(st.Data) != string(b) { + t.Fatalf("st.Data = %v, want %v", st.Data, b) + } + if st.Length != len(b) { + t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b)) + } + } else { + b, err := proto.Marshal(e.requests[e.reqIdx]) + if err != nil { + t.Fatalf("failed to marshal message: %v", err) + } + if reflect.TypeOf(st.Payload) != reflect.TypeOf(e.requests[e.reqIdx]) { + t.Fatalf("st.Payload = %T, want %T", st.Payload, e.requests[e.reqIdx]) + } + e.reqIdx++ + if string(st.Data) != string(b) { + t.Fatalf("st.Data = %v, want %v", st.Data, b) + } + if st.Length != len(b) { + t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b)) + } + } + // TODO check WireLength and ReceivedTime. 
+ if st.RecvTime.IsZero() { + t.Fatalf("st.ReceivedTime = %v, want ", st.RecvTime) + } +} + +func checkInTrailer(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + ) + if _, ok = d.s.(*stats.InTrailer); !ok { + t.Fatalf("got %T, want InTrailer", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } +} + +func checkOutHeader(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.OutHeader + ) + if st, ok = d.s.(*stats.OutHeader); !ok { + t.Fatalf("got %T, want OutHeader", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + if d.client { + if st.FullMethod != e.method { + t.Fatalf("st.FullMethod = %s, want %v", st.FullMethod, e.method) + } + if st.RemoteAddr.String() != e.serverAddr { + t.Fatalf("st.RemoteAddr = %v, want %v", st.RemoteAddr, e.serverAddr) + } + if st.Compression != e.compression { + t.Fatalf("st.Compression = %v, want %v", st.Compression, e.compression) + } + + if rpcInfo, ok := d.ctx.Value(rpcCtxKey{}).(*stats.RPCTagInfo); ok { + if rpcInfo.FullMethodName != st.FullMethod { + t.Fatalf("rpcInfo.FullMethod = %s, want %v", rpcInfo.FullMethodName, st.FullMethod) + } + } else { + t.Fatalf("got context %v, want one with rpcCtxKey", d.ctx) + } + } +} + +func checkOutPayload(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.OutPayload + ) + if st, ok = d.s.(*stats.OutPayload); !ok { + t.Fatalf("got %T, want OutPayload", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + if d.client { + b, err := proto.Marshal(e.requests[e.reqIdx]) + if err != nil { + t.Fatalf("failed to marshal message: %v", err) + } + if reflect.TypeOf(st.Payload) != reflect.TypeOf(e.requests[e.reqIdx]) { + t.Fatalf("st.Payload = %T, want %T", st.Payload, e.requests[e.reqIdx]) + } + e.reqIdx++ + if string(st.Data) != string(b) { + t.Fatalf("st.Data = %v, want %v", st.Data, b) + } + if st.Length != len(b) { + t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b)) + } + } else { + b, err := 
proto.Marshal(e.responses[e.respIdx]) + if err != nil { + t.Fatalf("failed to marshal message: %v", err) + } + if reflect.TypeOf(st.Payload) != reflect.TypeOf(e.responses[e.respIdx]) { + t.Fatalf("st.Payload = %T, want %T", st.Payload, e.responses[e.respIdx]) + } + e.respIdx++ + if string(st.Data) != string(b) { + t.Fatalf("st.Data = %v, want %v", st.Data, b) + } + if st.Length != len(b) { + t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b)) + } + } + // TODO check WireLength and ReceivedTime. + if st.SentTime.IsZero() { + t.Fatalf("st.SentTime = %v, want ", st.SentTime) + } +} + +func checkOutTrailer(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.OutTrailer + ) + if st, ok = d.s.(*stats.OutTrailer); !ok { + t.Fatalf("got %T, want OutTrailer", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + if st.Client { + t.Fatalf("st IsClient = true, want false") + } +} + +func checkEnd(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.End + ) + if st, ok = d.s.(*stats.End); !ok { + t.Fatalf("got %T, want End", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + if st.EndTime.IsZero() { + t.Fatalf("st.EndTime = %v, want ", st.EndTime) + } + if grpc.Code(st.Error) != grpc.Code(e.err) || grpc.ErrorDesc(st.Error) != grpc.ErrorDesc(e.err) { + t.Fatalf("st.Error = %v, want %v", st.Error, e.err) + } +} + +func checkConnBegin(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.ConnBegin + ) + if st, ok = d.s.(*stats.ConnBegin); !ok { + t.Fatalf("got %T, want ConnBegin", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + st.IsClient() // TODO remove this. +} + +func checkConnEnd(t *testing.T, d *gotData, e *expectedData) { + var ( + ok bool + st *stats.ConnEnd + ) + if st, ok = d.s.(*stats.ConnEnd); !ok { + t.Fatalf("got %T, want ConnEnd", d.s) + } + if d.ctx == nil { + t.Fatalf("d.ctx = nil, want ") + } + st.IsClient() // TODO remove this. 
+} + +type statshandler struct { + mu sync.Mutex + gotRPC []*gotData + gotConn []*gotData +} + +func (h *statshandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + return context.WithValue(ctx, connCtxKey{}, info) +} + +func (h *statshandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + return context.WithValue(ctx, rpcCtxKey{}, info) +} + +func (h *statshandler) HandleConn(ctx context.Context, s stats.ConnStats) { + h.mu.Lock() + defer h.mu.Unlock() + h.gotConn = append(h.gotConn, &gotData{ctx, s.IsClient(), s}) +} + +func (h *statshandler) HandleRPC(ctx context.Context, s stats.RPCStats) { + h.mu.Lock() + defer h.mu.Unlock() + h.gotRPC = append(h.gotRPC, &gotData{ctx, s.IsClient(), s}) +} + +func checkConnStats(t *testing.T, got []*gotData) { + if len(got) <= 0 || len(got)%2 != 0 { + for i, g := range got { + t.Errorf(" - %v, %T = %+v, ctx: %v", i, g.s, g.s, g.ctx) + } + t.Fatalf("got %v stats, want even positive number", len(got)) + } + // The first conn stats must be a ConnBegin. + checkConnBegin(t, got[0], nil) + // The last conn stats must be a ConnEnd. 
+ checkConnEnd(t, got[len(got)-1], nil) +} + +func checkServerStats(t *testing.T, got []*gotData, expect *expectedData, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) { + if len(got) != len(checkFuncs) { + for i, g := range got { + t.Errorf(" - %v, %T", i, g.s) + } + t.Fatalf("got %v stats, want %v stats", len(got), len(checkFuncs)) + } + + var rpcctx context.Context + for i := 0; i < len(got); i++ { + if _, ok := got[i].s.(stats.RPCStats); ok { + if rpcctx != nil && got[i].ctx != rpcctx { + t.Fatalf("got different contexts with stats %T", got[i].s) + } + rpcctx = got[i].ctx + } + } + + for i, f := range checkFuncs { + f(t, got[i], expect) + } +} + +func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) { + h := &statshandler{} + te := newTest(t, tc, nil, h) + te.startServer(&testServer{}) + defer te.tearDown() + + var ( + reqs []*testpb.SimpleRequest + resps []*testpb.SimpleResponse + err error + method string + + req *testpb.SimpleRequest + resp *testpb.SimpleResponse + e error + ) + + switch cc.callType { + case unaryRPC: + method = "/grpc.testing.TestService/UnaryCall" + req, resp, e = te.doUnaryCall(cc) + reqs = []*testpb.SimpleRequest{req} + resps = []*testpb.SimpleResponse{resp} + err = e + case clientStreamRPC: + method = "/grpc.testing.TestService/ClientStreamCall" + reqs, resp, e = te.doClientStreamCall(cc) + resps = []*testpb.SimpleResponse{resp} + err = e + case serverStreamRPC: + method = "/grpc.testing.TestService/ServerStreamCall" + req, resps, e = te.doServerStreamCall(cc) + reqs = []*testpb.SimpleRequest{req} + err = e + case fullDuplexStreamRPC: + method = "/grpc.testing.TestService/FullDuplexCall" + reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) + } + if cc.success != (err == nil) { + t.Fatalf("cc.success: %v, got error: %v", cc.success, err) + } + te.cc.Close() + te.srv.GracefulStop() // Wait for the server to stop. 
+ + for { + h.mu.Lock() + if len(h.gotRPC) >= len(checkFuncs) { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + for { + h.mu.Lock() + if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + expect := &expectedData{ + serverAddr: te.srvAddr, + compression: tc.compress, + method: method, + requests: reqs, + responses: resps, + err: err, + } + + h.mu.Lock() + checkConnStats(t, h.gotConn) + h.mu.Unlock() + checkServerStats(t, h.gotRPC, expect, checkFuncs) +} + +func TestServerStatsUnaryRPC(t *testing.T) { + testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, callType: unaryRPC}, []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkInPayload, + checkOutHeader, + checkOutPayload, + checkOutTrailer, + checkEnd, + }) +} + +func TestServerStatsUnaryRPCError(t *testing.T) { + testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, callType: unaryRPC}, []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkInPayload, + checkOutHeader, + checkOutTrailer, + checkEnd, + }) +} + +func TestServerStatsClientStreamRPC(t *testing.T) { + count := 5 + checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkOutHeader, + } + ioPayFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkInPayload, + } + for i := 0; i < count; i++ { + checkFuncs = append(checkFuncs, ioPayFuncs...) 
+ } + checkFuncs = append(checkFuncs, + checkOutPayload, + checkOutTrailer, + checkEnd, + ) + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: clientStreamRPC}, checkFuncs) +} + +func TestServerStatsClientStreamRPCError(t *testing.T) { + count := 1 + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: clientStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkOutHeader, + checkInPayload, + checkOutTrailer, + checkEnd, + }) +} + +func TestServerStatsServerStreamRPC(t *testing.T) { + count := 5 + checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkInPayload, + checkOutHeader, + } + ioPayFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkOutPayload, + } + for i := 0; i < count; i++ { + checkFuncs = append(checkFuncs, ioPayFuncs...) + } + checkFuncs = append(checkFuncs, + checkOutTrailer, + checkEnd, + ) + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: serverStreamRPC}, checkFuncs) +} + +func TestServerStatsServerStreamRPCError(t *testing.T) { + count := 5 + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: serverStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkInPayload, + checkOutHeader, + checkOutTrailer, + checkEnd, + }) +} + +func TestServerStatsFullDuplexRPC(t *testing.T) { + count := 5 + checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkOutHeader, + } + ioPayFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkInPayload, + checkOutPayload, + } + for i := 0; i < count; i++ { + checkFuncs = append(checkFuncs, ioPayFuncs...) 
+ } + checkFuncs = append(checkFuncs, + checkOutTrailer, + checkEnd, + ) + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}, checkFuncs) +} + +func TestServerStatsFullDuplexRPCError(t *testing.T) { + count := 5 + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkOutHeader, + checkInPayload, + checkOutTrailer, + checkEnd, + }) +} + +type checkFuncWithCount struct { + f func(t *testing.T, d *gotData, e *expectedData) + c int // expected count +} + +func checkClientStats(t *testing.T, got []*gotData, expect *expectedData, checkFuncs map[int]*checkFuncWithCount) { + var expectLen int + for _, v := range checkFuncs { + expectLen += v.c + } + if len(got) != expectLen { + for i, g := range got { + t.Errorf(" - %v, %T", i, g.s) + } + t.Fatalf("got %v stats, want %v stats", len(got), expectLen) + } + + var tagInfoInCtx *stats.RPCTagInfo + for i := 0; i < len(got); i++ { + if _, ok := got[i].s.(stats.RPCStats); ok { + tagInfoInCtxNew, _ := got[i].ctx.Value(rpcCtxKey{}).(*stats.RPCTagInfo) + if tagInfoInCtx != nil && tagInfoInCtx != tagInfoInCtxNew { + t.Fatalf("got context containing different tagInfo with stats %T", got[i].s) + } + tagInfoInCtx = tagInfoInCtxNew + } + } + + for _, s := range got { + switch s.s.(type) { + case *stats.Begin: + if checkFuncs[begin].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[begin].f(t, s, expect) + checkFuncs[begin].c-- + case *stats.OutHeader: + if checkFuncs[outHeader].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[outHeader].f(t, s, expect) + checkFuncs[outHeader].c-- + case *stats.OutPayload: + if checkFuncs[outPayload].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[outPayload].f(t, s, expect) + checkFuncs[outPayload].c-- + case *stats.InHeader: + if 
checkFuncs[inHeader].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[inHeader].f(t, s, expect) + checkFuncs[inHeader].c-- + case *stats.InPayload: + if checkFuncs[inPayload].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[inPayload].f(t, s, expect) + checkFuncs[inPayload].c-- + case *stats.InTrailer: + if checkFuncs[inTrailer].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[inTrailer].f(t, s, expect) + checkFuncs[inTrailer].c-- + case *stats.End: + if checkFuncs[end].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[end].f(t, s, expect) + checkFuncs[end].c-- + case *stats.ConnBegin: + if checkFuncs[connbegin].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[connbegin].f(t, s, expect) + checkFuncs[connbegin].c-- + case *stats.ConnEnd: + if checkFuncs[connend].c <= 0 { + t.Fatalf("unexpected stats: %T", s.s) + } + checkFuncs[connend].f(t, s, expect) + checkFuncs[connend].c-- + default: + t.Fatalf("unexpected stats: %T", s.s) + } + } +} + +func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map[int]*checkFuncWithCount) { + h := &statshandler{} + te := newTest(t, tc, h, nil) + te.startServer(&testServer{}) + defer te.tearDown() + + var ( + reqs []*testpb.SimpleRequest + resps []*testpb.SimpleResponse + method string + err error + + req *testpb.SimpleRequest + resp *testpb.SimpleResponse + e error + ) + switch cc.callType { + case unaryRPC: + method = "/grpc.testing.TestService/UnaryCall" + req, resp, e = te.doUnaryCall(cc) + reqs = []*testpb.SimpleRequest{req} + resps = []*testpb.SimpleResponse{resp} + err = e + case clientStreamRPC: + method = "/grpc.testing.TestService/ClientStreamCall" + reqs, resp, e = te.doClientStreamCall(cc) + resps = []*testpb.SimpleResponse{resp} + err = e + case serverStreamRPC: + method = "/grpc.testing.TestService/ServerStreamCall" + req, resps, e = te.doServerStreamCall(cc) + reqs = []*testpb.SimpleRequest{req} + err = e + case 
fullDuplexStreamRPC: + method = "/grpc.testing.TestService/FullDuplexCall" + reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) + } + if cc.success != (err == nil) { + t.Fatalf("cc.success: %v, got error: %v", cc.success, err) + } + te.cc.Close() + te.srv.GracefulStop() // Wait for the server to stop. + + lenRPCStats := 0 + for _, v := range checkFuncs { + lenRPCStats += v.c + } + for { + h.mu.Lock() + if len(h.gotRPC) >= lenRPCStats { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + for { + h.mu.Lock() + if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + expect := &expectedData{ + serverAddr: te.srvAddr, + compression: tc.compress, + method: method, + requests: reqs, + responses: resps, + failfast: cc.failfast, + err: err, + } + + h.mu.Lock() + checkConnStats(t, h.gotConn) + h.mu.Unlock() + checkClientStats(t, h.gotRPC, expect, checkFuncs) +} + +func TestClientStatsUnaryRPC(t *testing.T) { + testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, failfast: false, callType: unaryRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, 1}, + inHeader: {checkInHeader, 1}, + inPayload: {checkInPayload, 1}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsUnaryRPCError(t *testing.T) { + testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, failfast: false, callType: unaryRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, 1}, + inHeader: {checkInHeader, 1}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsClientStreamRPC(t *testing.T) { + count := 5 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: 
clientStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + inHeader: {checkInHeader, 1}, + outPayload: {checkOutPayload, count}, + inTrailer: {checkInTrailer, 1}, + inPayload: {checkInPayload, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsClientStreamRPCError(t *testing.T) { + count := 1 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: clientStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + inHeader: {checkInHeader, 1}, + outPayload: {checkOutPayload, 1}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsServerStreamRPC(t *testing.T) { + count := 5 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: serverStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, 1}, + inHeader: {checkInHeader, 1}, + inPayload: {checkInPayload, count}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsServerStreamRPCError(t *testing.T) { + count := 5 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: serverStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, 1}, + inHeader: {checkInHeader, 1}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsFullDuplexRPC(t *testing.T) { + count := 5 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: fullDuplexStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, count}, + inHeader: {checkInHeader, 1}, + inPayload: 
{checkInPayload, count}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsFullDuplexRPCError(t *testing.T) { + count := 5 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: fullDuplexStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, 1}, + inHeader: {checkInHeader, 1}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +// If the user doesn't call the last recv() on clientStream. +func TestClientStatsFullDuplexRPCNotCallingLastRecv(t *testing.T) { + count := 1 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: fullDuplexStreamRPC, noLastRecv: true}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, count}, + inHeader: {checkInHeader, 1}, + inPayload: {checkInPayload, count}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestTags(t *testing.T) { + b := []byte{5, 2, 4, 3, 1} + ctx := stats.SetTags(context.Background(), b) + if tg := stats.OutgoingTags(ctx); !reflect.DeepEqual(tg, b) { + t.Errorf("OutgoingTags(%v) = %v; want %v", ctx, tg, b) + } + if tg := stats.Tags(ctx); tg != nil { + t.Errorf("Tags(%v) = %v; want nil", ctx, tg) + } + + ctx = stats.SetIncomingTags(context.Background(), b) + if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, b) { + t.Errorf("Tags(%v) = %v; want %v", ctx, tg, b) + } + if tg := stats.OutgoingTags(ctx); tg != nil { + t.Errorf("OutgoingTags(%v) = %v; want nil", ctx, tg) + } +} + +func TestTrace(t *testing.T) { + b := []byte{5, 2, 4, 3, 1} + ctx := stats.SetTrace(context.Background(), b) + if tr := stats.OutgoingTrace(ctx); !reflect.DeepEqual(tr, b) { + t.Errorf("OutgoingTrace(%v) = %v; want %v", ctx, tr, b) + } + if tr := stats.Trace(ctx); tr != nil { + t.Errorf("Trace(%v) = 
%v; want nil", ctx, tr) + } + + ctx = stats.SetIncomingTrace(context.Background(), b) + if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, b) { + t.Errorf("Trace(%v) = %v; want %v", ctx, tr, b) + } + if tr := stats.OutgoingTrace(ctx); tr != nil { + t.Errorf("OutgoingTrace(%v) = %v; want nil", ctx, tr) + } +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 000000000..871dc4b31 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,168 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// statusError is an alias of a status proto. 
It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) status() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. 
If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package, otherwise it returns nil, false. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true + } + if s, ok := err.(*statusError); ok { + return s.status(), true + } + return nil, false +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. 
+func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} diff --git a/vendor/google.golang.org/grpc/status/status_test.go b/vendor/google.golang.org/grpc/status/status_test.go new file mode 100644 index 000000000..69be8c9f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status_test.go @@ -0,0 +1,261 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package status + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + apb "github.com/golang/protobuf/ptypes/any" + dpb "github.com/golang/protobuf/ptypes/duration" + cpb "google.golang.org/genproto/googleapis/rpc/code" + epb "google.golang.org/genproto/googleapis/rpc/errdetails" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +func TestErrorsWithSameParameters(t *testing.T) { + const description = "some description" + e1 := Errorf(codes.AlreadyExists, description) + e2 := Errorf(codes.AlreadyExists, description) + if e1 == e2 || !reflect.DeepEqual(e1, e2) { + t.Fatalf("Errors should be equivalent but unique - e1: %v, %v e2: %p, %v", e1.(*statusError), e1, e2.(*statusError), e2) + } +} + +func TestFromToProto(t *testing.T) { + s := &spb.Status{ + Code: int32(codes.Internal), + Message: "test test test", + Details: []*apb.Any{{TypeUrl: "foo", Value: []byte{3, 2, 1}}}, + } + + err := FromProto(s) + if got := err.Proto(); !proto.Equal(s, got) { + t.Fatalf("Expected errors to be identical - s: %v got: %v", s, got) + } +} + +func TestFromNilProto(t *testing.T) { + tests := []*Status{nil, FromProto(nil)} + for _, s := range tests { + if c := s.Code(); c != codes.OK { + t.Errorf("s: %v - Expected s.Code() = OK; got %v", s, c) + } + if m := s.Message(); m != "" { + t.Errorf("s: %v - Expected s.Message() = \"\"; got %q", s, m) + } + if p := s.Proto(); p != nil { + t.Errorf("s: %v - Expected s.Proto() = nil; got %q", s, p) + } + if e := s.Err(); e != nil { + t.Errorf("s: %v - Expected s.Err() = nil; got %v", s, e) + } + } +} + +func TestError(t *testing.T) { + err := Error(codes.Internal, "test description") + if got, want := err.Error(), "rpc error: code = Internal desc = test description"; got != want { + t.Fatalf("err.Error() = %q; want %q", got, want) + } + s, _ := FromError(err) + if got, want := s.Code(), codes.Internal; got != want 
{ + t.Fatalf("err.Code() = %s; want %s", got, want) + } + if got, want := s.Message(), "test description"; got != want { + t.Fatalf("err.Message() = %s; want %s", got, want) + } +} + +func TestErrorOK(t *testing.T) { + err := Error(codes.OK, "foo") + if err != nil { + t.Fatalf("Error(codes.OK, _) = %p; want nil", err.(*statusError)) + } +} + +func TestErrorProtoOK(t *testing.T) { + s := &spb.Status{Code: int32(codes.OK)} + if got := ErrorProto(s); got != nil { + t.Fatalf("ErrorProto(%v) = %v; want nil", s, got) + } +} + +func TestFromError(t *testing.T) { + code, message := codes.Internal, "test description" + err := Error(code, message) + s, ok := FromError(err) + if !ok || s.Code() != code || s.Message() != message || s.Err() == nil { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) + } +} + +func TestFromErrorOK(t *testing.T) { + code, message := codes.OK, "" + s, ok := FromError(nil) + if !ok || s.Code() != code || s.Message() != message || s.Err() != nil { + t.Fatalf("FromError(nil) = %v, %v; want , true", s, ok, code, message) + } +} + +func TestStatus_ErrorDetails(t *testing.T) { + tests := []struct { + code codes.Code + details []proto.Message + }{ + { + code: codes.NotFound, + details: nil, + }, + { + code: codes.NotFound, + details: []proto.Message{ + &epb.ResourceInfo{ + ResourceType: "book", + ResourceName: "projects/1234/books/5678", + Owner: "User", + }, + }, + }, + { + code: codes.Internal, + details: []proto.Message{ + &epb.DebugInfo{ + StackEntries: []string{ + "first stack", + "second stack", + }, + }, + }, + }, + { + code: codes.Unavailable, + details: []proto.Message{ + &epb.RetryInfo{ + RetryDelay: &dpb.Duration{Seconds: 60}, + }, + &epb.ResourceInfo{ + ResourceType: "book", + ResourceName: "projects/1234/books/5678", + Owner: "User", + }, + }, + }, + } + + for _, tc := range tests { + s, err := New(tc.code, "").WithDetails(tc.details...) 
+ if err != nil { + t.Fatalf("(%v).WithDetails(%+v) failed: %v", str(s), tc.details, err) + } + details := s.Details() + for i := range details { + if !proto.Equal(details[i].(proto.Message), tc.details[i]) { + t.Fatalf("(%v).Details()[%d] = %+v, want %+v", str(s), i, details[i], tc.details[i]) + } + } + } +} + +func TestStatus_WithDetails_Fail(t *testing.T) { + tests := []*Status{ + nil, + FromProto(nil), + New(codes.OK, ""), + } + for _, s := range tests { + if s, err := s.WithDetails(); err == nil || s != nil { + t.Fatalf("(%v).WithDetails(%+v) = %v, %v; want nil, non-nil", str(s), []proto.Message{}, s, err) + } + } +} + +func TestStatus_ErrorDetails_Fail(t *testing.T) { + tests := []struct { + s *Status + i []interface{} + }{ + { + nil, + nil, + }, + { + FromProto(nil), + nil, + }, + { + New(codes.OK, ""), + []interface{}{}, + }, + { + FromProto(&spb.Status{ + Code: int32(cpb.Code_CANCELLED), + Details: []*apb.Any{ + { + TypeUrl: "", + Value: []byte{}, + }, + mustMarshalAny(&epb.ResourceInfo{ + ResourceType: "book", + ResourceName: "projects/1234/books/5678", + Owner: "User", + }), + }, + }), + []interface{}{ + errors.New(`message type url "" is invalid`), + &epb.ResourceInfo{ + ResourceType: "book", + ResourceName: "projects/1234/books/5678", + Owner: "User", + }, + }, + }, + } + for _, tc := range tests { + got := tc.s.Details() + if !reflect.DeepEqual(got, tc.i) { + t.Errorf("(%v).Details() = %+v, want %+v", str(tc.s), got, tc.i) + } + } +} + +func str(s *Status) string { + if s == nil { + return "nil" + } + if s.s == nil { + return "" + } + return fmt.Sprintf("", codes.Code(s.s.GetCode()), s.s.GetMessage(), s.s.GetDetails()) +} + +// mustMarshalAny converts a protobuf message to an any. 
+func mustMarshalAny(msg proto.Message) *apb.Any { + any, err := ptypes.MarshalAny(msg) + if err != nil { + panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", msg, err)) + } + return any +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..75eab40b1 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,657 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "errors" + "io" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +type Stream interface { + // Context returns the context for this stream. 
+ Context() context.Context + // SendMsg blocks until it sends m, the stream is done or the stream + // breaks. + // On error, it aborts the stream and returns an RPC status on client + // side. On server side, it simply returns the error to the caller. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message or the stream is + // done. On client side, it returns io.EOF when the stream is done. On + // any other error, it aborts the stream and returns an RPC status. On + // server side, it simply returns the error to the caller. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientStream defines the interface a client stream has to satisfy. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. + CloseSend() error + // Stream.SendMsg() may return a non-nil error when something wrong happens sending + // the request. The returned error indicates the status of this sending, not the final + // status of the RPC. 
+ // Always call Stream.RecvMsg() to get the final status if you care about the status of + // the RPC. + Stream +} + +// NewClientStream creates a new Stream for the client side. This is called +// by generated code. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + var ( + t transport.ClientTransport + s *transport.Stream + done func(balancer.DoneInfo) + cancel context.CancelFunc + ) + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + if mc.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer func() { + if err != nil { + cancel() + } + }() + } + + opts = append(cc.dopts.callOptions, opts...) + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + // If it's not client streaming, we should already have the request to be sent, + // so we don't flush the header. + // If it's client streaming, the user may never send a request or send it any + // time soon, so we ask the transport to flush the header. 
+ Flush: desc.ClientStreams, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo traceInfo + if EnableTracing { + trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + trInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + defer func() { + if err != nil { + // Need to call tr.finish() if error is returned. + // Because tr will not be returned to caller. + trInfo.tr.LazyPrintf("RPC: [%v]", err) + trInfo.tr.SetError() + trInfo.tr.Finish() + } + }() + } + ctx = newContextWithRPCInfo(ctx, c.failFast) + sh := cc.dopts.copts.StatsHandler + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + begin := &stats.Begin{ + Client: true, + BeginTime: time.Now(), + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + defer func() { + if err != nil { + // Only handle end stats if err != nil. + end := &stats.End{ + Client: true, + Error: err, + } + sh.HandleRPC(ctx, end) + } + }() + } + for { + t, done, err = cc.getTransport(ctx, c.failFast) + if err != nil { + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := status.FromError(err); ok { + return nil, err + } + if err == errConnClosing || err == errConnUnavailable { + if c.failFast { + return nil, Errorf(codes.Unavailable, "%v", err) + } + continue + } + // All the other errors are treated as Internal errors. + return nil, Errorf(codes.Internal, "%v", err) + } + + s, err = t.NewStream(ctx, callHdr) + if err != nil { + if _, ok := err.(transport.ConnectionError); ok && done != nil { + // If error is connection error, transport was sending data on wire, + // and we are not sure if anything has been sent on wire. + // If error is not connection error, we are sure nothing has been sent. 
+ updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) + } + if done != nil { + done(balancer.DoneInfo{Err: err}) + done = nil + } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return nil, toRPCErr(err) + } + break + } + // Set callInfo.peer object from stream's context. + if peer, ok := peer.FromContext(s.Context()); ok { + c.peer = peer + } + cs := &clientStream{ + opts: opts, + c: c, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + cancel: cancel, + + done: done, + t: t, + s: s, + p: &parser{r: s}, + + tracing: EnableTracing, + trInfo: trInfo, + + statsCtx: ctx, + statsHandler: cc.dopts.copts.StatsHandler, + } + // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination + // when there is no pending I/O operations on this stream. + go func() { + select { + case <-t.Error(): + // Incur transport error, simply exit. + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + cs.closeTransportStream(ErrClientConnClosing) + case <-s.Done(): + // TODO: The trace of the RPC is terminated here when there is no pending + // I/O, which is probably not the optimal solution. + cs.finish(s.Status().Err()) + cs.closeTransportStream(nil) + case <-s.GoAway(): + cs.finish(errConnDrain) + cs.closeTransportStream(errConnDrain) + case <-s.Context().Done(): + err := s.Context().Err() + cs.finish(err) + cs.closeTransportStream(transport.ContextErr(err)) + } + }() + return cs, nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + opts []CallOption + c *callInfo + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + dc Decompressor + cancel context.CancelFunc + + tracing bool // set to EnableTracing when the clientStream is created. 
+ + mu sync.Mutex + done func(balancer.DoneInfo) + closed bool + finished bool + // trInfo.tr is set when the clientStream is created (if EnableTracing is true), + // and is set to nil when the clientStream's finish method is called. + trInfo traceInfo + + // statsCtx keeps the user context for stats handling. + // All stats collection should use the statsCtx (instead of the stream context) + // so that all the generated stats for a particular RPC can be associated in the processing phase. + statsCtx context.Context + statsHandler stats.Handler +} + +func (cs *clientStream) Context() context.Context { + return cs.s.Context() +} + +func (cs *clientStream) Header() (metadata.MD, error) { + m, err := cs.s.Header() + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + return cs.s.Trailer() +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + cs.mu.Unlock() + } + // TODO Investigate how to signal the stats handling party. + // generate error stats if err != nil && err != io.EOF? + defer func() { + if err != nil { + cs.finish(err) + } + if err == nil { + return + } + if err == io.EOF { + // Specialize the process for server streaming. SendMsg is only called + // once when creating the stream object. io.EOF needs to be skipped when + // the rpc is early finished (before the stream object is created.). + // TODO: It is probably better to move this into the generated code. 
+ if !cs.desc.ClientStreams && cs.desc.ServerStreams { + err = nil + } + return + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + err = toRPCErr(err) + }() + var outPayload *stats.OutPayload + if cs.statsHandler != nil { + outPayload = &stats.OutPayload{ + Client: true, + } + } + hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload) + if err != nil { + return err + } + if cs.c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *cs.c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize) + } + err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false}) + if err == nil && outPayload != nil { + outPayload.SentTime = time.Now() + cs.statsHandler.HandleRPC(cs.statsCtx, outPayload) + } + return err +} + +func (cs *clientStream) RecvMsg(m interface{}) (err error) { + var inPayload *stats.InPayload + if cs.statsHandler != nil { + inPayload = &stats.InPayload{ + Client: true, + } + } + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload) + defer func() { + // err != nil indicates the termination of the stream. + if err != nil { + cs.finish(err) + } + }() + if err == nil { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + cs.mu.Unlock() + } + if inPayload != nil { + cs.statsHandler.HandleRPC(cs.statsCtx, inPayload) + } + if !cs.desc.ClientStreams || cs.desc.ServerStreams { + return + } + // Special handling for client streaming rpc. + // This recv expects EOF or errors, so we don't collect inPayload. 
+ if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil) + cs.closeTransportStream(err) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + if se := cs.s.Status().Err(); se != nil { + return se + } + cs.finish(err) + return nil + } + return toRPCErr(err) + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + if err == io.EOF { + if statusErr := cs.s.Status().Err(); statusErr != nil { + return statusErr + } + // Returns io.EOF to indicate the end of the stream. + return + } + return toRPCErr(err) +} + +func (cs *clientStream) CloseSend() (err error) { + err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true}) + defer func() { + if err != nil { + cs.finish(err) + } + }() + if err == nil || err == io.EOF { + return nil + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + err = toRPCErr(err) + return +} + +func (cs *clientStream) closeTransportStream(err error) { + cs.mu.Lock() + if cs.closed { + cs.mu.Unlock() + return + } + cs.closed = true + cs.mu.Unlock() + cs.t.CloseStream(cs.s, err) +} + +func (cs *clientStream) finish(err error) { + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.finished { + return + } + cs.finished = true + defer func() { + if cs.cancel != nil { + cs.cancel() + } + }() + for _, o := range cs.opts { + o.after(cs.c) + } + if cs.done != nil { + updateRPCInfoInContext(cs.s.Context(), rpcInfo{ + bytesSent: cs.s.BytesSent(), + bytesReceived: cs.s.BytesReceived(), + }) + cs.done(balancer.DoneInfo{Err: err}) + cs.done = nil + } + if cs.statsHandler != nil { + end := &stats.End{ + Client: true, + EndTime: time.Now(), + } + if err != io.EOF { + // end.Error is nil if the RPC finished successfully. 
+ end.Error = toRPCErr(err) + } + cs.statsHandler.HandleRPC(cs.statsCtx, end) + } + if !cs.tracing { + return + } + if cs.trInfo.tr != nil { + if err == nil || err == io.EOF { + cs.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + cs.trInfo.tr.LazyPrintf("RPC: [%v]", err) + cs.trInfo.tr.SetError() + } + cs.trInfo.tr.Finish() + cs.trInfo.tr = nil + } +} + +// ServerStream defines the interface a server stream has to satisfy. +type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When call multiple times, all the provided metadata will be merged. + // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + Stream +} + +// serverStream implements a server side Stream. +type serverStream struct { + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + cp Compressor + dc Decompressor + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler stats.Handler + + mu sync.Mutex // protects trInfo.tr after the service handler runs. 
+} + +func (ss *serverStream) Context() context.Context { + return ss.s.Context() +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + return ss.t.WriteHeader(ss.s, md) +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) + return +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } + }() + var outPayload *stats.OutPayload + if ss.statsHandler != nil { + outPayload = &stats.OutPayload{} + } + hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload) + if err != nil { + return err + } + if len(data) > ss.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if outPayload != nil { + outPayload.SentTime = time.Now() + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } + }() + var inPayload *stats.InPayload + if ss.statsHandler != nil { + inPayload = &stats.InPayload{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { + if err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if inPayload != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), inPayload) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/stress/client/main.go b/vendor/google.golang.org/grpc/stress/client/main.go new file mode 100644 index 000000000..635f1ad38 --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/client/main.go @@ -0,0 +1,336 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc -I ../grpc_testing --go_out=plugins=grpc:../grpc_testing ../grpc_testing/metrics.proto + +// client starts an interop client to do stress test and a metrics server to report qps. +package main + +import ( + "flag" + "fmt" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" + metricspb "google.golang.org/grpc/stress/grpc_testing" + "google.golang.org/grpc/testdata" +) + +var ( + serverAddresses = flag.String("server_addresses", "localhost:8080", "a list of server addresses") + testCases = flag.String("test_cases", "", "a list of test cases along with the relative weights") + testDurationSecs = flag.Int("test_duration_secs", -1, "test duration in seconds") + numChannelsPerServer = flag.Int("num_channels_per_server", 1, "Number of channels (i.e connections) to each server") + numStubsPerChannel = flag.Int("num_stubs_per_channel", 1, "Number of client stubs per each connection to server") + metricsPort = flag.Int("metrics_port", 8081, "The port at which the stress client exposes QPS metrics") + useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") + testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") + tlsServerName = flag.String("server_host_override", 
"foo.test.google.fr", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") + caFile = flag.String("ca_file", "", "The file containning the CA root cert file") +) + +// testCaseWithWeight contains the test case type and its weight. +type testCaseWithWeight struct { + name string + weight int +} + +// parseTestCases converts test case string to a list of struct testCaseWithWeight. +func parseTestCases(testCaseString string) []testCaseWithWeight { + testCaseStrings := strings.Split(testCaseString, ",") + testCases := make([]testCaseWithWeight, len(testCaseStrings)) + for i, str := range testCaseStrings { + testCase := strings.Split(str, ":") + if len(testCase) != 2 { + panic(fmt.Sprintf("invalid test case with weight: %s", str)) + } + // Check if test case is supported. + switch testCase[0] { + case + "empty_unary", + "large_unary", + "client_streaming", + "server_streaming", + "ping_pong", + "empty_stream", + "timeout_on_sleeping_server", + "cancel_after_begin", + "cancel_after_first_response", + "status_code_and_message", + "custom_metadata": + default: + panic(fmt.Sprintf("unknown test type: %s", testCase[0])) + } + testCases[i].name = testCase[0] + w, err := strconv.Atoi(testCase[1]) + if err != nil { + panic(fmt.Sprintf("%v", err)) + } + testCases[i].weight = w + } + return testCases +} + +// weightedRandomTestSelector defines a weighted random selector for test case types. +type weightedRandomTestSelector struct { + tests []testCaseWithWeight + totalWeight int +} + +// newWeightedRandomTestSelector constructs a weightedRandomTestSelector with the given list of testCaseWithWeight. 
func newWeightedRandomTestSelector(tests []testCaseWithWeight) *weightedRandomTestSelector {
	var totalWeight int
	for _, t := range tests {
		totalWeight += t.weight
	}
	// Seed the package-level PRNG so each run draws a different test sequence.
	rand.Seed(time.Now().UnixNano())
	return &weightedRandomTestSelector{tests, totalWeight}
}

// getNextTest picks a test case name at random, with probability proportional
// to each case's weight. NOTE(review): rand.Intn panics when its argument is
// not positive, so this must not be called on a selector built from an empty
// or all-zero-weight test list.
func (selector weightedRandomTestSelector) getNextTest() string {
	random := rand.Intn(selector.totalWeight)
	var weightSofar int
	// Walk the cumulative weight distribution until it exceeds the drawn value.
	for _, test := range selector.tests {
		weightSofar += test.weight
		if random < weightSofar {
			return test.name
		}
	}
	panic("no test case selected by weightedRandomTestSelector")
}

// gauge stores the qps of one interop client (one stub).
type gauge struct {
	mutex sync.RWMutex
	val   int64
}

// set stores v as the gauge's current value.
func (g *gauge) set(v int64) {
	g.mutex.Lock()
	defer g.mutex.Unlock()
	g.val = v
}

// get returns the gauge's current value.
func (g *gauge) get() int64 {
	g.mutex.RLock()
	defer g.mutex.RUnlock()
	return g.val
}

// server implements metrics server functions.
type server struct {
	mutex sync.RWMutex
	// gauges is a map from /stress_test/server_<n>/channel_<n>/stub_<n>/qps to its qps gauge.
	gauges map[string]*gauge
}

// newMetricsServer returns a new metrics server.
func newMetricsServer() *server {
	return &server{gauges: make(map[string]*gauge)}
}

// GetAllGauges returns all gauges.
func (s *server) GetAllGauges(in *metricspb.EmptyMessage, stream metricspb.MetricsService_GetAllGaugesServer) error {
	s.mutex.RLock()
	defer s.mutex.RUnlock()

	// Stream every gauge back to the client; map iteration order is random,
	// so responses arrive in no particular order.
	for name, gauge := range s.gauges {
		if err := stream.Send(&metricspb.GaugeResponse{Name: name, Value: &metricspb.GaugeResponse_LongValue{LongValue: gauge.get()}}); err != nil {
			return err
		}
	}
	return nil
}

// GetGauge returns the gauge for the given name.
func (s *server) GetGauge(ctx context.Context, in *metricspb.GaugeRequest) (*metricspb.GaugeResponse, error) {
	s.mutex.RLock()
	defer s.mutex.RUnlock()

	if g, ok := s.gauges[in.Name]; ok {
		return &metricspb.GaugeResponse{Name: in.Name, Value: &metricspb.GaugeResponse_LongValue{LongValue: g.get()}}, nil
	}
	// Unknown gauge name: surface an InvalidArgument gRPC status to the caller.
	return nil, grpc.Errorf(codes.InvalidArgument, "gauge with name %s not found", in.Name)
}

// createGauge creates a gauge using the given name in metrics server.
func (s *server) createGauge(name string) *gauge {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if _, ok := s.gauges[name]; ok {
		// gauge already exists.
		panic(fmt.Sprintf("gauge %s already exists", name))
	}
	var g gauge
	s.gauges[name] = &g
	return &g
}

// startServer listens on the given TCP port and serves the metrics gRPC
// service on it. Serve blocks until the server stops, so callers run this in
// its own goroutine; a listen failure aborts the process via Fatalf.
func startServer(server *server, port int) {
	lis, err := net.Listen("tcp", ":"+strconv.Itoa(port))
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}

	s := grpc.NewServer()
	metricspb.RegisterMetricsServiceServer(s, server)
	s.Serve(lis)

}

// performRPCs uses weightedRandomTestSelector to select test case and runs the tests.
+func performRPCs(gauge *gauge, conn *grpc.ClientConn, selector *weightedRandomTestSelector, stop <-chan bool) { + client := testpb.NewTestServiceClient(conn) + var numCalls int64 + startTime := time.Now() + for { + test := selector.getNextTest() + switch test { + case "empty_unary": + interop.DoEmptyUnaryCall(client, grpc.FailFast(false)) + case "large_unary": + interop.DoLargeUnaryCall(client, grpc.FailFast(false)) + case "client_streaming": + interop.DoClientStreaming(client, grpc.FailFast(false)) + case "server_streaming": + interop.DoServerStreaming(client, grpc.FailFast(false)) + case "ping_pong": + interop.DoPingPong(client, grpc.FailFast(false)) + case "empty_stream": + interop.DoEmptyStream(client, grpc.FailFast(false)) + case "timeout_on_sleeping_server": + interop.DoTimeoutOnSleepingServer(client, grpc.FailFast(false)) + case "cancel_after_begin": + interop.DoCancelAfterBegin(client, grpc.FailFast(false)) + case "cancel_after_first_response": + interop.DoCancelAfterFirstResponse(client, grpc.FailFast(false)) + case "status_code_and_message": + interop.DoStatusCodeAndMessage(client, grpc.FailFast(false)) + case "custom_metadata": + interop.DoCustomMetadata(client, grpc.FailFast(false)) + } + numCalls++ + gauge.set(int64(float64(numCalls) / time.Since(startTime).Seconds())) + + select { + case <-stop: + return + default: + } + } +} + +func logParameterInfo(addresses []string, tests []testCaseWithWeight) { + grpclog.Printf("server_addresses: %s", *serverAddresses) + grpclog.Printf("test_cases: %s", *testCases) + grpclog.Printf("test_duration_secs: %d", *testDurationSecs) + grpclog.Printf("num_channels_per_server: %d", *numChannelsPerServer) + grpclog.Printf("num_stubs_per_channel: %d", *numStubsPerChannel) + grpclog.Printf("metrics_port: %d", *metricsPort) + grpclog.Printf("use_tls: %t", *useTLS) + grpclog.Printf("use_test_ca: %t", *testCA) + grpclog.Printf("server_host_override: %s", *tlsServerName) + + grpclog.Println("addresses:") + for i, addr := range 
addresses { + grpclog.Printf("%d. %s\n", i+1, addr) + } + grpclog.Println("tests:") + for i, test := range tests { + grpclog.Printf("%d. %v\n", i+1, test) + } +} + +func newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.ClientConn, error) { + var opts []grpc.DialOption + if useTLS { + var sn string + if tlsServerName != "" { + sn = tlsServerName + } + var creds credentials.TransportCredentials + if testCA { + var err error + if *caFile == "" { + *caFile = testdata.Path("ca.pem") + } + creds, err = credentials.NewClientTLSFromFile(*caFile, sn) + if err != nil { + grpclog.Fatalf("Failed to create TLS credentials %v", err) + } + } else { + creds = credentials.NewClientTLSFromCert(nil, sn) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + return grpc.Dial(address, opts...) +} + +func main() { + flag.Parse() + addresses := strings.Split(*serverAddresses, ",") + tests := parseTestCases(*testCases) + logParameterInfo(addresses, tests) + testSelector := newWeightedRandomTestSelector(tests) + metricsServer := newMetricsServer() + + var wg sync.WaitGroup + wg.Add(len(addresses) * *numChannelsPerServer * *numStubsPerChannel) + stop := make(chan bool) + + for serverIndex, address := range addresses { + for connIndex := 0; connIndex < *numChannelsPerServer; connIndex++ { + conn, err := newConn(address, *useTLS, *testCA, *tlsServerName) + if err != nil { + grpclog.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + for clientIndex := 0; clientIndex < *numStubsPerChannel; clientIndex++ { + name := fmt.Sprintf("/stress_test/server_%d/channel_%d/stub_%d/qps", serverIndex+1, connIndex+1, clientIndex+1) + go func() { + defer wg.Done() + g := metricsServer.createGauge(name) + performRPCs(g, conn, testSelector, stop) + }() + } + + } + } + go startServer(metricsServer, *metricsPort) + if *testDurationSecs > 0 { + time.Sleep(time.Duration(*testDurationSecs) * time.Second) + 
close(stop) + } + wg.Wait() + grpclog.Printf(" ===== ALL DONE ===== ") + +} diff --git a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go new file mode 100644 index 000000000..466668a4d --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go @@ -0,0 +1,374 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metrics.proto + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + metrics.proto + +It has these top-level messages: + GaugeResponse + GaugeRequest + EmptyMessage +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Response message containing the gauge name and value +type GaugeResponse struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Types that are valid to be assigned to Value: + // *GaugeResponse_LongValue + // *GaugeResponse_DoubleValue + // *GaugeResponse_StringValue + Value isGaugeResponse_Value `protobuf_oneof:"value"` +} + +func (m *GaugeResponse) Reset() { *m = GaugeResponse{} } +func (m *GaugeResponse) String() string { return proto.CompactTextString(m) } +func (*GaugeResponse) ProtoMessage() {} +func (*GaugeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isGaugeResponse_Value interface { + isGaugeResponse_Value() +} + +type GaugeResponse_LongValue struct { + LongValue int64 `protobuf:"varint,2,opt,name=long_value,json=longValue,oneof"` +} +type GaugeResponse_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,oneof"` +} +type GaugeResponse_StringValue struct { + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,oneof"` +} + +func (*GaugeResponse_LongValue) isGaugeResponse_Value() {} +func (*GaugeResponse_DoubleValue) isGaugeResponse_Value() {} +func (*GaugeResponse_StringValue) isGaugeResponse_Value() {} + +func (m *GaugeResponse) GetValue() isGaugeResponse_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *GaugeResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GaugeResponse) GetLongValue() int64 { + if x, ok := m.GetValue().(*GaugeResponse_LongValue); ok { + return x.LongValue + } + return 0 +} + +func (m *GaugeResponse) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*GaugeResponse_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *GaugeResponse) GetStringValue() string { + if x, ok := m.GetValue().(*GaugeResponse_StringValue); ok { + return 
x.StringValue + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GaugeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GaugeResponse_OneofMarshaler, _GaugeResponse_OneofUnmarshaler, _GaugeResponse_OneofSizer, []interface{}{ + (*GaugeResponse_LongValue)(nil), + (*GaugeResponse_DoubleValue)(nil), + (*GaugeResponse_StringValue)(nil), + } +} + +func _GaugeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GaugeResponse) + // value + switch x := m.Value.(type) { + case *GaugeResponse_LongValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.LongValue)) + case *GaugeResponse_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *GaugeResponse_StringValue: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case nil: + default: + return fmt.Errorf("GaugeResponse.Value has unexpected type %T", x) + } + return nil +} + +func _GaugeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GaugeResponse) + switch tag { + case 2: // value.long_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &GaugeResponse_LongValue{int64(x)} + return true, err + case 3: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &GaugeResponse_DoubleValue{math.Float64frombits(x)} + return true, err + case 4: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &GaugeResponse_StringValue{x} + return true, err + default: + return false, nil + } +} + +func 
_GaugeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GaugeResponse) + // value + switch x := m.Value.(type) { + case *GaugeResponse_LongValue: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.LongValue)) + case *GaugeResponse_DoubleValue: + n += proto.SizeVarint(3<<3 | proto.WireFixed64) + n += 8 + case *GaugeResponse_StringValue: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message containing the gauge name +type GaugeRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GaugeRequest) Reset() { *m = GaugeRequest{} } +func (m *GaugeRequest) String() string { return proto.CompactTextString(m) } +func (*GaugeRequest) ProtoMessage() {} +func (*GaugeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GaugeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type EmptyMessage struct { +} + +func (m *EmptyMessage) Reset() { *m = EmptyMessage{} } +func (m *EmptyMessage) String() string { return proto.CompactTextString(m) } +func (*EmptyMessage) ProtoMessage() {} +func (*EmptyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func init() { + proto.RegisterType((*GaugeResponse)(nil), "grpc.testing.GaugeResponse") + proto.RegisterType((*GaugeRequest)(nil), "grpc.testing.GaugeRequest") + proto.RegisterType((*EmptyMessage)(nil), "grpc.testing.EmptyMessage") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for MetricsService service + +type MetricsServiceClient interface { + // Returns the values of all the gauges that are currently being maintained by + // the service + GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) + // Returns the value of one gauge + GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_MetricsService_serviceDesc.Streams[0], c.cc, "/grpc.testing.MetricsService/GetAllGauges", opts...) + if err != nil { + return nil, err + } + x := &metricsServiceGetAllGaugesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type MetricsService_GetAllGaugesClient interface { + Recv() (*GaugeResponse, error) + grpc.ClientStream +} + +type metricsServiceGetAllGaugesClient struct { + grpc.ClientStream +} + +func (x *metricsServiceGetAllGaugesClient) Recv() (*GaugeResponse, error) { + m := new(GaugeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *metricsServiceClient) GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) { + out := new(GaugeResponse) + err := grpc.Invoke(ctx, "/grpc.testing.MetricsService/GetGauge", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for MetricsService service + +type MetricsServiceServer interface { + // Returns the values of all the gauges that are currently being maintained by + // the service + GetAllGauges(*EmptyMessage, MetricsService_GetAllGaugesServer) error + // Returns the value of one gauge + GetGauge(context.Context, *GaugeRequest) (*GaugeResponse, error) +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_GetAllGauges_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EmptyMessage) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(MetricsServiceServer).GetAllGauges(m, &metricsServiceGetAllGaugesServer{stream}) +} + +type MetricsService_GetAllGaugesServer interface { + Send(*GaugeResponse) error + grpc.ServerStream +} + +type metricsServiceGetAllGaugesServer struct { + grpc.ServerStream +} + +func (x *metricsServiceGetAllGaugesServer) Send(m *GaugeResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _MetricsService_GetGauge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GaugeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceServer).GetGauge(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.MetricsService/GetGauge", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceServer).GetGauge(ctx, req.(*GaugeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetGauge", + Handler: 
_MetricsService_GetGauge_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetAllGauges", + Handler: _MetricsService_GetAllGauges_Handler, + ServerStreams: true, + }, + }, + Metadata: "metrics.proto", +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x3f, 0x4f, 0xc3, 0x30, + 0x10, 0xc5, 0x6b, 0x5a, 0xfe, 0xf4, 0x70, 0x3b, 0x78, 0xaa, 0xca, 0x40, 0x14, 0x96, 0x4c, 0x11, + 0x82, 0x4f, 0x00, 0x08, 0xa5, 0x0c, 0x5d, 0x82, 0xc4, 0x8a, 0xd2, 0x70, 0xb2, 0x22, 0x39, 0x71, + 0xf0, 0x5d, 0x2a, 0xf1, 0x49, 0x58, 0xf9, 0xa8, 0xc8, 0x4e, 0x55, 0xa5, 0x08, 0x75, 0xb3, 0x7e, + 0xf7, 0xfc, 0xfc, 0x9e, 0x0f, 0x66, 0x35, 0xb2, 0xab, 0x4a, 0x4a, 0x5b, 0x67, 0xd9, 0x2a, 0xa9, + 0x5d, 0x5b, 0xa6, 0x8c, 0xc4, 0x55, 0xa3, 0xe3, 0x6f, 0x01, 0xb3, 0xac, 0xe8, 0x34, 0xe6, 0x48, + 0xad, 0x6d, 0x08, 0x95, 0x82, 0x49, 0x53, 0xd4, 0xb8, 0x10, 0x91, 0x48, 0xa6, 0x79, 0x38, 0xab, + 0x6b, 0x00, 0x63, 0x1b, 0xfd, 0xbe, 0x2d, 0x4c, 0x87, 0x8b, 0x93, 0x48, 0x24, 0xe3, 0xd5, 0x28, + 0x9f, 0x7a, 0xf6, 0xe6, 0x91, 0xba, 0x01, 0xf9, 0x61, 0xbb, 0x8d, 0xc1, 0x9d, 0x64, 0x1c, 0x89, + 0x44, 0xac, 0x46, 0xf9, 0x65, 0x4f, 0xf7, 0x22, 0x62, 0x57, 0xed, 0x7d, 0x26, 0xfe, 0x05, 0x2f, + 0xea, 0x69, 0x10, 0x3d, 0x9e, 0xc3, 0x69, 0x98, 0xc6, 0x31, 0xc8, 0x5d, 0xb0, 0xcf, 0x0e, 0x89, + 0xff, 0xcb, 0x15, 0xcf, 0x41, 0x3e, 0xd7, 0x2d, 0x7f, 0xad, 0x91, 0xa8, 0xd0, 0x78, 0xf7, 0x23, + 0x60, 0xbe, 0xee, 0xdb, 0xbe, 0xa2, 0xdb, 0x56, 0x25, 0xaa, 0x17, 0x90, 0x19, 0xf2, 0x83, 0x31, + 0xc1, 0x8c, 0xd4, 0x32, 0x1d, 0xf6, 0x4f, 0x87, 0xd7, 0x97, 0x57, 0x87, 0xb3, 0x83, 0x7f, 0xb9, + 0x15, 0xea, 0x09, 0x2e, 0x32, 0xe4, 0x40, 0xff, 0xda, 0x0c, 0x93, 0x1e, 0xb5, 0xd9, 0x9c, 0x85, + 0x2d, 0xdc, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x7d, 0xb2, 0xc9, 0x96, 0x01, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto new file mode 100644 index 000000000..695040064 --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto @@ -0,0 +1,49 @@ +// Copyright 2015-2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Contains the definitions for a metrics service and the type of metrics +// exposed by the service. +// +// Currently, 'Gauge' (i.e a metric that represents the measured value of +// something at an instant of time) is the only metric type supported by the +// service. 
+syntax = "proto3"; + +package grpc.testing; + +// Response message containing the gauge name and value +message GaugeResponse { + string name = 1; + oneof value { + int64 long_value = 2; + double double_value = 3; + string string_value = 4; + } +} + +// Request message containing the gauge name +message GaugeRequest { + string name = 1; +} + +message EmptyMessage {} + +service MetricsService { + // Returns the values of all the gauges that are currently being maintained by + // the service + rpc GetAllGauges(EmptyMessage) returns (stream GaugeResponse); + + // Returns the value of one gauge + rpc GetGauge(GaugeRequest) returns (GaugeResponse); +} diff --git a/vendor/google.golang.org/grpc/stress/metrics_client/main.go b/vendor/google.golang.org/grpc/stress/metrics_client/main.go new file mode 100644 index 000000000..6405ec85e --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/metrics_client/main.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import ( + "flag" + "fmt" + "io" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + metricspb "google.golang.org/grpc/stress/grpc_testing" +) + +var ( + metricsServerAddress = flag.String("metrics_server_address", "", "The metrics server addresses in the fomrat :") + totalOnly = flag.Bool("total_only", false, "If true, this prints only the total value of all gauges") +) + +func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) { + stream, err := client.GetAllGauges(context.Background(), &metricspb.EmptyMessage{}) + if err != nil { + grpclog.Fatalf("failed to call GetAllGuages: %v", err) + } + + var ( + overallQPS int64 + rpcStatus error + ) + for { + gaugeResponse, err := stream.Recv() + if err != nil { + rpcStatus = err + break + } + if _, ok := gaugeResponse.GetValue().(*metricspb.GaugeResponse_LongValue); !ok { + panic(fmt.Sprintf("gauge %s is not a long value", gaugeResponse.Name)) + } + v := gaugeResponse.GetLongValue() + if !totalOnly { + grpclog.Printf("%s: %d", gaugeResponse.Name, v) + } + overallQPS += v + } + if rpcStatus != io.EOF { + grpclog.Fatalf("failed to finish server streaming: %v", rpcStatus) + } + grpclog.Printf("overall qps: %d", overallQPS) +} + +func main() { + flag.Parse() + if *metricsServerAddress == "" { + grpclog.Fatalf("Metrics server address is empty.") + } + + conn, err := grpc.Dial(*metricsServerAddress, grpc.WithInsecure()) + if err != nil { + grpclog.Fatalf("cannot connect to metrics server: %v", err) + } + defer conn.Close() + + c := metricspb.NewMetricsServiceClient(conn) + printMetrics(c, *totalOnly) +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 000000000..22b8fb50d --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +package tap + +import ( + "golang.org/x/net/context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. 
Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go new file mode 100644 index 000000000..bc0ab839f --- /dev/null +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go @@ -0,0 +1,229 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bufconn provides a net.Conn implemented by a buffer and related +// dialing and listening functionality. +package bufconn + +import ( + "fmt" + "io" + "net" + "sync" + "time" +) + +// Listener implements a net.Listener that creates local, buffered net.Conns +// via its Accept and Dial method. +type Listener struct { + mu sync.Mutex + sz int + ch chan net.Conn + done chan struct{} +} + +var errClosed = fmt.Errorf("Closed") + +// Listen returns a Listener that can only be contacted by its own Dialers and +// creates buffered connections between the two. +func Listen(sz int) *Listener { + return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} +} + +// Accept blocks until Dial is called, then returns a net.Conn for the server +// half of the connection. 
+func (l *Listener) Accept() (net.Conn, error) { + select { + case <-l.done: + return nil, errClosed + case c := <-l.ch: + return c, nil + } +} + +// Close stops the listener. +func (l *Listener) Close() error { + l.mu.Lock() + defer l.mu.Unlock() + select { + case <-l.done: + // Already closed. + break + default: + close(l.done) + } + return nil +} + +// Addr reports the address of the listener. +func (l *Listener) Addr() net.Addr { return addr{} } + +// Dial creates an in-memory full-duplex network connection, unblocks Accept by +// providing it the server half of the connection, and returns the client half +// of the connection. +func (l *Listener) Dial() (net.Conn, error) { + p1, p2 := newPipe(l.sz), newPipe(l.sz) + select { + case <-l.done: + return nil, errClosed + case l.ch <- &conn{p1, p2}: + return &conn{p2, p1}, nil + } +} + +type pipe struct { + mu sync.Mutex + + // buf contains the data in the pipe. It is a ring buffer of fixed capacity, + // with r and w pointing to the offset to read and write, respsectively. + // + // Data is read between [r, w) and written to [w, r), wrapping around the end + // of the slice if necessary. + // + // The buffer is empty if r == len(buf), otherwise if r == w, it is full. + // + // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. + buf []byte + w, r int + + wwait sync.Cond + rwait sync.Cond + closed bool +} + +func newPipe(sz int) *pipe { + p := &pipe{buf: make([]byte, 0, sz)} + p.wwait.L = &p.mu + p.rwait.L = &p.mu + return p +} + +func (p *pipe) empty() bool { + return p.r == len(p.buf) +} + +func (p *pipe) full() bool { + return p.r < len(p.buf) && p.r == p.w +} + +func (p *pipe) Read(b []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + // Block until p has data. 
+ for { + if p.closed { + return 0, io.ErrClosedPipe + } + if !p.empty() { + break + } + p.rwait.Wait() + } + wasFull := p.full() + + n = copy(b, p.buf[p.r:len(p.buf)]) + p.r += n + if p.r == cap(p.buf) { + p.r = 0 + p.buf = p.buf[:p.w] + } + + // Signal a blocked writer, if any + if wasFull { + p.wwait.Signal() + } + + return n, nil +} + +func (p *pipe) Write(b []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.closed { + return 0, io.ErrClosedPipe + } + for len(b) > 0 { + // Block until p is not full. + for { + if p.closed { + return 0, io.ErrClosedPipe + } + if !p.full() { + break + } + p.wwait.Wait() + } + wasEmpty := p.empty() + + end := cap(p.buf) + if p.w < p.r { + end = p.r + } + x := copy(p.buf[p.w:end], b) + b = b[x:] + n += x + p.w += x + if p.w > len(p.buf) { + p.buf = p.buf[:p.w] + } + if p.w == cap(p.buf) { + p.w = 0 + } + + // Signal a blocked reader, if any. + if wasEmpty { + p.rwait.Signal() + } + } + return n, nil +} + +func (p *pipe) Close() error { + p.mu.Lock() + defer p.mu.Unlock() + p.closed = true + // Signal all blocked readers and writers to return an error. 
+ p.rwait.Broadcast() + p.wwait.Broadcast() + return nil +} + +type conn struct { + io.ReadCloser + io.WriteCloser +} + +func (c *conn) Close() error { + err1 := c.ReadCloser.Close() + err2 := c.WriteCloser.Close() + if err1 != nil { + return err1 + } + return err2 +} + +func (*conn) LocalAddr() net.Addr { return addr{} } +func (*conn) RemoteAddr() net.Addr { return addr{} } +func (c *conn) SetDeadline(t time.Time) error { return fmt.Errorf("unsupported") } +func (c *conn) SetReadDeadline(t time.Time) error { return fmt.Errorf("unsupported") } +func (c *conn) SetWriteDeadline(t time.Time) error { return fmt.Errorf("unsupported") } + +type addr struct{} + +func (addr) Network() string { return "bufconn" } +func (addr) String() string { return "bufconn" } diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn_test.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn_test.go new file mode 100644 index 000000000..0f7bc2227 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn_test.go @@ -0,0 +1,149 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package bufconn + +import ( + "fmt" + "io" + "net" + "reflect" + "testing" + "time" +) + +func testRW(r io.Reader, w io.Writer) error { + for i := 0; i < 20; i++ { + d := make([]byte, i) + for j := 0; j < i; j++ { + d[j] = byte(i - j) + } + var rn int + var rerr error + b := make([]byte, i) + done := make(chan struct{}) + go func() { + for rn < len(b) && rerr == nil { + var x int + x, rerr = r.Read(b[rn:]) + rn += x + } + close(done) + }() + wn, werr := w.Write(d) + if wn != i || werr != nil { + return fmt.Errorf("%v: w.Write(%v) = %v, %v; want %v, nil", i, d, wn, werr, i) + } + select { + case <-done: + case <-time.After(500 * time.Millisecond): + return fmt.Errorf("%v: r.Read never returned", i) + } + if rn != i || rerr != nil { + return fmt.Errorf("%v: r.Read = %v, %v; want %v, nil", i, rn, rerr, i) + } + if !reflect.DeepEqual(b, d) { + return fmt.Errorf("%v: r.Read read %v; want %v", i, b, d) + } + } + return nil +} + +func TestPipe(t *testing.T) { + p := newPipe(10) + if err := testRW(p, p); err != nil { + t.Fatalf(err.Error()) + } +} + +func TestPipeClose(t *testing.T) { + p := newPipe(10) + p.Close() + if _, err := p.Write(nil); err != io.ErrClosedPipe { + t.Fatalf("p.Write = _, %v; want _, %v", err, io.ErrClosedPipe) + } + if _, err := p.Read(nil); err != io.ErrClosedPipe { + t.Fatalf("p.Read = _, %v; want _, %v", err, io.ErrClosedPipe) + } +} + +func TestConn(t *testing.T) { + p1, p2 := newPipe(10), newPipe(10) + c1, c2 := &conn{p1, p2}, &conn{p2, p1} + + if err := testRW(c1, c2); err != nil { + t.Fatalf(err.Error()) + } + if err := testRW(c2, c1); err != nil { + t.Fatalf(err.Error()) + } +} + +func TestListener(t *testing.T) { + l := Listen(7) + var s net.Conn + var serr error + done := make(chan struct{}) + go func() { + s, serr = l.Accept() + close(done) + }() + c, cerr := l.Dial() + <-done + if cerr != nil || serr != nil { + t.Fatalf("cerr = %v, serr = %v; want nil, nil", cerr, serr) + } + if err := testRW(c, s); err != nil { + 
t.Fatalf(err.Error()) + } + if err := testRW(s, c); err != nil { + t.Fatalf(err.Error()) + } +} + +func TestCloseWhileDialing(t *testing.T) { + l := Listen(7) + var c net.Conn + var err error + done := make(chan struct{}) + go func() { + c, err = l.Dial() + close(done) + }() + l.Close() + <-done + if c != nil || err != errClosed { + t.Fatalf("c, err = %v, %v; want nil, %v", c, err, errClosed) + } +} + +func TestCloseWhileAccepting(t *testing.T) { + l := Listen(7) + var c net.Conn + var err error + done := make(chan struct{}) + go func() { + c, err = l.Accept() + close(done) + }() + l.Close() + <-done + if c != nil || err != errClosed { + t.Fatalf("c, err = %v, %v; want nil, %v", c, err, errClosed) + } +} diff --git a/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go b/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go new file mode 100644 index 000000000..9401b1dd8 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: codec_perf/perf.proto + +/* +Package codec_perf is a generated protocol buffer package. + +It is generated from these files: + codec_perf/perf.proto + +It has these top-level messages: + Buffer +*/ +package codec_perf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Buffer is a message that contains a body of bytes that is used to exercise +// encoding and decoding overheads. 
+type Buffer struct { + Body []byte `protobuf:"bytes,1,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Buffer) Reset() { *m = Buffer{} } +func (m *Buffer) String() string { return proto.CompactTextString(m) } +func (*Buffer) ProtoMessage() {} +func (*Buffer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Buffer) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func init() { + proto.RegisterType((*Buffer)(nil), "codec.perf.Buffer") +} + +func init() { proto.RegisterFile("codec_perf/perf.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 78 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0xce, 0x4f, 0x49, + 0x4d, 0x8e, 0x2f, 0x48, 0x2d, 0x4a, 0xd3, 0x07, 0x11, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, + 0x5c, 0x60, 0x61, 0x3d, 0x90, 0x88, 0x92, 0x0c, 0x17, 0x9b, 0x53, 0x69, 0x5a, 0x5a, 0x6a, 0x91, + 0x90, 0x10, 0x17, 0x4b, 0x52, 0x7e, 0x4a, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, + 0x0d, 0x08, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x93, 0x4c, 0x5f, 0x41, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/test/codec_perf/perf.proto b/vendor/google.golang.org/grpc/test/codec_perf/perf.proto new file mode 100644 index 000000000..f42dbcafe --- /dev/null +++ b/vendor/google.golang.org/grpc/test/codec_perf/perf.proto @@ -0,0 +1,25 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Messages used for performance tests that may not reference grpc directly for +// reasons of import cycles. +syntax = "proto2"; + +package codec.perf; + +// Buffer is a message that contains a body of bytes that is used to exercise +// encoding and decoding overheads. +message Buffer { + optional bytes body = 1; +} diff --git a/vendor/google.golang.org/grpc/test/end2end_test.go b/vendor/google.golang.org/grpc/test/end2end_test.go new file mode 100644 index 000000000..62ebf905c --- /dev/null +++ b/vendor/google.golang.org/grpc/test/end2end_test.go @@ -0,0 +1,5030 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. codec_perf/perf.proto +//go:generate protoc --go_out=plugins=grpc:. 
grpc_testing/test.proto + +package test + +import ( + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "math" + "net" + "os" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "syscall" + "testing" + "time" + + "github.com/golang/protobuf/proto" + anypb "github.com/golang/protobuf/ptypes/any" + "golang.org/x/net/context" + "golang.org/x/net/http2" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + _ "google.golang.org/grpc/grpclog/glogger" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + _ "google.golang.org/grpc/resolver/passthrough" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/test/leakcheck" + "google.golang.org/grpc/testdata" +) + +var ( + // For headers: + testMetadata = metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + "key3-bin": []string{"binvalue1", string([]byte{1, 2, 3})}, + } + testMetadata2 = metadata.MD{ + "key1": []string{"value12"}, + "key2": []string{"value22"}, + } + // For trailers: + testTrailerMetadata = metadata.MD{ + "tkey1": []string{"trailerValue1"}, + "tkey2": []string{"trailerValue2"}, + "tkey3-bin": []string{"trailerbinvalue1", string([]byte{3, 2, 1})}, + } + testTrailerMetadata2 = metadata.MD{ + "tkey1": []string{"trailerValue12"}, + "tkey2": []string{"trailerValue22"}, + } + // capital "Key" is illegal in HTTP/2. 
+ malformedHTTP2Metadata = metadata.MD{ + "Key": []string{"foo"}, + } + testAppUA = "myApp1/1.0 myApp2/0.9" + failAppUA = "fail-this-RPC" + detailedError = status.ErrorProto(&spb.Status{ + Code: int32(codes.DataLoss), + Message: "error for testing: " + failAppUA, + Details: []*anypb.Any{{ + TypeUrl: "url", + Value: []byte{6, 0, 0, 6, 1, 3}, + }}, + }) +) + +var raceMode bool // set by race.go in race mode + +type testServer struct { + security string // indicate the authentication protocol used by this server. + earlyFail bool // whether to error out the execution of a service handler prematurely. + setAndSendHeader bool // whether to call setHeader and sendHeader. + setHeaderOnly bool // whether to only call setHeader, not sendHeader. + multipleSetTrailer bool // whether to call setTrailer multiple times. + unaryCallSleepTime time.Duration +} + +func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if md, ok := metadata.FromIncomingContext(ctx); ok { + // For testing purpose, returns an error if user-agent is failAppUA. + // To test that client gets the correct error. 
+ if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) { + return nil, detailedError + } + var str []string + for _, entry := range md["user-agent"] { + str = append(str, "ua", entry) + } + grpc.SendHeader(ctx, metadata.Pairs(str...)) + } + return new(testpb.Empty), nil +} + +func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { + if size < 0 { + return nil, fmt.Errorf("Requested a response with invalid length %d", size) + } + body := make([]byte, size) + switch t { + case testpb.PayloadType_COMPRESSABLE: + case testpb.PayloadType_UNCOMPRESSABLE: + return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported") + default: + return nil, fmt.Errorf("Unsupported payload type: %d", t) + } + return &testpb.Payload{ + Type: t.Enum(), + Body: body, + }, nil +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + md, ok := metadata.FromIncomingContext(ctx) + if ok { + if _, exists := md[":authority"]; !exists { + return nil, grpc.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md) + } + if s.setAndSendHeader { + if err := grpc.SetHeader(ctx, md); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SetHeader(_, %v) = %v, want ", md, err) + } + if err := grpc.SendHeader(ctx, testMetadata2); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SendHeader(_, %v) = %v, want ", testMetadata2, err) + } + } else if s.setHeaderOnly { + if err := grpc.SetHeader(ctx, md); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SetHeader(_, %v) = %v, want ", md, err) + } + if err := grpc.SetHeader(ctx, testMetadata2); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SetHeader(_, %v) = %v, want ", testMetadata2, err) + } + } else { + if err := grpc.SendHeader(ctx, md); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SendHeader(_, %v) = %v, want ", md, err) + } + } + if err := grpc.SetTrailer(ctx, 
testTrailerMetadata); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SetTrailer(_, %v) = %v, want ", testTrailerMetadata, err) + } + if s.multipleSetTrailer { + if err := grpc.SetTrailer(ctx, testTrailerMetadata2); err != nil { + return nil, grpc.Errorf(grpc.Code(err), "grpc.SetTrailer(_, %v) = %v, want ", testTrailerMetadata2, err) + } + } + } + pr, ok := peer.FromContext(ctx) + if !ok { + return nil, grpc.Errorf(codes.DataLoss, "failed to get peer from ctx") + } + if pr.Addr == net.Addr(nil) { + return nil, grpc.Errorf(codes.DataLoss, "failed to get peer address") + } + if s.security != "" { + // Check Auth info + var authType, serverName string + switch info := pr.AuthInfo.(type) { + case credentials.TLSInfo: + authType = info.AuthType() + serverName = info.State.ServerName + default: + return nil, grpc.Errorf(codes.Unauthenticated, "Unknown AuthInfo type") + } + if authType != s.security { + return nil, grpc.Errorf(codes.Unauthenticated, "Wrong auth type: got %q, want %q", authType, s.security) + } + if serverName != "x.test.youtube.com" { + return nil, grpc.Errorf(codes.Unauthenticated, "Unknown server name %q", serverName) + } + } + // Simulate some service delay. + time.Sleep(s.unaryCallSleepTime) + + payload, err := newPayload(in.GetResponseType(), in.GetResponseSize()) + if err != nil { + return nil, err + } + + return &testpb.SimpleResponse{ + Payload: payload, + }, nil +} + +func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { + if md, ok := metadata.FromIncomingContext(stream.Context()); ok { + if _, exists := md[":authority"]; !exists { + return grpc.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md) + } + // For testing purpose, returns an error if user-agent is failAppUA. + // To test that client gets the correct error. 
+ if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) { + return grpc.Errorf(codes.DataLoss, "error for testing: "+failAppUA) + } + } + cs := args.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(args.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + return err + } + } + return nil +} + +func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { + var sum int + for { + in, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&testpb.StreamingInputCallResponse{ + AggregatedPayloadSize: proto.Int32(int32(sum)), + }) + } + if err != nil { + return err + } + p := in.GetPayload().GetBody() + sum += len(p) + if s.earlyFail { + return grpc.Errorf(codes.NotFound, "not found") + } + } +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + md, ok := metadata.FromIncomingContext(stream.Context()) + if ok { + if s.setAndSendHeader { + if err := stream.SetHeader(md); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SetHeader(_, %v) = %v, want ", stream, md, err) + } + if err := stream.SendHeader(testMetadata2); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SendHeader(_, %v) = %v, want ", stream, testMetadata2, err) + } + } else if s.setHeaderOnly { + if err := stream.SetHeader(md); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SetHeader(_, %v) = %v, want ", stream, md, err) + } + if err := stream.SetHeader(testMetadata2); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SetHeader(_, %v) = %v, want ", stream, testMetadata2, err) + } + } else { + if err := stream.SendHeader(md); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) + } + 
} + stream.SetTrailer(testTrailerMetadata) + if s.multipleSetTrailer { + stream.SetTrailer(testTrailerMetadata2) + } + } + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + return nil + } + if err != nil { + // to facilitate testSvrWriteStatusEarlyWrite + if grpc.Code(err) == codes.ResourceExhausted { + return grpc.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error()) + } + return err + } + cs := in.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(in.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + // to facilitate testSvrWriteStatusEarlyWrite + if grpc.Code(err) == codes.ResourceExhausted { + return grpc.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error()) + } + return err + } + } + } +} + +func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { + var msgBuf []*testpb.StreamingOutputCallRequest + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + break + } + if err != nil { + return err + } + msgBuf = append(msgBuf, in) + } + for _, m := range msgBuf { + cs := m.GetResponseParameters() + for _, c := range cs { + if us := c.GetIntervalUs(); us > 0 { + time.Sleep(time.Duration(us) * time.Microsecond) + } + + payload, err := newPayload(m.GetResponseType(), c.GetSize()) + if err != nil { + return err + } + + if err := stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: payload, + }); err != nil { + return err + } + } + } + return nil +} + +type env struct { + name string + network string // The type of network such as tcp, unix, etc. + security string // The security protocol such as TLS, SSH, etc. 
+ httpHandler bool // whether to use the http.Handler ServerTransport; requires TLS + balancer string // One of "roundrobin", "pickfirst", "v1", or "". + customDialer func(string, string, time.Duration) (net.Conn, error) +} + +func (e env) runnable() bool { + if runtime.GOOS == "windows" && e.network == "unix" { + return false + } + return true +} + +func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) { + if e.customDialer != nil { + return e.customDialer(e.network, addr, timeout) + } + return net.DialTimeout(e.network, addr, timeout) +} + +var ( + tcpClearEnv = env{name: "tcp-clear-v1-balancer", network: "tcp", balancer: "v1"} + tcpTLSEnv = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls", balancer: "v1"} + tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "roundrobin"} + tcpTLSRREnv = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "roundrobin"} + handlerEnv = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "roundrobin"} + noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"} + allEnv = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv} +) + +var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.") + +func listTestEnv() (envs []env) { + if *onlyEnv != "" { + for _, e := range allEnv { + if e.name == *onlyEnv { + if !e.runnable() { + panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS)) + } + return []env{e} + } + } + panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv)) + } + for _, e := range allEnv { + if e.runnable() { + envs = append(envs, e) + } + } + return envs +} + +// test is an end-to-end test. It should be created with the newTest +// func, modified as needed, and then started with its startServer method. 
+// It should be cleaned up with the tearDown method. +type test struct { + t *testing.T + e env + + ctx context.Context // valid for life of test, before tearDown + cancel context.CancelFunc + + // Configurable knobs, after newTest returns: + testServer testpb.TestServiceServer // nil means none + healthServer *health.Server // nil means disabled + maxStream uint32 + tapHandle tap.ServerInHandle + maxMsgSize *int + maxClientReceiveMsgSize *int + maxClientSendMsgSize *int + maxServerReceiveMsgSize *int + maxServerSendMsgSize *int + userAgent string + clientCompression bool + serverCompression bool + unaryClientInt grpc.UnaryClientInterceptor + streamClientInt grpc.StreamClientInterceptor + unaryServerInt grpc.UnaryServerInterceptor + streamServerInt grpc.StreamServerInterceptor + unknownHandler grpc.StreamHandler + sc <-chan grpc.ServiceConfig + customCodec grpc.Codec + serverInitialWindowSize int32 + serverInitialConnWindowSize int32 + clientInitialWindowSize int32 + clientInitialConnWindowSize int32 + perRPCCreds credentials.PerRPCCredentials + + // All test dialing is blocking by default. Set this to true if dial + // should be non-blocking. + nonBlockingDial bool + + // srv and srvAddr are set once startServer is called. + srv *grpc.Server + srvAddr string + + cc *grpc.ClientConn // nil until requested via clientConn + restoreLogs func() // nil unless declareLogNoise is used +} + +func (te *test) tearDown() { + if te.cancel != nil { + te.cancel() + te.cancel = nil + } + if te.cc != nil { + te.cc.Close() + te.cc = nil + } + if te.restoreLogs != nil { + te.restoreLogs() + te.restoreLogs = nil + } + if te.srv != nil { + te.srv.Stop() + } +} + +// newTest returns a new test using the provided testing.T and +// environment. It is returned with default values. Tests should +// modify it before calling its startServer and clientConn methods. 
+func newTest(t *testing.T, e env) *test { + te := &test{ + t: t, + e: e, + maxStream: math.MaxUint32, + } + te.ctx, te.cancel = context.WithCancel(context.Background()) + return te +} + +// startServer starts a gRPC server listening. Callers should defer a +// call to te.tearDown to clean up. +func (te *test) startServer(ts testpb.TestServiceServer) { + te.testServer = ts + te.t.Logf("Running test in %s environment...", te.e.name) + sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)} + if te.maxMsgSize != nil { + sopts = append(sopts, grpc.MaxMsgSize(*te.maxMsgSize)) + } + if te.maxServerReceiveMsgSize != nil { + sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize)) + } + if te.maxServerSendMsgSize != nil { + sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize)) + } + if te.tapHandle != nil { + sopts = append(sopts, grpc.InTapHandle(te.tapHandle)) + } + if te.serverCompression { + sopts = append(sopts, + grpc.RPCCompressor(grpc.NewGZIPCompressor()), + grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), + ) + } + if te.unaryServerInt != nil { + sopts = append(sopts, grpc.UnaryInterceptor(te.unaryServerInt)) + } + if te.streamServerInt != nil { + sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt)) + } + if te.unknownHandler != nil { + sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler)) + } + if te.serverInitialWindowSize > 0 { + sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize)) + } + if te.serverInitialConnWindowSize > 0 { + sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize)) + } + la := "localhost:0" + switch te.e.network { + case "unix": + la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano()) + syscall.Unlink(la) + } + lis, err := net.Listen(te.e.network, la) + if err != nil { + te.t.Fatalf("Failed to listen: %v", err) + } + switch te.e.security { + case "tls": + creds, err := 
credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) + if err != nil { + te.t.Fatalf("Failed to generate credentials %v", err) + } + sopts = append(sopts, grpc.Creds(creds)) + case "clientAlwaysFailCred": + sopts = append(sopts, grpc.Creds(clientAlwaysFailCred{})) + case "clientTimeoutCreds": + sopts = append(sopts, grpc.Creds(&clientTimeoutCreds{})) + } + if te.customCodec != nil { + sopts = append(sopts, grpc.CustomCodec(te.customCodec)) + } + s := grpc.NewServer(sopts...) + te.srv = s + if te.e.httpHandler { + internal.TestingUseHandlerImpl(s) + } + if te.healthServer != nil { + healthpb.RegisterHealthServer(s, te.healthServer) + } + if te.testServer != nil { + testpb.RegisterTestServiceServer(s, te.testServer) + } + addr := la + switch te.e.network { + case "unix": + default: + _, port, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + te.t.Fatalf("Failed to parse listener address: %v", err) + } + addr = "localhost:" + port + } + + go s.Serve(lis) + te.srvAddr = addr +} + +func (te *test) clientConn() *grpc.ClientConn { + if te.cc != nil { + return te.cc + } + opts := []grpc.DialOption{ + grpc.WithDialer(te.e.dialer), + grpc.WithUserAgent(te.userAgent), + } + + if te.sc != nil { + opts = append(opts, grpc.WithServiceConfig(te.sc)) + } + + if te.clientCompression { + opts = append(opts, + grpc.WithCompressor(grpc.NewGZIPCompressor()), + grpc.WithDecompressor(grpc.NewGZIPDecompressor()), + ) + } + if te.unaryClientInt != nil { + opts = append(opts, grpc.WithUnaryInterceptor(te.unaryClientInt)) + } + if te.streamClientInt != nil { + opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt)) + } + if te.maxMsgSize != nil { + opts = append(opts, grpc.WithMaxMsgSize(*te.maxMsgSize)) + } + if te.maxClientReceiveMsgSize != nil { + opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize))) + } + if te.maxClientSendMsgSize != nil { + opts = append(opts, 
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize))) + } + switch te.e.security { + case "tls": + creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") + if err != nil { + te.t.Fatalf("Failed to load credentials: %v", err) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + case "clientAlwaysFailCred": + opts = append(opts, grpc.WithTransportCredentials(clientAlwaysFailCred{})) + case "clientTimeoutCreds": + opts = append(opts, grpc.WithTransportCredentials(&clientTimeoutCreds{})) + default: + opts = append(opts, grpc.WithInsecure()) + } + // TODO(bar) switch balancer case "pickfirst". + var scheme string + switch te.e.balancer { + case "v1": + opts = append(opts, grpc.WithBalancer(grpc.RoundRobin(nil))) + case "roundrobin": + rr := balancer.Get("roundrobin") + if rr == nil { + te.t.Fatalf("got nil when trying to get roundrobin balancer builder") + } + opts = append(opts, grpc.WithBalancerBuilder(rr)) + scheme = "passthrough:///" + } + if te.clientInitialWindowSize > 0 { + opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize)) + } + if te.clientInitialConnWindowSize > 0 { + opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize)) + } + if te.perRPCCreds != nil { + opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds)) + } + if te.customCodec != nil { + opts = append(opts, grpc.WithCodec(te.customCodec)) + } + if !te.nonBlockingDial && te.srvAddr != "" { + // Only do a blocking dial if server is up. + opts = append(opts, grpc.WithBlock()) + } + var err error + te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...) + if err != nil { + te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err) + } + return te.cc +} + +func (te *test) declareLogNoise(phrases ...string) { + te.restoreLogs = declareLogNoise(te.t, phrases...) 
+} + + func (te *test) withServerTester(fn func(st *serverTester)) { + c, err := te.e.dialer(te.srvAddr, 10*time.Second) + if err != nil { + te.t.Fatal(err) + } + defer c.Close() + if te.e.security == "tls" { + c = tls.Client(c, &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{http2.NextProtoTLS}, + }) + } + st := newServerTesterFromConn(te.t, c) + st.greet() + fn(st) + } + + type lazyConn struct { + net.Conn + beLazy int32 + } + + func (l *lazyConn) Write(b []byte) (int, error) { + if atomic.LoadInt32(&(l.beLazy)) == 1 { + // The sleep duration here needs to be less than the leakCheck deadline. + time.Sleep(time.Second) + } + return l.Conn.Write(b) + } + + func TestContextDeadlineNotIgnored(t *testing.T) { + defer leakcheck.Check(t) + e := noBalancerEnv + var lc *lazyConn + e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, timeout) + if err != nil { + return nil, err + } + lc = &lazyConn{Conn: conn} + return lc, nil + } + + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + atomic.StoreInt32(&(lc.beLazy), 1) + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + t1 := time.Now() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err) + } + if time.Since(t1) > 2*time.Second { + t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline") + } + } + + func TestTimeoutOnDeadServer(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testTimeoutOnDeadServer(t, e) + } + } + + func testTimeoutOnDeadServer(t *testing.T, e env) { + te := 
newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + te.srv.Stop() + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)) + cancel() + if e.balancer != "" && grpc.Code(err) != codes.DeadlineExceeded { + // If e.balancer == "", the ac will stop reconnecting because the dialer returns non-temp error, + // the error will be an internal error. 
+ t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded) + } + awaitNewConnLogOutput() +} + +func TestServerGracefulStopIdempotent(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerGracefulStopIdempotent(t, e) + } +} + +func testServerGracefulStopIdempotent(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + for i := 0; i < 3; i++ { + te.srv.GracefulStop() + } +} + +func TestServerGoAway(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerGoAway(t, e) + } +} + +func testServerGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + // Finish an RPC to make sure the connection is good. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && grpc.Code(err) != codes.DeadlineExceeded { + cancel() + break + } + cancel() + } + // A new RPC should fail. 
+ if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable && grpc.Code(err) != codes.Internal { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal) + } + <-ch + awaitNewConnLogOutput() +} + +func TestServerGoAwayPendingRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerGoAwayPendingRPC(t, e) + } +} + +func testServerGoAwayPendingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithCancel(context.Background()) + stream, err := tc.FullDuplexCall(ctx, grpc.FailFast(false)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + // Finish an RPC to make sure the connection is good. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. 
+ for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err != nil { + cancel() + break + } + cancel() + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(1), + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + // The existing RPC should be still good to proceed. + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + cancel() + <-ch + awaitNewConnLogOutput() +} + +func TestServerMultipleGoAwayPendingRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerMultipleGoAwayPendingRPC(t, e) + } +} + +func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithCancel(context.Background()) + stream, err := tc.FullDuplexCall(ctx, grpc.FailFast(false)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + // Finish an RPC to make sure the connection is good. 
+ if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch1 := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch1) + }() + ch2 := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch2) + }() + // Loop until the server side GoAway signal is propagated to the client. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err != nil { + cancel() + break + } + cancel() + } + select { + case <-ch1: + t.Fatal("GracefulStop() terminated early") + case <-ch2: + t.Fatal("GracefulStop() terminated early") + default: + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(1), + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + // The existing RPC should be still good to proceed. 
+ if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() = %v, want ", stream, err) + } + <-ch1 + <-ch2 + cancel() + awaitNewConnLogOutput() +} + +func TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testConcurrentClientConnCloseAndServerGoAway(t, e) + } +} + +func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch := make(chan struct{}) + // Close ClientConn and Server concurrently. 
+ go func() { + te.srv.GracefulStop() + close(ch) + }() + go func() { + cc.Close() + }() + <-ch +} + +func TestConcurrentServerStopAndGoAway(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testConcurrentServerStopAndGoAway(t, e) + } +} + +func testConcurrentServerStopAndGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + stream, err := tc.FullDuplexCall(context.Background(), grpc.FailFast(false)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + // Finish an RPC to make sure the connection is good. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err != nil { + cancel() + break + } + cancel() + } + // Stop the server and close all the connections. 
+ te.srv.Stop() + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(1), + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(req); err == nil { + if _, err := stream.Recv(); err == nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + } + <-ch + awaitNewConnLogOutput() +} + +func TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testClientConnCloseAfterGoAwayWithActiveStream(t, e) + } +} + +func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + if _, err := tc.FullDuplexCall(context.Background()); err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, ", tc, err) + } + done := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(done) + }() + time.Sleep(50 * time.Millisecond) + cc.Close() + timeout := time.NewTimer(time.Second) + select { + case <-done: + case <-timeout.C: + t.Fatalf("Test timed-out.") + } +} + +func TestFailFast(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testFailFast(t, e) + } +} + +func testFailFast(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer 
te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + // Stop the server and tear down all the existing connections. + te.srv.Stop() + // Loop until the server teardown is propagated to the client. + for { + _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}) + if grpc.Code(err) == codes.Unavailable { + break + } + fmt.Printf("%v.EmptyCall(_, _) = _, %v", tc, err) + time.Sleep(10 * time.Millisecond) + } + // The client keeps reconnecting and ongoing fail-fast RPCs should fail with codes.Unavailable. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable { + t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable) + } + if _, err := tc.StreamingInputCall(context.Background()); grpc.Code(err) != codes.Unavailable { + t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable) + } + + awaitNewConnLogOutput() + } + + func testServiceConfigSetup(t *testing.T, e env) (*test, chan grpc.ServiceConfig) { + te := newTest(t, e) + // We write before read. 
+ ch := make(chan grpc.ServiceConfig, 1) + te.sc = ch + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + return te, ch +} + +func newBool(b bool) (a *bool) { + return &b +} + +func newInt(b int) (a *int) { + return &b +} + +func newDuration(b time.Duration) (a *time.Duration) { + a = new(time.Duration) + *a = b + return +} + +func TestServiceConfigGetMethodConfig(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testGetMethodConfig(t, e) + } +} + +func testGetMethodConfig(t *testing.T, e env) { + te, ch := testServiceConfigSetup(t, e) + defer te.tearDown() + + mc1 := grpc.MethodConfig{ + WaitForReady: newBool(true), + Timeout: newDuration(time.Millisecond), + } + mc2 := grpc.MethodConfig{WaitForReady: newBool(false)} + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc1 + m["/grpc.testing.TestService/"] = mc2 + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/UnaryCall"] = mc1 + m["/grpc.testing.TestService/"] = mc2 + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + // Wait for the new service config to propagate. 
+ for { + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) == codes.DeadlineExceeded { + continue + } + break + } + // The following RPCs are expected to become fail-fast. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) + } +} + +func TestServiceConfigWaitForReady(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testServiceConfigWaitForReady(t, e) + } +} + +func testServiceConfigWaitForReady(t *testing.T, e env) { + te, ch := testServiceConfigSetup(t, e) + defer te.tearDown() + + // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. + mc := grpc.MethodConfig{ + WaitForReady: newBool(false), + Timeout: newDuration(time.Millisecond), + } + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + if _, err := tc.FullDuplexCall(context.Background(), grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + + // Generate a service config update. + // Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. 
+ mc.WaitForReady = newBool(true) + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + // Wait for the new service config to take effect. + mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") + for { + if !*mc.WaitForReady { + time.Sleep(100 * time.Millisecond) + mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") + continue + } + break + } + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + if _, err := tc.FullDuplexCall(context.Background()); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } +} + +func TestServiceConfigTimeout(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testServiceConfigTimeout(t, e) + } +} + +func testServiceConfigTimeout(t *testing.T, e env) { + te, ch := testServiceConfigSetup(t, e) + defer te.tearDown() + + // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. + mc := grpc.MethodConfig{ + Timeout: newDuration(time.Hour), + } + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + cancel() + ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) + if _, err := tc.FullDuplexCall(ctx, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + cancel() + + // Generate a service config update. + // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. + mc.Timeout = newDuration(time.Nanosecond) + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + // Wait for the new service config to take effect. 
+ mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + for { + if *mc.Timeout != time.Nanosecond { + time.Sleep(100 * time.Millisecond) + mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + continue + } + break + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + cancel() + + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + if _, err := tc.FullDuplexCall(ctx, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + cancel() +} + +func TestServiceConfigMaxMsgSize(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testServiceConfigMaxMsgSize(t, e) + } +} + +func testServiceConfigMaxMsgSize(t *testing.T, e env) { + // Setting up values and objects shared across all test cases. + const smallSize = 1 + const largeSize = 1024 + const extraLargeSize = 2048 + + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) + if err != nil { + t.Fatal(err) + } + + mc := grpc.MethodConfig{ + MaxReqSize: newInt(extraLargeSize), + MaxRespSize: newInt(extraLargeSize), + } + + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/UnaryCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + // Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv). 
+ te1, ch1 := testServiceConfigSetup(t, e) + te1.startServer(&testServer{security: e.security}) + defer te1.tearDown() + + ch1 <- sc + tc := testpb.NewTestServiceClient(te1.clientConn()) + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(extraLargeSize)), + Payload: smallPayload, + } + // Test for unary RPC recv. + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = extraLargePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(extraLargeSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: smallPayload, + } + stream, err := tc.FullDuplexCall(te1.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. 
+ respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = extraLargePayload + stream, err = tc.FullDuplexCall(te1.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } + + // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te2, ch2 := testServiceConfigSetup(t, e) + te2.maxClientReceiveMsgSize = newInt(1024) + te2.maxClientSendMsgSize = newInt(1024) + te2.startServer(&testServer{security: e.security}) + defer te2.tearDown() + ch2 <- sc + tc = testpb.NewTestServiceClient(te2.clientConn()) + + // Test for unary RPC recv. + req.Payload = smallPayload + req.ResponseSize = proto.Int32(int32(largeSize)) + + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = largePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. 
+ stream, err = tc.FullDuplexCall(te2.ctx) + respParam[0].Size = proto.Int32(int32(largeSize)) + sreq.Payload = smallPayload + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. + respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te2.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } + + // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te3, ch3 := testServiceConfigSetup(t, e) + te3.maxClientReceiveMsgSize = newInt(4096) + te3.maxClientSendMsgSize = newInt(4096) + te3.startServer(&testServer{security: e.security}) + defer te3.tearDown() + ch3 <- sc + tc = testpb.NewTestServiceClient(te3.clientConn()) + + // Test for unary RPC recv. + req.Payload = smallPayload + req.ResponseSize = proto.Int32(int32(largeSize)) + + if _, err := tc.UnaryCall(context.Background(), req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) + } + + req.ResponseSize = proto.Int32(int32(extraLargeSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. 
+ req.Payload = largePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) + } + + req.Payload = extraLargePayload + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + stream, err = tc.FullDuplexCall(te3.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam[0].Size = proto.Int32(int32(largeSize)) + sreq.Payload = smallPayload + + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want ", stream, err) + } + + respParam[0].Size = proto.Int32(int32(extraLargeSize)) + + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. 
+ respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te3.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + sreq.Payload = extraLargePayload + if err := stream.Send(sreq); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } +} + +func TestMaxMsgSizeClientDefault(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMaxMsgSizeClientDefault(t, e) + } +} + +func testMaxMsgSizeClientDefault(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + te.startServer(&testServer{security: e.security}) + + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const smallSize = 1 + const largeSize = 4 * 1024 * 1024 + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeSize)), + Payload: smallPayload, + } + // Test for unary RPC recv. 
+ if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(largeSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: smallPayload, + } + + // Test for streaming RPC recv. + stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } +} + +func TestMaxMsgSizeClientAPI(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMaxMsgSizeClientAPI(t, e) + } +} + +func testMaxMsgSizeClientAPI(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + // To avoid error on server side. 
+ te.maxServerSendMsgSize = newInt(5 * 1024 * 1024) + te.maxClientReceiveMsgSize = newInt(1024) + te.maxClientSendMsgSize = newInt(1024) + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + te.startServer(&testServer{security: e.security}) + + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const smallSize = 1 + const largeSize = 1024 + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeSize)), + Payload: smallPayload, + } + // Test for unary RPC recv. + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = largePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(largeSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: smallPayload, + } + + // Test for streaming RPC recv. 
+ stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. + respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } +} + +func TestMaxMsgSizeServerAPI(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMaxMsgSizeServerAPI(t, e) + } +} + +func testMaxMsgSizeServerAPI(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.maxServerReceiveMsgSize = newInt(1024) + te.maxServerSendMsgSize = newInt(1024) + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + te.startServer(&testServer{security: e.security}) + + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const smallSize = 1 + const largeSize = 1024 + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: 
testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeSize)), + Payload: smallPayload, + } + // Test for unary RPC send. + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC recv. + req.Payload = largePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(largeSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: smallPayload, + } + + // Test for streaming RPC send. + stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. 
+ respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } +} + +func TestTap(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testTap(t, e) + } +} + +type myTap struct { + cnt int +} + +func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) { + if info != nil { + if info.FullMethodName == "/grpc.testing.TestService/EmptyCall" { + t.cnt++ + } else if info.FullMethodName == "/grpc.testing.TestService/UnaryCall" { + return nil, fmt.Errorf("tap error") + } + } + return ctx, nil +} + +func testTap(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + ttap := &myTap{} + te.tapHandle = ttap.handle + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + if ttap.cnt != 1 { + t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt) + } + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: 
testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(45), + Payload: payload, + } + if _, err := tc.UnaryCall(context.Background(), req); grpc.Code(err) != codes.Unavailable { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) + } +} + +func healthCheck(d time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) { + ctx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + hc := healthpb.NewHealthClient(cc) + req := &healthpb.HealthCheckRequest{ + Service: serviceName, + } + return hc.Check(ctx, req) +} + +func TestHealthCheckOnSuccess(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testHealthCheckOnSuccess(t, e) + } +} + +func testHealthCheckOnSuccess(t *testing.T, e env) { + te := newTest(t, e) + hs := health.NewServer() + hs.SetServingStatus("grpc.health.v1.Health", 1) + te.healthServer = hs + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1.Health"); err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } +} + +func TestHealthCheckOnFailure(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testHealthCheckOnFailure(t, e) + } +} + +func testHealthCheckOnFailure(t *testing.T, e env) { + defer leakcheck.Check(t) + te := newTest(t, e) + te.declareLogNoise( + "Failed to dial ", + "grpc: the client connection is closing; please retry", + ) + hs := health.NewServer() + hs.SetServingStatus("grpc.health.v1.HealthCheck", 1) + te.healthServer = hs + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + wantErr := grpc.Errorf(codes.DeadlineExceeded, "context deadline exceeded") + if _, err := healthCheck(0*time.Second, cc, "grpc.health.v1.Health"); !reflect.DeepEqual(err, wantErr) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, 
error code %s", err, codes.DeadlineExceeded) + } + awaitNewConnLogOutput() +} + +func TestHealthCheckOff(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. + if e.name == "handler-tls" { + continue + } + testHealthCheckOff(t, e) + } +} + +func testHealthCheckOff(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + want := grpc.Errorf(codes.Unimplemented, "unknown service grpc.health.v1.Health") + if _, err := healthCheck(1*time.Second, te.clientConn(), ""); !reflect.DeepEqual(err, want) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, %v", err, want) + } +} + +func TestUnknownHandler(t *testing.T) { + defer leakcheck.Check(t) + // An example unknownHandler that returns a different code and a different method, making sure that we do not + // expose what methods are implemented to a client that is not authenticated. + unknownHandler := func(srv interface{}, stream grpc.ServerStream) error { + return grpc.Errorf(codes.Unauthenticated, "user unauthenticated") + } + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. 
+ if e.name == "handler-tls" { + continue + } + testUnknownHandler(t, e, unknownHandler) + } +} + +func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) { + te := newTest(t, e) + te.unknownHandler = unknownHandler + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + want := grpc.Errorf(codes.Unauthenticated, "user unauthenticated") + if _, err := healthCheck(1*time.Second, te.clientConn(), ""); !reflect.DeepEqual(err, want) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, %v", err, want) + } +} + +func TestHealthCheckServingStatus(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testHealthCheckServingStatus(t, e) + } +} + +func testHealthCheckServingStatus(t *testing.T, e env) { + te := newTest(t, e) + hs := health.NewServer() + te.healthServer = hs + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + out, err := healthCheck(1*time.Second, cc, "") + if err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } + if out.Status != healthpb.HealthCheckResponse_SERVING { + t.Fatalf("Got the serving status %v, want SERVING", out.Status) + } + wantErr := grpc.Errorf(codes.NotFound, "unknown service") + if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1.Health"); !reflect.DeepEqual(err, wantErr) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, error code %s", err, codes.NotFound) + } + hs.SetServingStatus("grpc.health.v1.Health", healthpb.HealthCheckResponse_SERVING) + out, err = healthCheck(1*time.Second, cc, "grpc.health.v1.Health") + if err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } + if out.Status != healthpb.HealthCheckResponse_SERVING { + t.Fatalf("Got the serving status %v, want SERVING", out.Status) + } + hs.SetServingStatus("grpc.health.v1.Health", healthpb.HealthCheckResponse_NOT_SERVING) + out, err = healthCheck(1*time.Second, cc, "grpc.health.v1.Health") + if err != nil { + 
t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err)
+ }
+ if out.Status != healthpb.HealthCheckResponse_NOT_SERVING {
+ t.Fatalf("Got the serving status %v, want NOT_SERVING", out.Status)
+ }
+
+}
+
+func TestErrorChanNoIO(t *testing.T) {
+ defer leakcheck.Check(t)
+ for _, e := range listTestEnv() {
+ testErrorChanNoIO(t, e)
+ }
+}
+
+func testErrorChanNoIO(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.startServer(&testServer{security: e.security})
+ defer te.tearDown()
+
+ tc := testpb.NewTestServiceClient(te.clientConn())
+ if _, err := tc.FullDuplexCall(context.Background()); err != nil {
+ t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
+ }
+}
+
+func TestEmptyUnaryWithUserAgent(t *testing.T) {
+ defer leakcheck.Check(t)
+ for _, e := range listTestEnv() {
+ testEmptyUnaryWithUserAgent(t, e)
+ }
+}
+
+func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
+ te := newTest(t, e)
+ te.userAgent = testAppUA
+ te.startServer(&testServer{security: e.security})
+ defer te.tearDown()
+
+ cc := te.clientConn()
+ tc := testpb.NewTestServiceClient(cc)
+ var header metadata.MD
+ reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
+ if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
+ t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, ", reply, err, &testpb.Empty{})
+ }
+ if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) {
+ t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA)
+ }
+
+ te.srv.Stop()
+}
+
+func TestFailedEmptyUnary(t *testing.T) {
+ defer leakcheck.Check(t)
+ for _, e := range listTestEnv() {
+ if e.name == "handler-tls" {
+ // This test covers status details, but
+ // Grpc-Status-Details-Bin is not supported in handler_server.
+ continue + } + testFailedEmptyUnary(t, e) + } +} + +func testFailedEmptyUnary(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = failAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + wantErr := detailedError + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !reflect.DeepEqual(err, wantErr) { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr) + } +} + +func TestLargeUnary(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testLargeUnary(t, e) + } +} + +func testLargeUnary(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const argSize = 271828 + const respSize = 314159 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + reply, err := tc.UnaryCall(context.Background(), req) + if err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + pt := reply.GetPayload().GetType() + ps := len(reply.GetPayload().GetBody()) + if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize { + t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize) + } +} + +// Test backward-compatibility API for setting msg size limit. 
+func TestExceedMsgLimit(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testExceedMsgLimit(t, e) + } +} + +func testExceedMsgLimit(t *testing.T, e env) { + te := newTest(t, e) + te.maxMsgSize = newInt(1024) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + argSize := int32(*te.maxMsgSize + 1) + const smallSize = 1 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + + // Test on server side for unary RPC. + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(smallSize), + Payload: payload, + } + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + // Test on client side for unary RPC. + req.ResponseSize = proto.Int32(int32(*te.maxMsgSize) + 1) + req.Payload = smallPayload + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test on server side for streaming RPC. 
+ stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(1), + }, + } + + spayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(*te.maxMsgSize+1)) + if err != nil { + t.Fatal(err) + } + + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: spayload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test on client side for streaming RPC. + stream, err = tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam[0].Size = proto.Int32(int32(*te.maxMsgSize) + 1) + sreq.Payload = smallPayload + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + +} + +func TestPeerClientSide(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPeerClientSide(t, e) + } +} + +func testPeerClientSide(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + peer := new(peer.Peer) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(peer), grpc.FailFast(false)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + pa := peer.Addr.String() + if e.network == "unix" { + if pa != te.srvAddr { 
+ t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr) + } + return + } + _, pp, err := net.SplitHostPort(pa) + if err != nil { + t.Fatalf("Failed to parse address from peer.") + } + _, sp, err := net.SplitHostPort(te.srvAddr) + if err != nil { + t.Fatalf("Failed to parse address of test server.") + } + if pp != sp { + t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp) + } +} + +// TestPeerNegative tests that if call fails setting peer +// doesn't cause a segmentation fault. +// issue#1141 https://github.com/grpc/grpc-go/issues/1141 +func TestPeerNegative(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPeerNegative(t, e) + } +} + +func testPeerNegative(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + peer := new(peer.Peer) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)) +} + +func TestPeerFailedRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPeerFailedRPC(t, e) + } +} + +func testPeerFailedRPC(t *testing.T, e env) { + te := newTest(t, e) + te.maxServerReceiveMsgSize = newInt(1 * 1024) + te.startServer(&testServer{security: e.security}) + + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + // first make a successful request to the server + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + + // make a second request that will be rejected by the server + const largeSize = 5 * 1024 + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + Payload: largePayload, + } + + peer := new(peer.Peer) + if _, err 
:= tc.UnaryCall(context.Background(), req, grpc.Peer(peer)); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } else { + pa := peer.Addr.String() + if e.network == "unix" { + if pa != te.srvAddr { + t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr) + } + return + } + _, pp, err := net.SplitHostPort(pa) + if err != nil { + t.Fatalf("Failed to parse address from peer.") + } + _, sp, err := net.SplitHostPort(te.srvAddr) + if err != nil { + t.Fatalf("Failed to parse address of test server.") + } + if pp != sp { + t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp) + } + } +} + +func TestMetadataUnaryRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMetadataUnaryRPC(t, e) + } +} + +func testMetadataUnaryRPC(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const argSize = 2718 + const respSize = 314 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + var header, trailer metadata.MD + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil { + t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) + } + // Ignore optional response headers that Servers may set: + if header != nil { + delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers + delete(header, "date") // the Date header is also optional + delete(header, "user-agent") + } + if !reflect.DeepEqual(header, testMetadata) { + t.Fatalf("Received header 
metadata %v, want %v", header, testMetadata) + } + if !reflect.DeepEqual(trailer, testTrailerMetadata) { + t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata) + } +} + +func TestMultipleSetTrailerUnaryRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMultipleSetTrailerUnaryRPC(t, e) + } +} + +func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, multipleSetTrailer: true}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const ( + argSize = 1 + respSize = 1 + ) + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + var trailer metadata.MD + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil { + t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) + } + expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2) + if !reflect.DeepEqual(trailer, expectedTrailer) { + t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer) + } +} + +func TestMultipleSetTrailerStreamingRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMultipleSetTrailerStreamingRPC(t, e) + } +} + +func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, multipleSetTrailer: true}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + stream, err := tc.FullDuplexCall(ctx, grpc.FailFast(false)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, 
%v, want ", tc, err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err) + } + + trailer := stream.Trailer() + expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2) + if !reflect.DeepEqual(trailer, expectedTrailer) { + t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer) + } +} + +func TestSetAndSendHeaderUnaryRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testSetAndSendHeaderUnaryRPC(t, e) + } +} + +// To test header metadata is sent on SendHeader(). +func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, setAndSendHeader: true}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const ( + argSize = 1 + respSize = 1 + ) + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + var header metadata.MD + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.FailFast(false)); err != nil { + t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) + } + delete(header, "user-agent") + expectedHeader := metadata.Join(testMetadata, testMetadata2) + if !reflect.DeepEqual(header, expectedHeader) { + t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) + } +} + +func TestMultipleSetHeaderUnaryRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + 
testMultipleSetHeaderUnaryRPC(t, e) + } +} + +// To test header metadata is sent when sending response. +func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, setHeaderOnly: true}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const ( + argSize = 1 + respSize = 1 + ) + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + + var header metadata.MD + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.FailFast(false)); err != nil { + t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) + } + delete(header, "user-agent") + expectedHeader := metadata.Join(testMetadata, testMetadata2) + if !reflect.DeepEqual(header, expectedHeader) { + t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) + } +} + +func TestMultipleSetHeaderUnaryRPCError(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testMultipleSetHeaderUnaryRPCError(t, e) + } +} + +// To test header metadata is sent when sending status. +func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, setHeaderOnly: true}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const ( + argSize = 1 + respSize = -1 // Invalid respSize to make RPC fail. 
+ ) + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + var header metadata.MD + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.FailFast(false)); err == nil { + t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) + } + delete(header, "user-agent") + expectedHeader := metadata.Join(testMetadata, testMetadata2) + if !reflect.DeepEqual(header, expectedHeader) { + t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) + } +} + +func TestSetAndSendHeaderStreamingRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testSetAndSendHeaderStreamingRPC(t, e) + } +} + +// To test header metadata is sent on SendHeader(). 
+func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, setAndSendHeader: true}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const ( + argSize = 1 + respSize = 1 + ) + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err) + } + + header, err := stream.Header() + if err != nil { + t.Fatalf("%v.Header() = _, %v, want _, ", stream, err) + } + delete(header, "user-agent") + expectedHeader := metadata.Join(testMetadata, testMetadata2) + if !reflect.DeepEqual(header, expectedHeader) { + t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) + } +} + +func TestMultipleSetHeaderStreamingRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testMultipleSetHeaderStreamingRPC(t, e) + } +} + +// To test header metadata is sent when sending response. 
+func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, setHeaderOnly: true}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const ( + argSize = 1 + respSize = 1 + ) + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: []*testpb.ResponseParameters{ + {Size: proto.Int32(respSize)}, + }, + Payload: payload, + } + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err) + } + + header, err := stream.Header() + if err != nil { + t.Fatalf("%v.Header() = _, %v, want _, ", stream, err) + } + delete(header, "user-agent") + expectedHeader := metadata.Join(testMetadata, testMetadata2) + if !reflect.DeepEqual(header, expectedHeader) { + t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) + } + +} + +func TestMultipleSetHeaderStreamingRPCError(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testMultipleSetHeaderStreamingRPCError(t, e) + } +} + +// To test header metadata is sent when sending status. 
+func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
+	te := newTest(t, e)
+	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
+	defer te.tearDown()
+	tc := testpb.NewTestServiceClient(te.clientConn())
+
+	const (
+		argSize  = 1
+		respSize = -1
+	)
+	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
+	stream, err := tc.FullDuplexCall(ctx)
+	if err != nil {
+		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
+	}
+
+	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req := &testpb.StreamingOutputCallRequest{
+		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
+		ResponseParameters: []*testpb.ResponseParameters{
+			{Size: proto.Int32(respSize)},
+		},
+		Payload: payload,
+	}
+	if err := stream.Send(req); err != nil {
+		t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err)
+	}
+	if _, err := stream.Recv(); err == nil {
+		t.Fatalf("%v.Recv() = %v, want ", stream, err)
+	}
+
+	header, err := stream.Header()
+	if err != nil {
+		t.Fatalf("%v.Header() = _, %v, want _, ", stream, err)
+	}
+	delete(header, "user-agent")
+	expectedHeader := metadata.Join(testMetadata, testMetadata2)
+	if !reflect.DeepEqual(header, expectedHeader) {
+		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
+	}
+
+	if err := stream.CloseSend(); err != nil {
+		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
+	}
+}
+
+// TestMalformedHTTP2Metadata verifies the returned error when the client
+// sends illegal metadata.
+func TestMalformedHTTP2Metadata(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		if e.name == "handler-tls" {
+			// Failed with "server stops accepting new RPCs".
+			// Server stops accepting new RPCs when the client sends an illegal http2 header.
+ continue + } + testMalformedHTTP2Metadata(t, e) + } +} + +func testMalformedHTTP2Metadata(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(314), + Payload: payload, + } + ctx := metadata.NewOutgoingContext(context.Background(), malformedHTTP2Metadata) + if _, err := tc.UnaryCall(ctx, req); grpc.Code(err) != codes.Internal { + t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal) + } +} + +func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup) { + defer wg.Done() + const argSize = 2718 + const respSize = 314 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Error(err) + return + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + reply, err := tc.UnaryCall(context.Background(), req, grpc.FailFast(false)) + if err != nil { + t.Errorf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + return + } + pt := reply.GetPayload().GetType() + ps := len(reply.GetPayload().GetBody()) + if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize { + t.Errorf("Got reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize) + return + } +} + +func TestRetry(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + // Fails with RST_STREAM / FLOW_CONTROL_ERROR + continue + } + testRetry(t, e) + } +} + +// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport. 
+// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy +// and error-prone paths. +func testRetry(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise("transport: http2Client.notifyError got notified that the client transport was broken") + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + var wg sync.WaitGroup + + numRPC := 1000 + rpcSpacing := 2 * time.Millisecond + if raceMode { + // The race detector has a limit on how many goroutines it can track. + // This test is near the upper limit, and goes over the limit + // depending on the environment (the http.Handler environment uses + // more goroutines) + t.Logf("Shortening test in race mode.") + numRPC /= 2 + rpcSpacing *= 2 + } + + wg.Add(1) + go func() { + // Halfway through starting RPCs, kill all connections: + time.Sleep(time.Duration(numRPC/2) * rpcSpacing) + + // The server shuts down the network connection to make a + // transport error which will be detected by the client side + // code. + internal.TestingCloseConns(te.srv) + wg.Done() + }() + // All these RPCs should succeed eventually. + for i := 0; i < numRPC; i++ { + time.Sleep(rpcSpacing) + wg.Add(1) + go performOneRPC(t, tc, &wg) + } + wg.Wait() +} + +func TestRPCTimeout(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testRPCTimeout(t, e) + } +} + +// TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism. 
+func testRPCTimeout(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, unaryCallSleepTime: 50 * time.Millisecond}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + const argSize = 2718 + const respSize = 314 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + for i := -1; i <= 10; i++ { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond) + if _, err := tc.UnaryCall(ctx, req); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/UnaryCallv(_, _) = _, %v; want , error code: %s", err, codes.DeadlineExceeded) + } + cancel() + } +} + +func TestCancel(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testCancel(t, e) + } +} + +func testCancel(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise("grpc: the client connection is closing; please retry") + te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + const argSize = 2718 + const respSize = 314 + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + ctx, cancel := context.WithCancel(context.Background()) + time.AfterFunc(1*time.Millisecond, cancel) + if r, err := tc.UnaryCall(ctx, req); grpc.Code(err) != codes.Canceled { + t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, err, codes.Canceled) + } + awaitNewConnLogOutput() +} + +func TestCancelNoIO(t *testing.T) { + defer 
leakcheck.Check(t) + for _, e := range listTestEnv() { + testCancelNoIO(t, e) + } +} + +func testCancelNoIO(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken") + te.maxStream = 1 // Only allows 1 live stream per server transport. + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + // Start one blocked RPC for which we'll never send streaming + // input. This will consume the 1 maximum concurrent streams, + // causing future RPCs to hang. + ctx, cancelFirst := context.WithCancel(context.Background()) + _, err := tc.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + + // Loop until the ClientConn receives the initial settings + // frame from the server, notifying it about the maximum + // concurrent streams. We know when it's received it because + // an RPC will fail with codes.DeadlineExceeded instead of + // succeeding. + // TODO(bradfitz): add internal test hook for this (Issue 534) + for { + ctx, cancelSecond := context.WithTimeout(context.Background(), 50*time.Millisecond) + _, err := tc.StreamingInputCall(ctx) + cancelSecond() + if err == nil { + continue + } + if grpc.Code(err) == codes.DeadlineExceeded { + break + } + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded) + } + // If there are any RPCs in flight before the client receives + // the max streams setting, let them be expired. + // TODO(bradfitz): add internal test hook for this (Issue 534) + time.Sleep(50 * time.Millisecond) + + go func() { + time.Sleep(50 * time.Millisecond) + cancelFirst() + }() + + // This should be blocked until the 1st is canceled, then succeed. 
+ ctx, cancelThird := context.WithTimeout(context.Background(), 500*time.Millisecond) + if _, err := tc.StreamingInputCall(ctx); err != nil { + t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + cancelThird() +} + +// The following tests the gRPC streaming RPC implementations. +// TODO(zhaoq): Have better coverage on error cases. +var ( + reqSizes = []int{27182, 8, 1828, 45904} + respSizes = []int{31415, 9, 2653, 58979} +) + +func TestNoService(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testNoService(t, e) + } +} + +func testNoService(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(nil) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + stream, err := tc.FullDuplexCall(te.ctx, grpc.FailFast(false)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if _, err := stream.Recv(); grpc.Code(err) != codes.Unimplemented { + t.Fatalf("stream.Recv() = _, %v, want _, error code %s", err, codes.Unimplemented) + } +} + +func TestPingPong(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPingPong(t, e) + } +} + +func testPingPong(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + var index int + for index < len(reqSizes) { + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(respSizes[index])), + }, + } + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) + if err != nil { + t.Fatal(err) + } + + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(req); err != nil { + 
t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + reply, err := stream.Recv() + if err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } + pt := reply.GetPayload().GetType() + if pt != testpb.PayloadType_COMPRESSABLE { + t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE) + } + size := len(reply.GetPayload().GetBody()) + if size != int(respSizes[index]) { + t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) + } + index++ + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("%v failed to complele the ping pong test: %v", stream, err) + } +} + +func TestMetadataStreamingRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMetadataStreamingRPC(t, e) + } +} + +func testMetadataStreamingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + ctx := metadata.NewOutgoingContext(te.ctx, testMetadata) + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + go func() { + headerMD, err := stream.Header() + if e.security == "tls" { + delete(headerMD, "transport_security_type") + } + delete(headerMD, "trailer") // ignore if present + delete(headerMD, "user-agent") + if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { + t.Errorf("#1 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) + } + // test the cached value. 
+ headerMD, err = stream.Header() + delete(headerMD, "trailer") // ignore if present + delete(headerMD, "user-agent") + if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { + t.Errorf("#2 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) + } + err = func() error { + for index := 0; index < len(reqSizes); index++ { + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(respSizes[index])), + }, + } + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) + if err != nil { + return err + } + + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(req); err != nil { + return fmt.Errorf("%v.Send(%v) = %v, want ", stream, req, err) + } + } + return nil + }() + // Tell the server we're done sending args. + stream.CloseSend() + if err != nil { + t.Error(err) + } + }() + for { + if _, err := stream.Recv(); err != nil { + break + } + } + trailerMD := stream.Trailer() + if !reflect.DeepEqual(testTrailerMetadata, trailerMD) { + t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testTrailerMetadata) + } +} + +func TestServerStreaming(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testServerStreaming(t, e) + } +} + +func testServerStreaming(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + respParam := make([]*testpb.ResponseParameters, len(respSizes)) + for i, s := range respSizes { + respParam[i] = &testpb.ResponseParameters{ + Size: proto.Int32(int32(s)), + } + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + } + stream, err := tc.StreamingOutputCall(context.Background(), req) + if err != nil { + 
t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) + } + var rpcStatus error + var respCnt int + var index int + for { + reply, err := stream.Recv() + if err != nil { + rpcStatus = err + break + } + pt := reply.GetPayload().GetType() + if pt != testpb.PayloadType_COMPRESSABLE { + t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE) + } + size := len(reply.GetPayload().GetBody()) + if size != int(respSizes[index]) { + t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) + } + index++ + respCnt++ + } + if rpcStatus != io.EOF { + t.Fatalf("Failed to finish the server streaming rpc: %v, want ", rpcStatus) + } + if respCnt != len(respSizes) { + t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) + } +} + +func TestFailedServerStreaming(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testFailedServerStreaming(t, e) + } +} + +func testFailedServerStreaming(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = failAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + respParam := make([]*testpb.ResponseParameters, len(respSizes)) + for i, s := range respSizes { + respParam[i] = &testpb.ResponseParameters{ + Size: proto.Int32(int32(s)), + } + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + } + ctx := metadata.NewOutgoingContext(te.ctx, testMetadata) + stream, err := tc.StreamingOutputCall(ctx, req) + if err != nil { + t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) + } + wantErr := grpc.Errorf(codes.DataLoss, "error for testing: "+failAppUA) + if _, err := stream.Recv(); !reflect.DeepEqual(err, wantErr) { + t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr) + } +} + +// concurrentSendServer is a TestServiceServer whose +// StreamingOutputCall makes ten serial Send calls, 
sending payloads +// "0".."9", inclusive. TestServerStreamingConcurrent verifies they +// were received in the correct order, and that there were no races. +// +// All other TestServiceServer methods crash if called. +type concurrentSendServer struct { + testpb.TestServiceServer +} + +func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { + for i := 0; i < 10; i++ { + stream.Send(&testpb.StreamingOutputCallResponse{ + Payload: &testpb.Payload{ + Body: []byte{'0' + uint8(i)}, + }, + }) + } + return nil +} + +// Tests doing a bunch of concurrent streaming output calls. +func TestServerStreamingConcurrent(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testServerStreamingConcurrent(t, e) + } +} + +func testServerStreamingConcurrent(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(concurrentSendServer{}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + doStreamingCall := func() { + req := &testpb.StreamingOutputCallRequest{} + stream, err := tc.StreamingOutputCall(context.Background(), req) + if err != nil { + t.Errorf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) + return + } + var ngot int + var buf bytes.Buffer + for { + reply, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + ngot++ + if buf.Len() > 0 { + buf.WriteByte(',') + } + buf.Write(reply.GetPayload().GetBody()) + } + if want := 10; ngot != want { + t.Errorf("Got %d replies, want %d", ngot, want) + } + if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want { + t.Errorf("Got replies %q; want %q", got, want) + } + } + + var wg sync.WaitGroup + for i := 0; i < 20; i++ { + wg.Add(1) + go func() { + defer wg.Done() + doStreamingCall() + }() + } + wg.Wait() + +} + +func generatePayloadSizes() [][]int { + reqSizes := [][]int{ + {27182, 8, 1828, 45904}, + } + + num8KPayloads 
:= 1024 + eightKPayloads := []int{} + for i := 0; i < num8KPayloads; i++ { + eightKPayloads = append(eightKPayloads, (1 << 13)) + } + reqSizes = append(reqSizes, eightKPayloads) + + num2MPayloads := 8 + twoMPayloads := []int{} + for i := 0; i < num2MPayloads; i++ { + twoMPayloads = append(twoMPayloads, (1 << 21)) + } + reqSizes = append(reqSizes, twoMPayloads) + + return reqSizes +} + +func TestClientStreaming(t *testing.T) { + defer leakcheck.Check(t) + for _, s := range generatePayloadSizes() { + for _, e := range listTestEnv() { + testClientStreaming(t, e, s) + } + } +} + +func testClientStreaming(t *testing.T, e env, sizes []int) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + ctx, cancel := context.WithTimeout(te.ctx, time.Second*30) + defer cancel() + stream, err := tc.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want ", tc, err) + } + + var sum int + for _, s := range sizes { + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s)) + if err != nil { + t.Fatal(err) + } + + req := &testpb.StreamingInputCallRequest{ + Payload: payload, + } + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + sum += s + } + reply, err := stream.CloseAndRecv() + if err != nil { + t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + } + if reply.GetAggregatedPayloadSize() != int32(sum) { + t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) + } +} + +func TestClientStreamingError(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testClientStreamingError(t, e) + } +} + +func testClientStreamingError(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security, earlyFail: true}) 
+ defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + stream, err := tc.StreamingInputCall(te.ctx) + if err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want ", tc, err) + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1) + if err != nil { + t.Fatal(err) + } + + req := &testpb.StreamingInputCallRequest{ + Payload: payload, + } + // The 1st request should go through. + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + for { + if err := stream.Send(req); err != io.EOF { + continue + } + if _, err := stream.CloseAndRecv(); grpc.Code(err) != codes.NotFound { + t.Fatalf("%v.CloseAndRecv() = %v, want error %s", stream, err, codes.NotFound) + } + break + } +} + +func TestExceedMaxStreamsLimit(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testExceedMaxStreamsLimit(t, e) + } +} + +func testExceedMaxStreamsLimit(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise( + "http2Client.notifyError got notified that the client transport was broken", + "Conn.resetTransport failed to create client transport", + "grpc: the connection is closing", + ) + te.maxStream = 1 // Only allows 1 live stream per server transport. + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + _, err := tc.StreamingInputCall(te.ctx) + if err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + // Loop until receiving the new max stream setting from the server. 
+	for {
+		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+		defer cancel()
+		_, err := tc.StreamingInputCall(ctx)
+		if err == nil {
+			time.Sleep(50 * time.Millisecond)
+			continue
+		}
+		if grpc.Code(err) == codes.DeadlineExceeded {
+			break
+		}
+		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
+	}
+}
+
+const defaultMaxStreamsClient = 100
+
+func TestExceedDefaultMaxStreamsLimit(t *testing.T) {
+	defer leakcheck.Check(t)
+	for _, e := range listTestEnv() {
+		if e.name == "handler-tls" {
+			// The default max stream limit in handler_server is not 100?
+			continue
+		}
+		testExceedDefaultMaxStreamsLimit(t, e)
+	}
+}
+
+func testExceedDefaultMaxStreamsLimit(t *testing.T, e env) {
+	te := newTest(t, e)
+	te.declareLogNoise(
+		"http2Client.notifyError got notified that the client transport was broken",
+		"Conn.resetTransport failed to create client transport",
+		"grpc: the connection is closing",
+	)
+	// When maxStream is set to 0 the server doesn't send a settings frame for
+	// MaxConcurrentStreams, essentially allowing infinite (math.MaxInt32) streams.
+	// In such a case, there should be a default cap on the client-side.
+	te.maxStream = 0
+	te.startServer(&testServer{security: e.security})
+	defer te.tearDown()
+
+	cc := te.clientConn()
+	tc := testpb.NewTestServiceClient(cc)
+
+	// Create as many streams as a client can.
+	for i := 0; i < defaultMaxStreamsClient; i++ {
+		if _, err := tc.StreamingInputCall(te.ctx); err != nil {
+			t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err)
+		}
+	}
+
+	// Trying to create one more should timeout.
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + _, err := tc.StreamingInputCall(ctx) + if err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded) + } +} + +func TestStreamsQuotaRecovery(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testStreamsQuotaRecovery(t, e) + } +} + +func testStreamsQuotaRecovery(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise( + "http2Client.notifyError got notified that the client transport was broken", + "Conn.resetTransport failed to create client transport", + "grpc: the connection is closing", + ) + te.maxStream = 1 // Allows 1 live stream. + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.StreamingInputCall(context.Background()); err != nil { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + // Loop until the new max stream setting is effective. + for { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + _, err := tc.StreamingInputCall(ctx) + if err == nil { + time.Sleep(50 * time.Millisecond) + continue + } + if grpc.Code(err) == codes.DeadlineExceeded { + break + } + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded) + } + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314) + if err != nil { + t.Error(err) + return + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(1592), + Payload: payload, + } + // No rpc should go through due to the max streams limit. 
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + if _, err := tc.UnaryCall(ctx, req, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Errorf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + }() + } + wg.Wait() +} + +func TestCompressServerHasNoSupport(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testCompressServerHasNoSupport(t, e) + } +} + +func testCompressServerHasNoSupport(t *testing.T, e env) { + te := newTest(t, e) + te.serverCompression = false + te.clientCompression = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.Unimplemented { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented) + } + // Streaming RPC + stream, err := tc.FullDuplexCall(context.Background()) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(31415), + }, + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.Unimplemented { + 
t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented) + } +} + +func TestCompressOK(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testCompressOK(t, e) + } +} + +func testCompressOK(t *testing.T, e env) { + te := newTest(t, e) + te.serverCompression = true + te.clientCompression = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + // Unary call + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(respSize), + Payload: payload, + } + ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + // Streaming RPC + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(31415), + }, + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } +} + +func TestUnaryClientInterceptor(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testUnaryClientInterceptor(t, e) + } +} + +func failOkayRPC(ctx context.Context, 
method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + err := invoker(ctx, method, req, reply, cc, opts...) + if err == nil { + return grpc.Errorf(codes.NotFound, "") + } + return err +} + +func testUnaryClientInterceptor(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.unaryClientInt = failOkayRPC + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + tc := testpb.NewTestServiceClient(te.clientConn()) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.NotFound { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound) + } +} + +func TestStreamClientInterceptor(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testStreamClientInterceptor(t, e) + } +} + +func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + s, err := streamer(ctx, desc, cc, method, opts...) 
+ if err == nil { + return nil, grpc.Errorf(codes.NotFound, "") + } + return s, nil +} + +func testStreamClientInterceptor(t *testing.T, e env) { + te := newTest(t, e) + te.streamClientInt = failOkayStream + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + tc := testpb.NewTestServiceClient(te.clientConn()) + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(1)), + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if _, err := tc.StreamingOutputCall(context.Background(), req); grpc.Code(err) != codes.NotFound { + t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, error code %s", tc, err, codes.NotFound) + } +} + +func TestUnaryServerInterceptor(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testUnaryServerInterceptor(t, e) + } +} + +func errInjector(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return nil, grpc.Errorf(codes.PermissionDenied, "") +} + +func testUnaryServerInterceptor(t *testing.T, e env) { + te := newTest(t, e) + te.unaryServerInt = errInjector + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + tc := testpb.NewTestServiceClient(te.clientConn()) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.PermissionDenied { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied) + } +} + +func TestStreamServerInterceptor(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. 
+ if e.name == "handler-tls" { + continue + } + testStreamServerInterceptor(t, e) + } +} + +func fullDuplexOnly(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" { + return handler(srv, ss) + } + // Reject the other methods. + return grpc.Errorf(codes.PermissionDenied, "") +} + +func testStreamServerInterceptor(t *testing.T, e env) { + te := newTest(t, e) + te.streamServerInt = fullDuplexOnly + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + tc := testpb.NewTestServiceClient(te.clientConn()) + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(1)), + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + s1, err := tc.StreamingOutputCall(context.Background(), req) + if err != nil { + t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, ", tc, err) + } + if _, err := s1.Recv(); grpc.Code(err) != codes.PermissionDenied { + t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied) + } + s2, err := tc.FullDuplexCall(context.Background()) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := s2.Send(req); err != nil { + t.Fatalf("%v.Send(_) = %v, want ", s2, err) + } + if _, err := s2.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", s2, err) + } +} + +// funcServer implements methods of TestServiceServer using funcs, +// similar to an http.HandlerFunc. +// Any unimplemented method will crash. Tests implement the method(s) +// they need. 
+type funcServer struct { + testpb.TestServiceServer + unaryCall func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) + streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error +} + +func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return s.unaryCall(ctx, in) +} + +func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { + return s.streamingInputCall(stream) +} + +func TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testClientRequestBodyErrorUnexpectedEOF(t, e) + } +} + +func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) { + te := newTest(t, e) + ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + errUnexpectedCall := errors.New("unexpected call func server method") + t.Error(errUnexpectedCall) + return nil, errUnexpectedCall + }} + te.startServer(ts) + defer te.tearDown() + te.withServerTester(func(st *serverTester) { + st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall") + // Say we have 5 bytes coming, but set END_STREAM flag: + st.writeData(1, true, []byte{0, 0, 0, 0, 5}) + st.wantAnyFrame() // wait for server to crash (it used to crash) + }) +} + +func TestClientRequestBodyErrorCloseAfterLength(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testClientRequestBodyErrorCloseAfterLength(t, e) + } +} + +func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise("Server.processUnaryRPC failed to write status") + ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + errUnexpectedCall := errors.New("unexpected call func server method") + t.Error(errUnexpectedCall) + return nil, errUnexpectedCall + 
}} + te.startServer(ts) + defer te.tearDown() + te.withServerTester(func(st *serverTester) { + st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall") + // say we're sending 5 bytes, but then close the connection instead. + st.writeData(1, false, []byte{0, 0, 0, 0, 5}) + st.cc.Close() + }) +} + +func TestClientRequestBodyErrorCancel(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testClientRequestBodyErrorCancel(t, e) + } +} + +func testClientRequestBodyErrorCancel(t *testing.T, e env) { + te := newTest(t, e) + gotCall := make(chan bool, 1) + ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + gotCall <- true + return new(testpb.SimpleResponse), nil + }} + te.startServer(ts) + defer te.tearDown() + te.withServerTester(func(st *serverTester) { + st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall") + // Say we have 5 bytes coming, but cancel it instead. + st.writeRSTStream(1, http2.ErrCodeCancel) + st.writeData(1, false, []byte{0, 0, 0, 0, 5}) + + // Verify we didn't a call yet. + select { + case <-gotCall: + t.Fatal("unexpected call") + default: + } + + // And now send an uncanceled (but still invalid), just to get a response. 
+ st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall") + st.writeData(3, true, []byte{0, 0, 0, 0, 0}) + <-gotCall + st.wantAnyFrame() + }) +} + +func TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testClientRequestBodyErrorCancelStreamingInput(t, e) + } +} + +func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) { + te := newTest(t, e) + recvErr := make(chan error, 1) + ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + _, err := stream.Recv() + recvErr <- err + return nil + }} + te.startServer(ts) + defer te.tearDown() + te.withServerTester(func(st *serverTester) { + st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall") + // Say we have 5 bytes coming, but cancel it instead. + st.writeData(1, false, []byte{0, 0, 0, 0, 5}) + st.writeRSTStream(1, http2.ErrCodeCancel) + + var got error + select { + case got = <-recvErr: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for error") + } + if grpc.Code(got) != codes.Canceled { + t.Errorf("error = %#v; want error code %s", got, codes.Canceled) + } + }) +} + +const clientAlwaysFailCredErrorMsg = "clientAlwaysFailCred always fails" + +var errClientAlwaysFailCred = errors.New(clientAlwaysFailCredErrorMsg) + +type clientAlwaysFailCred struct{} + +func (c clientAlwaysFailCred) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return nil, nil, errClientAlwaysFailCred +} +func (c clientAlwaysFailCred) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} +func (c clientAlwaysFailCred) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{} +} +func (c clientAlwaysFailCred) Clone() credentials.TransportCredentials { + return nil +} +func (c clientAlwaysFailCred) OverrideServerName(s string) error { + return nil 
+} + +func TestDialWithBlockErrorOnBadCertificates(t *testing.T) { + te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: "v1"}) + te.startServer(&testServer{security: te.e.security}) + defer te.tearDown() + + var ( + err error + opts []grpc.DialOption + ) + opts = append(opts, grpc.WithTransportCredentials(clientAlwaysFailCred{}), grpc.WithBlock()) + te.cc, err = grpc.Dial(te.srvAddr, opts...) + if err != errClientAlwaysFailCred { + te.t.Fatalf("Dial(%q) = %v, want %v", te.srvAddr, err, errClientAlwaysFailCred) + } +} + +type clientTimeoutCreds struct { + timeoutReturned bool +} + +func (c *clientTimeoutCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if !c.timeoutReturned { + c.timeoutReturned = true + return nil, nil, context.DeadlineExceeded + } + return rawConn, nil, nil +} +func (c *clientTimeoutCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} +func (c *clientTimeoutCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{} +} +func (c *clientTimeoutCreds) Clone() credentials.TransportCredentials { + return nil +} +func (c *clientTimeoutCreds) OverrideServerName(s string) error { + return nil +} + +func TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) { + te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "clientTimeoutCreds", balancer: "v1"}) + te.userAgent = testAppUA + te.startServer(&testServer{security: te.e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + // This unary call should succeed, because ClientHandshake will succeed for the second time. 
+ if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { + te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want ", err) + } +} + +type serverDispatchCred struct { + rawConnCh chan net.Conn +} + +func newServerDispatchCred() *serverDispatchCred { + return &serverDispatchCred{ + rawConnCh: make(chan net.Conn, 1), + } +} +func (c *serverDispatchCred) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} +func (c *serverDispatchCred) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + select { + case c.rawConnCh <- rawConn: + default: + } + return nil, nil, credentials.ErrConnDispatched +} +func (c *serverDispatchCred) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{} +} +func (c *serverDispatchCred) Clone() credentials.TransportCredentials { + return nil +} +func (c *serverDispatchCred) OverrideServerName(s string) error { + return nil +} +func (c *serverDispatchCred) getRawConn() net.Conn { + return <-c.rawConnCh +} + +func TestServerCredsDispatch(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + cred := newServerDispatchCred() + s := grpc.NewServer(grpc.Creds(cred)) + go s.Serve(lis) + defer s.Stop() + + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(cred)) + if err != nil { + t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) + } + defer cc.Close() + + rawConn := cred.getRawConn() + // Give grpc a chance to see the error and potentially close the connection. + // And check that connection is not closed after that. + time.Sleep(100 * time.Millisecond) + // Check rawConn is not closed. 
+ if n, err := rawConn.Write([]byte{0}); n <= 0 || err != nil { + t.Errorf("Read() = %v, %v; want n>0, ", n, err) + } +} + +func TestFlowControlLogicalRace(t *testing.T) { + // Test for a regression of https://github.com/grpc/grpc-go/issues/632, + // and other flow control bugs. + + defer leakcheck.Check(t) + + const ( + itemCount = 100 + itemSize = 1 << 10 + recvCount = 2 + maxFailures = 3 + + requestTimeout = time.Second * 5 + ) + + requestCount := 10000 + if raceMode { + requestCount = 1000 + } + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + defer lis.Close() + + s := grpc.NewServer() + testpb.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{ + itemCount: itemCount, + itemSize: itemSize, + }) + defer s.Stop() + + go s.Serve(lis) + + ctx := context.Background() + + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) + } + defer cc.Close() + cl := testpb.NewTestServiceClient(cc) + + failures := 0 + for i := 0; i < requestCount; i++ { + ctx, cancel := context.WithTimeout(ctx, requestTimeout) + output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{}) + if err != nil { + t.Fatalf("StreamingOutputCall; err = %q", err) + } + + j := 0 + loop: + for ; j < recvCount; j++ { + _, err := output.Recv() + if err != nil { + if err == io.EOF { + break loop + } + switch grpc.Code(err) { + case codes.DeadlineExceeded: + break loop + default: + t.Fatalf("Recv; err = %q", err) + } + } + } + cancel() + <-ctx.Done() + + if j < recvCount { + t.Errorf("got %d responses to request %d", j, i) + failures++ + if failures >= maxFailures { + // Continue past the first failure to see if the connection is + // entirely broken, or if only a single RPC was affected + break + } + } + } +} + +type flowControlLogicalRaceServer struct { + testpb.TestServiceServer + + itemSize int + itemCount int +} 
+ +func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error { + for i := 0; i < s.itemCount; i++ { + err := srv.Send(&testpb.StreamingOutputCallResponse{ + Payload: &testpb.Payload{ + // Sending a large stream of data which the client reject + // helps to trigger some types of flow control bugs. + // + // Reallocating memory here is inefficient, but the stress it + // puts on the GC leads to more frequent flow control + // failures. The GC likely causes more variety in the + // goroutine scheduling orders. + Body: bytes.Repeat([]byte("a"), s.itemSize), + }, + }) + if err != nil { + return err + } + } + return nil +} + +type lockingWriter struct { + mu sync.Mutex + w io.Writer +} + +func (lw *lockingWriter) Write(p []byte) (n int, err error) { + lw.mu.Lock() + defer lw.mu.Unlock() + return lw.w.Write(p) +} + +func (lw *lockingWriter) setWriter(w io.Writer) { + lw.mu.Lock() + defer lw.mu.Unlock() + lw.w = w +} + +var testLogOutput = &lockingWriter{w: os.Stderr} + +// awaitNewConnLogOutput waits for any of grpc.NewConn's goroutines to +// terminate, if they're still running. It spams logs with this +// message. We wait for it so our log filter is still +// active. Otherwise the "defer restore()" at the top of various test +// functions restores our log filter and then the goroutine spams. +func awaitNewConnLogOutput() { + awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry") +} + +func awaitLogOutput(maxWait time.Duration, phrase string) { + pb := []byte(phrase) + + timer := time.NewTimer(maxWait) + defer timer.Stop() + wakeup := make(chan bool, 1) + for { + if logOutputHasContents(pb, wakeup) { + return + } + select { + case <-timer.C: + // Too slow. Oh well. 
+ return + case <-wakeup: + } + } +} + +func logOutputHasContents(v []byte, wakeup chan<- bool) bool { + testLogOutput.mu.Lock() + defer testLogOutput.mu.Unlock() + fw, ok := testLogOutput.w.(*filterWriter) + if !ok { + return false + } + fw.mu.Lock() + defer fw.mu.Unlock() + if bytes.Contains(fw.buf.Bytes(), v) { + return true + } + fw.wakeup = wakeup + return false +} + +var verboseLogs = flag.Bool("verbose_logs", false, "show all grpclog output, without filtering") + +func noop() {} + +// declareLogNoise declares that t is expected to emit the following noisy phrases, +// even on success. Those phrases will be filtered from grpclog output +// and only be shown if *verbose_logs or t ends up failing. +// The returned restore function should be called with defer to be run +// before the test ends. +func declareLogNoise(t *testing.T, phrases ...string) (restore func()) { + if *verboseLogs { + return noop + } + fw := &filterWriter{dst: os.Stderr, filter: phrases} + testLogOutput.setWriter(fw) + return func() { + if t.Failed() { + fw.mu.Lock() + defer fw.mu.Unlock() + if fw.buf.Len() > 0 { + t.Logf("Complete log output:\n%s", fw.buf.Bytes()) + } + } + testLogOutput.setWriter(os.Stderr) + } +} + +type filterWriter struct { + dst io.Writer + filter []string + + mu sync.Mutex + buf bytes.Buffer + wakeup chan<- bool // if non-nil, gets true on write +} + +func (fw *filterWriter) Write(p []byte) (n int, err error) { + fw.mu.Lock() + fw.buf.Write(p) + if fw.wakeup != nil { + select { + case fw.wakeup <- true: + default: + } + } + fw.mu.Unlock() + + ps := string(p) + for _, f := range fw.filter { + if strings.Contains(ps, f) { + return len(p), nil + } + } + return fw.dst.Write(p) +} + +// stubServer is a server that is easy to customize within individual test +// cases. +type stubServer struct { + // Guarantees we satisfy this interface; panics if unimplemented methods are called. + testpb.TestServiceServer + + // Customizable implementations of server handlers. 
+ emptyCall func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) + fullDuplexCall func(stream testpb.TestService_FullDuplexCallServer) error + + // A client connected to this service the test may use. Created in Start(). + client testpb.TestServiceClient + + cleanups []func() // Lambdas executed in Stop(); populated by Start(). +} + +func (ss *stubServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return ss.emptyCall(ctx, in) +} + +func (ss *stubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + return ss.fullDuplexCall(stream) +} + +// Start starts the server and creates a client connected to it. +func (ss *stubServer) Start(sopts []grpc.ServerOption) error { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + return fmt.Errorf(`net.Listen("tcp", "localhost:0") = %v`, err) + } + ss.cleanups = append(ss.cleanups, func() { lis.Close() }) + + s := grpc.NewServer(sopts...) + testpb.RegisterTestServiceServer(s, ss) + go s.Serve(lis) + ss.cleanups = append(ss.cleanups, s.Stop) + + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return fmt.Errorf("grpc.Dial(%q) = %v", lis.Addr().String(), err) + } + ss.cleanups = append(ss.cleanups, func() { cc.Close() }) + + ss.client = testpb.NewTestServiceClient(cc) + return nil +} + +func (ss *stubServer) Stop() { + for i := len(ss.cleanups) - 1; i >= 0; i-- { + ss.cleanups[i]() + } +} + +func TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { + const mdkey = "somedata" + + // endpoint ensures mdkey is NOT in metadata and returns an error if it is. 
+ endpoint := &stubServer{ + emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil { + return nil, status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) + } + return &testpb.Empty{}, nil + }, + } + if err := endpoint.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer endpoint.Stop() + + // proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint + // without explicitly copying the metadata. + proxy := &stubServer{ + emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil { + return nil, status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey) + } + return endpoint.client.EmptyCall(ctx, in) + }, + } + if err := proxy.Start(nil); err != nil { + t.Fatalf("Error starting proxy server: %v", err) + } + defer proxy.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + md := metadata.Pairs(mdkey, "val") + ctx = metadata.NewOutgoingContext(ctx, md) + + // Sanity check that endpoint properly errors when it sees mdkey. + _, err := endpoint.client.EmptyCall(ctx, &testpb.Empty{}) + if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal { + t.Fatalf("endpoint.client.EmptyCall(_, _) = _, %v; want _, ", err) + } + + if _, err := proxy.client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatal(err.Error()) + } +} + +func TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { + const mdkey = "somedata" + + // doFDC performs a FullDuplexCall with client and returns the error from the + // first stream.Recv call, or nil if that error is io.EOF. Calls t.Fatal if + // the stream cannot be established. 
+ doFDC := func(ctx context.Context, client testpb.TestServiceClient) error { + stream, err := client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unwanted error: %v", err) + } + if _, err := stream.Recv(); err != io.EOF { + return err + } + return nil + } + + // endpoint ensures mdkey is NOT in metadata and returns an error if it is. + endpoint := &stubServer{ + fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { + ctx := stream.Context() + if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil { + return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) + } + return nil + }, + } + if err := endpoint.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer endpoint.Stop() + + // proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint + // without explicitly copying the metadata. + proxy := &stubServer{ + fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { + ctx := stream.Context() + if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil { + return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) + } + return doFDC(ctx, endpoint.client) + }, + } + if err := proxy.Start(nil); err != nil { + t.Fatalf("Error starting proxy server: %v", err) + } + defer proxy.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + md := metadata.Pairs(mdkey, "val") + ctx = metadata.NewOutgoingContext(ctx, md) + + // Sanity check that endpoint properly errors when it sees mdkey in ctx. 
+ err := doFDC(ctx, endpoint.client) + if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal { + t.Fatalf("stream.Recv() = _, %v; want _, ", err) + } + + if err := doFDC(ctx, proxy.client); err != nil { + t.Fatalf("doFDC(_, proxy.client) = %v; want nil", err) + } +} + +func TestStatsTagsAndTrace(t *testing.T) { + // Data added to context by client (typically in a stats handler). + tags := []byte{1, 5, 2, 4, 3} + trace := []byte{5, 2, 1, 3, 4} + + // endpoint ensures Tags() and Trace() in context match those that were added + // by the client and returns an error if not. + endpoint := &stubServer{ + emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, tags) { + return nil, status.Errorf(codes.Internal, "stats.Tags(%v)=%v; want %v", ctx, tg, tags) + } + if !reflect.DeepEqual(md["grpc-tags-bin"], []string{string(tags)}) { + return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md["grpc-tags-bin"], tags) + } + if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, trace) { + return nil, status.Errorf(codes.Internal, "stats.Trace(%v)=%v; want %v", ctx, tr, trace) + } + if !reflect.DeepEqual(md["grpc-trace-bin"], []string{string(trace)}) { + return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md["grpc-trace-bin"], trace) + } + return &testpb.Empty{}, nil + }, + } + if err := endpoint.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer endpoint.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + testCases := []struct { + ctx context.Context + want codes.Code + }{ + {ctx: ctx, want: codes.Internal}, + {ctx: stats.SetTags(ctx, tags), want: codes.Internal}, + {ctx: stats.SetTrace(ctx, trace), want: codes.Internal}, + {ctx: stats.SetTags(stats.SetTrace(ctx, tags), tags), want: codes.Internal}, + {ctx: 
stats.SetTags(stats.SetTrace(ctx, trace), tags), want: codes.OK}, + } + + for _, tc := range testCases { + _, err := endpoint.client.EmptyCall(tc.ctx, &testpb.Empty{}) + if tc.want == codes.OK && err != nil { + t.Fatalf("endpoint.client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err) + } + if s, ok := status.FromError(err); !ok || s.Code() != tc.want { + t.Fatalf("endpoint.client.EmptyCall(%v, _) = _, %v; want _, ", tc.ctx, err, tc.want) + } + } +} + +func TestTapTimeout(t *testing.T) { + sopts := []grpc.ServerOption{ + grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) { + c, cancel := context.WithCancel(ctx) + // Call cancel instead of setting a deadline so we can detect which error + // occurred -- this cancellation (desired) or the client's deadline + // expired (indicating this cancellation did not affect the RPC). + time.AfterFunc(10*time.Millisecond, cancel) + return c, nil + }), + } + + ss := &stubServer{ + emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + <-ctx.Done() + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(sopts); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + // This was known to be flaky; test several times. + for i := 0; i < 10; i++ { + // Set our own deadline in case the server hangs. 
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + res, err := ss.client.EmptyCall(ctx, &testpb.Empty{}) + cancel() + if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled { + t.Fatalf("ss.client.EmptyCall(context.Background(), _) = %v, %v; want nil, ", res, err) + } + } +} + +type windowSizeConfig struct { + serverStream int32 + serverConn int32 + clientStream int32 + clientConn int32 +} + +func max(a, b int32) int32 { + if a > b { + return a + } + return b +} + +func TestConfigurableWindowSizeWithLargeWindow(t *testing.T) { + defer leakcheck.Check(t) + wc := windowSizeConfig{ + serverStream: 8 * 1024 * 1024, + serverConn: 12 * 1024 * 1024, + clientStream: 6 * 1024 * 1024, + clientConn: 8 * 1024 * 1024, + } + for _, e := range listTestEnv() { + testConfigurableWindowSize(t, e, wc) + } +} + +func TestConfigurableWindowSizeWithSmallWindow(t *testing.T) { + defer leakcheck.Check(t) + wc := windowSizeConfig{ + serverStream: 1, + serverConn: 1, + clientStream: 1, + clientConn: 1, + } + for _, e := range listTestEnv() { + testConfigurableWindowSize(t, e, wc) + } +} + +func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) { + te := newTest(t, e) + te.serverInitialWindowSize = wc.serverStream + te.serverInitialConnWindowSize = wc.serverConn + te.clientInitialWindowSize = wc.clientStream + te.clientInitialConnWindowSize = wc.clientConn + + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + stream, err := tc.FullDuplexCall(context.Background()) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + numOfIter := 11 + // Set message size to exhaust largest of window sizes. 
+ messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1) + messageSize = max(messageSize, 64*1024) + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize) + if err != nil { + t.Fatal(err) + } + respParams := []*testpb.ResponseParameters{ + { + Size: proto.Int32(messageSize), + }, + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParams, + Payload: payload, + } + for i := 0; i < numOfIter; i++ { + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() = %v, want ", stream, err) + } +} + +var ( + // test authdata + authdata = map[string]string{ + "test-key": "test-value", + "test-key2-bin": string([]byte{1, 2, 3}), + } +) + +type testPerRPCCredentials struct{} + +func (cr testPerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return authdata, nil +} + +func (cr testPerRPCCredentials) RequireTransportSecurity() bool { + return false +} + +func authHandle(ctx context.Context, info *tap.Info) (context.Context, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return ctx, fmt.Errorf("didn't find metadata in context") + } + for k, vwant := range authdata { + vgot, ok := md[k] + if !ok { + return ctx, fmt.Errorf("didn't find authdata key %v in context", k) + } + if vgot[0] != vwant { + return ctx, fmt.Errorf("for key %v, got value %v, want %v", k, vgot, vwant) + } + } + return ctx, nil +} + +func TestPerRPCCredentialsViaDialOptions(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPerRPCCredentialsViaDialOptions(t, e) + } +} + +func testPerRPCCredentialsViaDialOptions(t *testing.T, e env) { + 
te := newTest(t, e) + te.tapHandle = authHandle + te.perRPCCreds = testPerRPCCredentials{} + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("Test failed. Reason: %v", err) + } +} + +func TestPerRPCCredentialsViaCallOptions(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPerRPCCredentialsViaCallOptions(t, e) + } +} + +func testPerRPCCredentialsViaCallOptions(t *testing.T, e env) { + te := newTest(t, e) + te.tapHandle = authHandle + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { + t.Fatalf("Test failed. Reason: %v", err) + } +} + +func TestPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPerRPCCredentialsViaDialOptionsAndCallOptions(t, e) + } +} + +func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e env) { + te := newTest(t, e) + te.perRPCCreds = testPerRPCCredentials{} + // When credentials are provided via both dial options and call options, + // we apply both sets. 
+ te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return ctx, fmt.Errorf("couldn't find metadata in context") + } + for k, vwant := range authdata { + vgot, ok := md[k] + if !ok { + return ctx, fmt.Errorf("couldn't find metadata for key %v", k) + } + if len(vgot) != 2 { + return ctx, fmt.Errorf("len of value for key %v was %v, want 2", k, len(vgot)) + } + if vgot[0] != vwant || vgot[1] != vwant { + return ctx, fmt.Errorf("value for %v was %v, want [%v, %v]", k, vgot, vwant, vwant) + } + } + return ctx, nil + } + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { + t.Fatalf("Test failed. Reason: %v", err) + } +} + +func TestWaitForReadyConnection(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testWaitForReadyConnection(t, e) + } + +} + +func testWaitForReadyConnection(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() // Non-blocking dial. + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + state := cc.GetState() + // Wait for connection to be Ready. + for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + if state != connectivity.Ready { + t.Fatalf("Want connection state to be Ready, got %v", state) + } + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + defer cancel() + // Make a fail-fast RPC. 
+ if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err) + } +} + +type errCodec struct { + noError bool +} + +func (c *errCodec) Marshal(v interface{}) ([]byte, error) { + if c.noError { + return []byte{}, nil + } + return nil, fmt.Errorf("3987^12 + 4365^12 = 4472^12") +} + +func (c *errCodec) Unmarshal(data []byte, v interface{}) error { + return nil +} + +func (c *errCodec) String() string { + return "Fermat's near-miss." +} + +func TestEncodeDoesntPanic(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testEncodeDoesntPanic(t, e) + } +} + +func testEncodeDoesntPanic(t *testing.T, e env) { + te := newTest(t, e) + erc := &errCodec{} + te.customCodec = erc + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + te.customCodec = nil + tc := testpb.NewTestServiceClient(te.clientConn()) + // Failure case, should not panic. + tc.EmptyCall(context.Background(), &testpb.Empty{}) + erc.noError = true + // Passing case. 
+ if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall(_, _) = _, %v, want _, ", err) + } +} + +func TestSvrWriteStatusEarlyWrite(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testSvrWriteStatusEarlyWrite(t, e) + } +} + +func testSvrWriteStatusEarlyWrite(t *testing.T, e env) { + te := newTest(t, e) + const smallSize = 1024 + const largeSize = 2048 + const extraLargeSize = 4096 + te.maxServerReceiveMsgSize = newInt(largeSize) + te.maxServerSendMsgSize = newInt(largeSize) + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) + if err != nil { + t.Fatal(err) + } + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(smallSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: extraLargePayload, + } + // Test recv case: server receives a message larger than maxServerReceiveMsgSize. + stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err = stream.Send(sreq); err != nil { + t.Fatalf("%v.Send() = _, %v, want ", stream, err) + } + if _, err = stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + // Test send case: server sends a message larger than maxServerSendMsgSize. 
+ sreq.Payload = smallPayload + respParam[0].Size = proto.Int32(int32(extraLargeSize)) + + stream, err = tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err = stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err = stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } +} diff --git a/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go new file mode 100644 index 000000000..bb6efbe7d --- /dev/null +++ b/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go @@ -0,0 +1,788 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_testing/test.proto + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + grpc_testing/test.proto + +It has these top-level messages: + Empty + Payload + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The type of payload that should be returned. 
+type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. + PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) Enum() *PayloadType { + p := new(PayloadType) + *p = x + return p +} +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (x *PayloadType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") + if err != nil { + return err + } + *x = PayloadType(value) + return nil +} +func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. + Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. 
+ Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Payload) GetType() PayloadType { + if m != nil && m.Type != nil { + return *m.Type + } + return PayloadType_COMPRESSABLE +} + +func (m *Payload) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername *bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. 
+ FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *SimpleRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *SimpleRequest) GetResponseSize() int32 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetFillUsername() bool { + if m != nil && m.FillUsername != nil { + return *m.FillUsername + } + return false +} + +func (m *SimpleRequest) GetFillOauthScope() bool { + if m != nil && m.FillOauthScope != nil { + return *m.FillOauthScope + } + return false +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. 
+ OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleResponse) GetUsername() string { + if m != nil && m.Username != nil { + return *m.Username + } + return "" +} + +func (m *SimpleResponse) GetOauthScope() string { + if m != nil && m.OauthScope != nil { + return *m.OauthScope + } + return "" +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} +func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. 
+ AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} +func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { + if m != nil && m.AggregatedPayloadSize != nil { + return *m.AggregatedPayloadSize + } + return 0 +} + +// Configuration for a particular response. +type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} +func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *ResponseParameters) GetSize() int32 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *ResponseParameters) GetIntervalUs() int32 { + if m != nil && m.IntervalUs != nil { + return *m.IntervalUs + } + return 0 +} + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. 
+ // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} +func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Server-streaming response, as configured by the request and parameters. +type StreamingOutputCallResponse struct { + // Payload to increase response size. 
+ Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} +func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterType((*Empty)(nil), "grpc.testing.Empty") + proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") + proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") + proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") + proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") + proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for TestService service + +type TestServiceClient interface { + // One empty request followed by one empty response. 
+ EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. + HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) +} + +type testServiceClient struct { + cc *grpc.ClientConn +} + +func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceStreamingOutputCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_StreamingOutputCallClient interface { + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingOutputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceStreamingInputCallClient{stream} + return x, nil +} + +type TestService_StreamingInputCallClient interface { + Send(*StreamingInputCallRequest) error + CloseAndRecv() (*StreamingInputCallResponse, error) + grpc.ClientStream +} + +type testServiceStreamingInputCallClient struct { + grpc.ClientStream +} + +func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamingInputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceFullDuplexCallClient{stream} + return x, nil +} + +type TestService_FullDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceFullDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceHalfDuplexCallClient{stream} + return x, nil +} + +type TestService_HalfDuplexCallClient interface { + Send(*StreamingOutputCallRequest) error + Recv() (*StreamingOutputCallResponse, error) + grpc.ClientStream +} + +type testServiceHalfDuplexCallClient struct { + grpc.ClientStream +} + +func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { + m := new(StreamingOutputCallResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for TestService service + +type TestServiceServer interface { + // One empty request followed by one empty response. + EmptyCall(context.Context, *Empty) (*Empty, error) + // One request followed by one response. + // The server returns the client payload as-is. 
+ UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. + StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + StreamingInputCall(TestService_StreamingInputCallServer) error + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + FullDuplexCall(TestService_FullDuplexCallServer) error + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. 
+ HalfDuplexCall(TestService_HalfDuplexCallServer) error +} + +func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { + s.RegisterService(&_TestService_serviceDesc, srv) +} + +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).EmptyCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/EmptyCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StreamingOutputCallRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) +} + +type TestService_StreamingOutputCallServer interface { + Send(*StreamingOutputCallResponse) error + grpc.ServerStream +} + +type testServiceStreamingOutputCallServer struct { + grpc.ServerStream +} + +func (x 
*testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) +} + +type TestService_StreamingInputCallServer interface { + SendAndClose(*StreamingInputCallResponse) error + Recv() (*StreamingInputCallRequest, error) + grpc.ServerStream +} + +type testServiceStreamingInputCallServer struct { + grpc.ServerStream +} + +func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { + m := new(StreamingInputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) +} + +type TestService_FullDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceFullDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) +} + +type TestService_HalfDuplexCallServer interface { + Send(*StreamingOutputCallResponse) error + 
Recv() (*StreamingOutputCallRequest, error) + grpc.ServerStream +} + +type testServiceHalfDuplexCallServer struct { + grpc.ServerStream +} + +func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { + m := new(StreamingOutputCallRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TestService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EmptyCall", + Handler: _TestService_EmptyCall_Handler, + }, + { + MethodName: "UnaryCall", + Handler: _TestService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingOutputCall", + Handler: _TestService_StreamingOutputCall_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamingInputCall", + Handler: _TestService_StreamingInputCall_Handler, + ClientStreams: true, + }, + { + StreamName: "FullDuplexCall", + Handler: _TestService_FullDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "HalfDuplexCall", + Handler: _TestService_HalfDuplexCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_testing/test.proto", +} + +func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 582 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xdd, 0x6e, 0xd3, 0x4c, + 0x10, 0xfd, 0xb6, 0x49, 0xbe, 0x34, 0x93, 0xd4, 0x8a, 0x36, 0xaa, 0xea, 0xba, 0x48, 0x58, 0xe6, + 0x02, 0x83, 0x44, 0x8a, 0x22, 0xc1, 0x25, 0xa8, 0xb4, 0xa9, 0xa8, 0x94, 0x26, 0xc1, 0x4e, 0xae, + 0xa3, 0x25, 0xd9, 0x1a, 0x4b, 0x8e, 0xbd, 0xac, 0xd7, 0x15, 0xe9, 0x05, 0x2f, 0xc6, 0xcb, 0xf0, + 0x10, 0x3c, 0x00, 0x5a, 0xff, 
0x24, 0x4e, 0xe2, 0x8a, 0x14, 0x04, 0x57, 0xb6, 0x67, 0xce, 0x9c, + 0x39, 0xc7, 0x33, 0xbb, 0x70, 0xe4, 0x70, 0x36, 0x9d, 0x08, 0x1a, 0x0a, 0xd7, 0x77, 0x4e, 0xe5, + 0xb3, 0xcd, 0x78, 0x20, 0x02, 0xdc, 0x90, 0x89, 0x76, 0x9a, 0x30, 0xaa, 0x50, 0xe9, 0xce, 0x99, + 0x58, 0x18, 0x3d, 0xa8, 0x0e, 0xc9, 0xc2, 0x0b, 0xc8, 0x0c, 0xbf, 0x80, 0xb2, 0x58, 0x30, 0xaa, + 0x22, 0x1d, 0x99, 0x4a, 0xe7, 0xb8, 0x9d, 0x2f, 0x68, 0xa7, 0xa0, 0xd1, 0x82, 0x51, 0x2b, 0x86, + 0x61, 0x0c, 0xe5, 0x8f, 0xc1, 0x6c, 0xa1, 0xee, 0xe9, 0xc8, 0x6c, 0x58, 0xf1, 0xbb, 0xf1, 0x03, + 0xc1, 0x81, 0xed, 0xce, 0x99, 0x47, 0x2d, 0xfa, 0x39, 0xa2, 0xa1, 0xc0, 0x6f, 0xe0, 0x80, 0xd3, + 0x90, 0x05, 0x7e, 0x48, 0x27, 0xbb, 0xb1, 0x37, 0x32, 0xbc, 0xfc, 0xc2, 0x4f, 0x72, 0xf5, 0xa1, + 0x7b, 0x47, 0xe3, 0x76, 0x95, 0x15, 0xc8, 0x76, 0xef, 0x28, 0x3e, 0x85, 0x2a, 0x4b, 0x18, 0xd4, + 0x92, 0x8e, 0xcc, 0x7a, 0xe7, 0xb0, 0x90, 0xde, 0xca, 0x50, 0x92, 0xf5, 0xc6, 0xf5, 0xbc, 0x49, + 0x14, 0x52, 0xee, 0x93, 0x39, 0x55, 0xcb, 0x3a, 0x32, 0xf7, 0xad, 0x86, 0x0c, 0x8e, 0xd3, 0x18, + 0x36, 0xa1, 0x19, 0x83, 0x02, 0x12, 0x89, 0x4f, 0x93, 0x70, 0x1a, 0x30, 0xaa, 0x56, 0x62, 0x9c, + 0x22, 0xe3, 0x03, 0x19, 0xb6, 0x65, 0xd4, 0xf8, 0x0a, 0x4a, 0xe6, 0x3a, 0x51, 0x95, 0x57, 0x84, + 0x76, 0x52, 0xa4, 0xc1, 0xfe, 0x52, 0x8c, 0xb4, 0x58, 0xb3, 0x96, 0xdf, 0xf8, 0x31, 0xd4, 0xf3, + 0x1a, 0x4a, 0x71, 0x1a, 0x82, 0x55, 0xff, 0x1e, 0x1c, 0xdb, 0x82, 0x53, 0x32, 0x77, 0x7d, 0xe7, + 0xca, 0x67, 0x91, 0x38, 0x27, 0x9e, 0x97, 0x4d, 0xe0, 0xa1, 0x52, 0x8c, 0x11, 0x68, 0x45, 0x6c, + 0xa9, 0xb3, 0xd7, 0x70, 0x44, 0x1c, 0x87, 0x53, 0x87, 0x08, 0x3a, 0x9b, 0xa4, 0x35, 0xc9, 0x68, + 0x50, 0x3c, 0x9a, 0xc3, 0x55, 0x3a, 0xa5, 0x96, 0x33, 0x32, 0xae, 0x00, 0x67, 0x1c, 0x43, 0xc2, + 0xc9, 0x9c, 0x0a, 0xca, 0x43, 0xb9, 0x44, 0xb9, 0xd2, 0xf8, 0x5d, 0xda, 0x75, 0x7d, 0x41, 0xf9, + 0x2d, 0x91, 0x03, 0x4a, 0x07, 0x0e, 0x59, 0x68, 0x1c, 0x1a, 0xdf, 0x51, 0x4e, 0xe1, 0x20, 0x12, + 0x1b, 0x86, 0xff, 0x74, 0xe5, 0x3e, 0x40, 0x6b, 0x59, 0xcf, 0x96, 
0x52, 0xd5, 0x3d, 0xbd, 0x64, + 0xd6, 0x3b, 0xfa, 0x3a, 0xcb, 0xb6, 0x25, 0x0b, 0xf3, 0x6d, 0x9b, 0x0f, 0x5d, 0x50, 0xa3, 0x0f, + 0x27, 0x85, 0x0e, 0x7f, 0x73, 0xbd, 0x9e, 0xbf, 0x85, 0x7a, 0xce, 0x30, 0x6e, 0x42, 0xe3, 0x7c, + 0x70, 0x3d, 0xb4, 0xba, 0xb6, 0x7d, 0xf6, 0xae, 0xd7, 0x6d, 0xfe, 0x87, 0x31, 0x28, 0xe3, 0xfe, + 0x5a, 0x0c, 0x61, 0x80, 0xff, 0xad, 0xb3, 0xfe, 0xc5, 0xe0, 0xba, 0xb9, 0xd7, 0xf9, 0x56, 0x86, + 0xfa, 0x88, 0x86, 0xc2, 0xa6, 0xfc, 0xd6, 0x9d, 0x52, 0xfc, 0x0a, 0x6a, 0xf1, 0x05, 0x22, 0x65, + 0xe1, 0xd6, 0x7a, 0xf7, 0x38, 0xa1, 0x15, 0x05, 0xf1, 0x25, 0xd4, 0xc6, 0x3e, 0xe1, 0x49, 0xd9, + 0xc9, 0x3a, 0x62, 0xed, 0xe2, 0xd0, 0x1e, 0x15, 0x27, 0xd3, 0x1f, 0xe0, 0x41, 0xab, 0xe0, 0xff, + 0x60, 0x73, 0xa3, 0xe8, 0xde, 0x25, 0xd1, 0x9e, 0xed, 0x80, 0x4c, 0x7a, 0xbd, 0x44, 0xd8, 0x05, + 0xbc, 0x7d, 0x22, 0xf0, 0xd3, 0x7b, 0x28, 0x36, 0x4f, 0xa0, 0x66, 0xfe, 0x1a, 0x98, 0xb4, 0x32, + 0x65, 0x2b, 0xe5, 0x32, 0xf2, 0xbc, 0x8b, 0x88, 0x79, 0xf4, 0xcb, 0x5f, 0xf3, 0x64, 0xa2, 0xd8, + 0x95, 0xf2, 0x9e, 0x78, 0x37, 0xff, 0xa0, 0xd5, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa6, + 0x30, 0x01, 0x96, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/test/grpc_testing/test.proto b/vendor/google.golang.org/grpc/test/grpc_testing/test.proto new file mode 100644 index 000000000..fbd22dec9 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/grpc_testing/test.proto @@ -0,0 +1,154 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto2"; + +package grpc.testing; + +message Empty {} + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + optional PayloadType type = 1; + // Primary contents of payload. + optional bytes body = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + optional PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 response_size = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; + + // Whether SimpleResponse should include username. + optional bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. + optional bool fill_oauth_scope = 5; +} + +// Unary response, as configured by the request. +message SimpleResponse { + // Payload to increase message size. + optional Payload payload = 1; + + // The user the request came from, for verifying authentication was + // successful when the client expected it. + optional string username = 2; + + // OAuth scope. + optional string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. 
+ optional Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. +message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + optional int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + optional int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + optional int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + optional PayloadType response_type = 1; + + // Configuration for each expected response message. + repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + optional Payload payload = 3; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + optional Payload payload = 1; +} + +// A simple service to test the various types of RPCs and experiment with +// performance with various types of payload. +service TestService { + // One empty request followed by one empty response. + rpc EmptyCall(Empty) returns (Empty); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by a sequence of responses (streamed download). + // The server returns the payload with client desired type and sizes. 
+ rpc StreamingOutputCall(StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by one response (streamed upload). + // The server returns the aggregated size of client payload as the result. + rpc StreamingInputCall(stream StreamingInputCallRequest) + returns (StreamingInputCallResponse); + + // A sequence of requests with each request served by the server immediately. + // As one request could lead to multiple responses, this interface + // demonstrates the idea of full duplexing. + rpc FullDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); + + // A sequence of requests followed by a sequence of responses. + // The server buffers all the client requests and then serves them in order. A + // stream of responses are returned to the client when the server starts with + // first request. + rpc HalfDuplexCall(stream StreamingOutputCallRequest) + returns (stream StreamingOutputCallResponse); +} diff --git a/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go new file mode 100644 index 000000000..76f9fc541 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package leakcheck contains functions to check leaked goroutines. 
+// +// Call "defer leakcheck.Check(t)" at the beginning of tests. +package leakcheck + +import ( + "runtime" + "sort" + "strings" + "time" +) + +var goroutinesToIgnore = []string{ + "testing.Main(", + "testing.tRunner(", + "testing.(*M).", + "runtime.goexit", + "created by runtime.gc", + "created by runtime/trace.Start", + "interestingGoroutines", + "runtime.MHeap_Scavenger", + "signal.signal_recv", + "sigterm.handler", + "runtime_mcall", + "(*loggingT).flushDaemon", + "goroutine in C code", +} + +// RegisterIgnoreGoroutine appends s into the ignore goroutine list. The +// goroutines whose stack trace contains s will not be identified as leaked +// goroutines. Not thread-safe, only call this function in init(). +func RegisterIgnoreGoroutine(s string) { + goroutinesToIgnore = append(goroutinesToIgnore, s) +} + +func ignore(g string) bool { + sl := strings.SplitN(g, "\n", 2) + if len(sl) != 2 { + return true + } + stack := strings.TrimSpace(sl[1]) + if strings.HasPrefix(stack, "testing.RunTests") { + return true + } + + if stack == "" { + return true + } + + for _, s := range goroutinesToIgnore { + if strings.Contains(stack, s) { + return true + } + } + + return false +} + +// interestingGoroutines returns all goroutines we care about for the purpose of +// leak checking. It excludes testing or runtime ones. +func interestingGoroutines() (gs []string) { + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + for _, g := range strings.Split(string(buf), "\n\n") { + if !ignore(g) { + gs = append(gs, g) + } + } + sort.Strings(gs) + return +} + +// Errorfer is the interface that wraps the Errorf method. It's a subset of +// testing.TB to make it easy to use Check. +type Errorfer interface { + Errorf(format string, args ...interface{}) +} + +func check(efer Errorfer, timeout time.Duration) { + // Loop, waiting for goroutines to shut down. + // Wait up to timeout, but finish as quickly as possible. 
+ deadline := time.Now().Add(timeout) + var leaked []string + for time.Now().Before(deadline) { + if leaked = interestingGoroutines(); len(leaked) == 0 { + return + } + time.Sleep(50 * time.Millisecond) + } + for _, g := range leaked { + efer.Errorf("Leaked goroutine: %v", g) + } +} + +// Check looks at the currently-running goroutines and checks if there are any +// interestring (created by gRPC) goroutines leaked. It waits up to 10 seconds +// in the error cases. +func Check(efer Errorfer) { + check(efer, 10*time.Second) +} diff --git a/vendor/google.golang.org/grpc/test/leakcheck/leakcheck_test.go b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck_test.go new file mode 100644 index 000000000..50927e9db --- /dev/null +++ b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck_test.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package leakcheck + +import ( + "fmt" + "strings" + "testing" + "time" +) + +type testErrorfer struct { + errorCount int + errors []string +} + +func (e *testErrorfer) Errorf(format string, args ...interface{}) { + e.errors = append(e.errors, fmt.Sprintf(format, args...)) + e.errorCount++ +} + +func TestCheck(t *testing.T) { + const leakCount = 3 + for i := 0; i < leakCount; i++ { + go func() { time.Sleep(2 * time.Second) }() + } + if ig := interestingGoroutines(); len(ig) == 0 { + t.Error("blah") + } + e := &testErrorfer{} + check(e, time.Second) + if e.errorCount != leakCount { + t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount) + t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n")) + } + check(t, 3*time.Second) +} + +func ignoredTestingLeak(d time.Duration) { + time.Sleep(d) +} + +func TestCheckRegisterIgnore(t *testing.T) { + RegisterIgnoreGoroutine("ignoredTestingLeak") + const leakCount = 3 + for i := 0; i < leakCount; i++ { + go func() { time.Sleep(2 * time.Second) }() + } + go func() { ignoredTestingLeak(3 * time.Second) }() + if ig := interestingGoroutines(); len(ig) == 0 { + t.Error("blah") + } + e := &testErrorfer{} + check(e, time.Second) + if e.errorCount != leakCount { + t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount) + t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n")) + } + check(t, 3*time.Second) +} diff --git a/vendor/google.golang.org/grpc/test/race.go b/vendor/google.golang.org/grpc/test/race.go new file mode 100644 index 000000000..acfa0dfae --- /dev/null +++ b/vendor/google.golang.org/grpc/test/race.go @@ -0,0 +1,24 @@ +// +build race + +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +func init() { + raceMode = true +} diff --git a/vendor/google.golang.org/grpc/test/servertester.go b/vendor/google.golang.org/grpc/test/servertester.go new file mode 100644 index 000000000..daeca0622 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/servertester.go @@ -0,0 +1,280 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test + +import ( + "bytes" + "errors" + "io" + "strings" + "testing" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// This is a subset of http2's serverTester type. +// +// serverTester wraps a io.ReadWriter (acting like the underlying +// network connection) and provides utility methods to read and write +// http2 frames. +// +// NOTE(bradfitz): this could eventually be exported somewhere. Others +// have asked for it too. For now I'm still experimenting with the +// API and don't feel like maintaining a stable testing API. 
+ +type serverTester struct { + cc io.ReadWriteCloser // client conn + t testing.TB + fr *http2.Framer + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder + + // reading frames: + frc chan http2.Frame + frErrc chan error + readTimer *time.Timer +} + +func newServerTesterFromConn(t testing.TB, cc io.ReadWriteCloser) *serverTester { + st := &serverTester{ + t: t, + cc: cc, + frc: make(chan http2.Frame, 1), + frErrc: make(chan error, 1), + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + st.fr = http2.NewFramer(cc, cc) + st.fr.ReadMetaHeaders = hpack.NewDecoder(4096 /*initialHeaderTableSize*/, nil) + + return st +} + +func (st *serverTester) readFrame() (http2.Frame, error) { + go func() { + fr, err := st.fr.ReadFrame() + if err != nil { + st.frErrc <- err + } else { + st.frc <- fr + } + }() + t := time.NewTimer(2 * time.Second) + defer t.Stop() + select { + case f := <-st.frc: + return f, nil + case err := <-st.frErrc: + return nil, err + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +// greet initiates the client's HTTP/2 connection into a state where +// frames may be sent. +func (st *serverTester) greet() { + st.writePreface() + st.writeInitialSettings() + st.wantSettings() + st.writeSettingsAck() + for { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *http2.WindowUpdateFrame: + // grpc's transport/http2_server sends this + // before the settings ack. The Go http2 + // server uses a setting instead. 
+ case *http2.SettingsFrame: + if f.IsAck() { + return + } + st.t.Fatalf("during greet, got non-ACK settings frame") + default: + st.t.Fatalf("during greet, unexpected frame type %T", f) + } + } +} + +func (st *serverTester) writePreface() { + n, err := st.cc.Write([]byte(http2.ClientPreface)) + if err != nil { + st.t.Fatalf("Error writing client preface: %v", err) + } + if n != len(http2.ClientPreface) { + st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(http2.ClientPreface)) + } +} + +func (st *serverTester) writeInitialSettings() { + if err := st.fr.WriteSettings(); err != nil { + st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (st *serverTester) writeSettingsAck() { + if err := st.fr.WriteSettingsAck(); err != nil { + st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) + } +} + +func (st *serverTester) wantSettings() *http2.SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*http2.SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*http2.SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.IsAck() { + st.t.Fatal("Settings Frame didn't have ACK set") + } +} + +// wait for any activity from the server +func (st *serverTester) wantAnyFrame() http2.Frame { + f, err := st.fr.ReadFrame() + if err != nil { + st.t.Fatal(err) + } + return f +} + +func (st *serverTester) encodeHeaderField(k, v string) { + err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } +} + +// encodeHeader encodes headers and returns their HPACK bytes. headers +// must contain an even number of key/value pairs. 
There may be +// multiple pairs for keys (e.g. "cookie"). The :method, :path, and +// :scheme headers default to GET, / and https. +func (st *serverTester) encodeHeader(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + + st.headerBuf.Reset() + + if len(headers) == 0 { + // Fast path, mostly for benchmarks, so test code doesn't pollute + // profiles when we're looking to improve server allocations. + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + return st.headerBuf.Bytes() + } + + if len(headers) == 2 && headers[0] == ":method" { + // Another fast path for benchmarks. + st.encodeHeaderField(":method", headers[1]) + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + return st.headerBuf.Bytes() + } + + pseudoCount := map[string]int{} + keys := []string{":method", ":path", ":scheme"} + vals := map[string][]string{ + ":method": {"GET"}, + ":path": {"/"}, + ":scheme": {"https"}, + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if _, ok := vals[k]; !ok { + keys = append(keys, k) + } + if strings.HasPrefix(k, ":") { + pseudoCount[k]++ + if pseudoCount[k] == 1 { + vals[k] = []string{v} + } else { + // Allows testing of invalid headers w/ dup pseudo fields. 
+ vals[k] = append(vals[k], v) + } + } else { + vals[k] = append(vals[k], v) + } + } + for _, k := range keys { + for _, v := range vals[k] { + st.encodeHeaderField(k, v) + } + } + return st.headerBuf.Bytes() +} + +func (st *serverTester) writeHeadersGRPC(streamID uint32, path string) { + st.writeHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: st.encodeHeader( + ":method", "POST", + ":path", path, + "content-type", "application/grpc", + "te", "trailers", + ), + EndStream: false, + EndHeaders: true, + }) +} + +func (st *serverTester) writeHeaders(p http2.HeadersFrameParam) { + if err := st.fr.WriteHeaders(p); err != nil { + st.t.Fatalf("Error writing HEADERS: %v", err) + } +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) writeRSTStream(streamID uint32, code http2.ErrCode) { + if err := st.fr.WriteRSTStream(streamID, code); err != nil { + st.t.Fatalf("Error writing RST_STREAM: %v", err) + } +} + +func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, padding []byte) { + if err := st.fr.WriteDataPadded(streamID, endStream, data, padding); err != nil { + st.t.Fatalf("Error writing DATA with padding: %v", err) + } +} diff --git a/vendor/google.golang.org/grpc/testdata/ca.pem b/vendor/google.golang.org/grpc/testdata/ca.pem new file mode 100644 index 000000000..6c8511a73 --- /dev/null +++ b/vendor/google.golang.org/grpc/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT 
+BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/testdata/server1.key b/vendor/google.golang.org/grpc/testdata/server1.key new file mode 100644 index 000000000..143a5b876 --- /dev/null +++ b/vendor/google.golang.org/grpc/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/testdata/server1.pem b/vendor/google.golang.org/grpc/testdata/server1.pem new file mode 100644 index 000000000..f3d43fcc5 --- /dev/null +++ b/vendor/google.golang.org/grpc/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- 
+MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/testdata/testdata.go b/vendor/google.golang.org/grpc/testdata/testdata.go new file mode 100644 index 000000000..5609b19b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/testdata/testdata.go @@ -0,0 +1,63 @@ +/* + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package testdata + +import ( + "log" + "os" + "path/filepath" +) + +// Path returns the absolute path the given relative file or directory path, +// relative to the google.golang.org/grpc/testdata directory in the user's GOPATH. +// If rel is already absolute, it is returned unmodified. +func Path(rel string) string { + if filepath.IsAbs(rel) { + return rel + } + + v, err := goPackagePath("google.golang.org/grpc/testdata") + if err != nil { + log.Fatalf("Error finding google.golang.org/grpc/testdata directory: %v", err) + } + + return filepath.Join(v, rel) +} + +func goPackagePath(pkg string) (path string, err error) { + gp := os.Getenv("GOPATH") + if gp == "" { + return path, os.ErrNotExist + } + + for _, p := range filepath.SplitList(gp) { + dir := filepath.Join(p, "src", filepath.FromSlash(pkg)) + fi, err := os.Stat(dir) + if os.IsNotExist(err) { + continue + } + if err != nil { + return "", err + } + if !fi.IsDir() { + continue + } + return dir, nil + } + return path, os.ErrNotExist +} diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 000000000..c1c96dedc --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,113 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +type firstLine struct { + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) String() string { + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? 
+} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go new file mode 100644 index 000000000..8dd2ed427 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows + // will be increased to. + bdpLimit = (1 << 20) * 4 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. 
+ beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +var ( + // Adding arbitrary data to ping so that its ack can be + // identified. + // Easter-egg: what does the ping message say? + bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} +) + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. 
+// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go new file mode 100644 index 000000000..dd1a8d42e --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -0,0 +1,311 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+)
+
+const (
+	// The default value of flow control window size in HTTP2 spec.
+	defaultWindowSize = 65535
+	// The initial window size for flow control.
+	initialWindowSize             = defaultWindowSize // for an RPC
+	infinity                      = time.Duration(math.MaxInt64)
+	defaultClientKeepaliveTime    = infinity
+	defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
+	defaultMaxStreamsClient       = 100
+	defaultMaxConnectionIdle      = infinity
+	defaultMaxConnectionAge       = infinity
+	defaultMaxConnectionAgeGrace  = infinity
+	defaultServerKeepaliveTime    = time.Duration(2 * time.Hour)
+	defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
+	defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
+	// max window limit set by HTTP2 Specs.
+	maxWindowSize = math.MaxInt32
+	// defaultLocalSendQuota sets the default value for the number of data
+	// bytes that each stream can schedule before some of it being
+	// flushed out.
+	defaultLocalSendQuota = 64 * 1024
+)
+
+// The following defines various control items which could flow through
+// the control buffer of transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, streaming resetting, etc.
+ +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool +} + +func (*headerFrame) item() {} + +type continuationFrame struct { + streamID uint32 + endHeaders bool + headerBlockFragment []byte +} + +type dataFrame struct { + streamID uint32 + endStream bool + d []byte + f func() +} + +func (*dataFrame) item() {} + +func (*continuationFrame) item() {} + +type windowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*windowUpdate) item() {} + +type settings struct { + ack bool + ss []http2.Setting +} + +func (*settings) item() {} + +type resetStream struct { + streamID uint32 + code http2.ErrCode +} + +func (*resetStream) item() {} + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) item() {} + +type flushIO struct { +} + +func (*flushIO) item() {} + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) item() {} + +// quotaPool is a pool which accumulates the quota and sends it to acquire() +// when it is available. +type quotaPool struct { + c chan int + + mu sync.Mutex + version uint32 + quota int +} + +// newQuotaPool creates a quotaPool which has quota q available to consume. +func newQuotaPool(q int) *quotaPool { + qb := "aPool{ + c: make(chan int, 1), + } + if q > 0 { + qb.c <- q + } else { + qb.quota = q + } + return qb +} + +// add cancels the pending quota sent on acquired, incremented by v and sends +// it back on acquire. +func (qb *quotaPool) add(v int) { + qb.mu.Lock() + defer qb.mu.Unlock() + qb.lockedAdd(v) +} + +func (qb *quotaPool) lockedAdd(v int) { + select { + case n := <-qb.c: + qb.quota += n + default: + } + qb.quota += v + if qb.quota <= 0 { + return + } + // After the pool has been created, this is the only place that sends on + // the channel. 
Since mu is held at this point and any quota that was sent
+	// on the channel has been retrieved, we know that this code will always
+	// place any positive quota value on the channel.
+	select {
+	case qb.c <- qb.quota:
+		qb.quota = 0
+	default:
+	}
+}
+
+func (qb *quotaPool) addAndUpdate(v int) {
+	qb.mu.Lock()
+	defer qb.mu.Unlock()
+	qb.lockedAdd(v)
+	// Update the version only after having added to the quota
+	// so that if acquireWithVersion sees the new version it is
+	// guaranteed to have seen the updated quota.
+	// Also, still keep this inside of the lock, so that when
+	// compareAndExecute is processing, this function doesn't
+	// get executed partially (quota gets updated but the version
+	// doesn't).
+	atomic.AddUint32(&(qb.version), 1)
+}
+
+func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) {
+	return qb.c, atomic.LoadUint32(&(qb.version))
+}
+
+func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
+	qb.mu.Lock()
+	defer qb.mu.Unlock()
+	if version == atomic.LoadUint32(&(qb.version)) {
+		success()
+		return true
+	}
+	failure()
+	return false
+}
+
+// acquire returns the channel on which available quota amounts are sent.
+func (qb *quotaPool) acquire() <-chan int {
+	return qb.c
+}
+
+// inFlow deals with inbound flow control
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which have been received but not been
+	// consumed by applications.
+	pendingData uint32
+	// The amount of data the application has consumed but grpc has not sent
+	// window update for them. Used to reduce window update frequency.
+	pendingUpdate uint32
+	// delta is the extra window update given by receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) uint32 {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	d := n - f.limit
+	f.limit = n
+	return d
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
+		if f.limit+n > maxWindowSize {
+			f.delta = maxWindowSize - f.limit
+		} else {
+			// Send a window update for the whole message and not just the difference between
+			// estUntransmittedData and estSenderQuota. This will be helpful in case the message
+			// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
+			f.delta = n
+		}
+		return f.delta
+	}
+	return 0
+}
+
+// onData is invoked when some data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + defer f.mu.Unlock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) + } + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + defer f.mu.Unlock() + if f.pendingData == 0 { + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + return wu + } + return 0 +} + +func (f *inFlow) resetPendingUpdate() uint32 { + f.mu.Lock() + defer f.mu.Unlock() + n := f.pendingUpdate + f.pendingUpdate = 0 + return n +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go new file mode 100644 index 000000000..f1f6caf89 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -0,0 +1,414 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + if !validContentType(r.Header.Get("Content-Type")) { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + var metakv []string + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && 
!isWhitelistedPseudoHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + mu sync.Mutex + // streamDone indicates whether WriteStatus has been called and writes channel + // has been closed. + streamDone bool +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. 
+type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. + select { + case <-ht.closedCh: + return ErrConnClosing + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.mu.Lock() + if ht.streamDone { + ht.mu.Unlock() + return nil + } + ht.streamDone = true + ht.mu.Unlock() + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + close(ht.writes) + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", "application/grpc") + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(hdr) + ht.rw.Write(data) + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
+ if isReservedHeader(k) { + continue + } + for _, v := range vv { + v = encodeMetadataHeader(k, v) + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + var ctx context.Context + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. + requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + return + case <-ht.closedCh: + case <-clientGone: + } + cancel() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + ctx = peer.NewContext(ctx, pr) + s.ctx = newContextWithStream(ctx, s) + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. 
+ readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{data: buf[:n:n]}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server_test.go b/vendor/google.golang.org/grpc/transport/handler_server_test.go new file mode 100644 index 000000000..06fe813ca --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/handler_server_test.go @@ -0,0 +1,462 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "sync" + "testing" + "time" + + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/ptypes/duration" + "golang.org/x/net/context" + epb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +func TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { + type testCase struct { + name string + req *http.Request + wantErr string + modrw func(http.ResponseWriter) http.ResponseWriter + check func(*serverHandlerTransport, *testCase) error + } + tests := []testCase{ + { + name: "http/1.1", + req: &http.Request{ + ProtoMajor: 1, + ProtoMinor: 1, + }, + wantErr: "gRPC requires HTTP/2", + }, + { + name: "bad method", + req: &http.Request{ + ProtoMajor: 2, + Method: "GET", + Header: http.Header{}, + RequestURI: "/", + }, + wantErr: "invalid gRPC request method", + }, + { + name: "bad content type", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/foo"}, + }, + RequestURI: "/service/foo.bar", + }, + wantErr: "invalid gRPC request content-type", + }, + { + name: "not flusher", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + }, + RequestURI: "/service/foo.bar", + }, + modrw: func(w http.ResponseWriter) http.ResponseWriter { + // Return w without its Flush method + type onlyCloseNotifier interface { + http.ResponseWriter + http.CloseNotifier + } + return struct{ onlyCloseNotifier }{w.(onlyCloseNotifier)} + }, + wantErr: "gRPC requires a ResponseWriter supporting http.Flusher", + }, + { + name: "not closenotifier", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + }, + RequestURI: "/service/foo.bar", + }, + modrw: 
func(w http.ResponseWriter) http.ResponseWriter { + // Return w without its CloseNotify method + type onlyFlusher interface { + http.ResponseWriter + http.Flusher + } + return struct{ onlyFlusher }{w.(onlyFlusher)} + }, + wantErr: "gRPC requires a ResponseWriter supporting http.CloseNotifier", + }, + { + name: "valid", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + }, + check: func(t *serverHandlerTransport, tt *testCase) error { + if t.req != tt.req { + return fmt.Errorf("t.req = %p; want %p", t.req, tt.req) + } + if t.rw == nil { + return errors.New("t.rw = nil; want non-nil") + } + return nil + }, + }, + { + name: "with timeout", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": []string{"application/grpc"}, + "Grpc-Timeout": {"200m"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + }, + check: func(t *serverHandlerTransport, tt *testCase) error { + if !t.timeoutSet { + return errors.New("timeout not set") + } + if want := 200 * time.Millisecond; t.timeout != want { + return fmt.Errorf("timeout = %v; want %v", t.timeout, want) + } + return nil + }, + }, + { + name: "with bad timeout", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": []string{"application/grpc"}, + "Grpc-Timeout": {"tomorrow"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + }, + wantErr: `stream error: code = Internal desc = "malformed time-out: transport: timeout unit is not recognized: \"tomorrow\""`, + }, + { + name: "with metadata", + req: &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": []string{"application/grpc"}, + "meta-foo": {"foo-val"}, + "meta-bar": {"bar-val1", "bar-val2"}, + "user-agent": {"x/y 
a/b"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + }, + check: func(ht *serverHandlerTransport, tt *testCase) error { + want := metadata.MD{ + "meta-bar": {"bar-val1", "bar-val2"}, + "user-agent": {"x/y a/b"}, + "meta-foo": {"foo-val"}, + } + + if !reflect.DeepEqual(ht.headerMD, want) { + return fmt.Errorf("metdata = %#v; want %#v", ht.headerMD, want) + } + return nil + }, + }, + } + + for _, tt := range tests { + rw := newTestHandlerResponseWriter() + if tt.modrw != nil { + rw = tt.modrw(rw) + } + got, gotErr := NewServerHandlerTransport(rw, tt.req) + if (gotErr != nil) != (tt.wantErr != "") || (gotErr != nil && gotErr.Error() != tt.wantErr) { + t.Errorf("%s: error = %v; want %q", tt.name, gotErr, tt.wantErr) + continue + } + if gotErr != nil { + continue + } + if tt.check != nil { + if err := tt.check(got.(*serverHandlerTransport), &tt); err != nil { + t.Errorf("%s: %v", tt.name, err) + } + } + } +} + +type testHandlerResponseWriter struct { + *httptest.ResponseRecorder + closeNotify chan bool +} + +func (w testHandlerResponseWriter) CloseNotify() <-chan bool { return w.closeNotify } +func (w testHandlerResponseWriter) Flush() {} + +func newTestHandlerResponseWriter() http.ResponseWriter { + return testHandlerResponseWriter{ + ResponseRecorder: httptest.NewRecorder(), + closeNotify: make(chan bool, 1), + } +} + +type handleStreamTest struct { + t *testing.T + bodyw *io.PipeWriter + req *http.Request + rw testHandlerResponseWriter + ht *serverHandlerTransport +} + +func newHandleStreamTest(t *testing.T) *handleStreamTest { + bodyr, bodyw := io.Pipe() + req := &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + Body: bodyr, + } + rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) + ht, err := NewServerHandlerTransport(rw, req) + if err != nil { + 
t.Fatal(err) + } + return &handleStreamTest{ + t: t, + bodyw: bodyw, + ht: ht.(*serverHandlerTransport), + rw: rw, + } +} + +func TestHandlerTransport_HandleStreams(t *testing.T) { + st := newHandleStreamTest(t) + handleStream := func(s *Stream) { + if want := "/service/foo.bar"; s.method != want { + t.Errorf("stream method = %q; want %q", s.method, want) + } + st.bodyw.Close() // no body + st.ht.WriteStatus(s, status.New(codes.OK, "")) + } + st.ht.HandleStreams( + func(s *Stream) { go handleStream(s) }, + func(ctx context.Context, method string) context.Context { return ctx }, + ) + wantHeader := http.Header{ + "Date": nil, + "Content-Type": {"application/grpc"}, + "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, + "Grpc-Status": {"0"}, + } + if !reflect.DeepEqual(st.rw.HeaderMap, wantHeader) { + t.Errorf("Header+Trailer Map: %#v; want %#v", st.rw.HeaderMap, wantHeader) + } +} + +// Tests that codes.Unimplemented will close the body, per comment in handler_server.go. +func TestHandlerTransport_HandleStreams_Unimplemented(t *testing.T) { + handleStreamCloseBodyTest(t, codes.Unimplemented, "thingy is unimplemented") +} + +// Tests that codes.InvalidArgument will close the body, per comment in handler_server.go. 
+func TestHandlerTransport_HandleStreams_InvalidArgument(t *testing.T) { + handleStreamCloseBodyTest(t, codes.InvalidArgument, "bad arg") +} + +func handleStreamCloseBodyTest(t *testing.T, statusCode codes.Code, msg string) { + st := newHandleStreamTest(t) + + handleStream := func(s *Stream) { + st.ht.WriteStatus(s, status.New(statusCode, msg)) + } + st.ht.HandleStreams( + func(s *Stream) { go handleStream(s) }, + func(ctx context.Context, method string) context.Context { return ctx }, + ) + wantHeader := http.Header{ + "Date": nil, + "Content-Type": {"application/grpc"}, + "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, + "Grpc-Status": {fmt.Sprint(uint32(statusCode))}, + "Grpc-Message": {encodeGrpcMessage(msg)}, + } + + if !reflect.DeepEqual(st.rw.HeaderMap, wantHeader) { + t.Errorf("Header+Trailer mismatch.\n got: %#v\nwant: %#v", st.rw.HeaderMap, wantHeader) + } +} + +func TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { + bodyr, bodyw := io.Pipe() + req := &http.Request{ + ProtoMajor: 2, + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/grpc"}, + "Grpc-Timeout": {"200m"}, + }, + URL: &url.URL{ + Path: "/service/foo.bar", + }, + RequestURI: "/service/foo.bar", + Body: bodyr, + } + rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) + ht, err := NewServerHandlerTransport(rw, req) + if err != nil { + t.Fatal(err) + } + runStream := func(s *Stream) { + defer bodyw.Close() + select { + case <-s.ctx.Done(): + case <-time.After(5 * time.Second): + t.Errorf("timeout waiting for ctx.Done") + return + } + err := s.ctx.Err() + if err != context.DeadlineExceeded { + t.Errorf("ctx.Err = %v; want %v", err, context.DeadlineExceeded) + return + } + ht.WriteStatus(s, status.New(codes.DeadlineExceeded, "too slow")) + } + ht.HandleStreams( + func(s *Stream) { go runStream(s) }, + func(ctx context.Context, method string) context.Context { return ctx }, + ) + wantHeader := http.Header{ + "Date": nil, + 
"Content-Type": {"application/grpc"}, + "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, + "Grpc-Status": {"4"}, + "Grpc-Message": {encodeGrpcMessage("too slow")}, + } + if !reflect.DeepEqual(rw.HeaderMap, wantHeader) { + t.Errorf("Header+Trailer Map mismatch.\n got: %#v\nwant: %#v", rw.HeaderMap, wantHeader) + } +} + +func TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) { + st := newHandleStreamTest(t) + handleStream := func(s *Stream) { + if want := "/service/foo.bar"; s.method != want { + t.Errorf("stream method = %q; want %q", s.method, want) + } + st.bodyw.Close() // no body + + var wg sync.WaitGroup + wg.Add(5) + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + st.ht.WriteStatus(s, status.New(codes.OK, "")) + }() + } + wg.Wait() + } + st.ht.HandleStreams( + func(s *Stream) { go handleStream(s) }, + func(ctx context.Context, method string) context.Context { return ctx }, + ) +} + +func TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) { + errDetails := []proto.Message{ + &epb.RetryInfo{ + RetryDelay: &dpb.Duration{Seconds: 60}, + }, + &epb.ResourceInfo{ + ResourceType: "foo bar", + ResourceName: "service.foo.bar", + Owner: "User", + }, + } + + statusCode := codes.ResourceExhausted + msg := "you are being throttled" + st, err := status.New(statusCode, msg).WithDetails(errDetails...) 
+ if err != nil { + t.Fatal(err) + } + + stBytes, err := proto.Marshal(st.Proto()) + if err != nil { + t.Fatal(err) + } + + hst := newHandleStreamTest(t) + handleStream := func(s *Stream) { + hst.ht.WriteStatus(s, st) + } + hst.ht.HandleStreams( + func(s *Stream) { go handleStream(s) }, + func(ctx context.Context, method string) context.Context { return ctx }, + ) + wantHeader := http.Header{ + "Date": nil, + "Content-Type": {"application/grpc"}, + "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, + "Grpc-Status": {fmt.Sprint(uint32(statusCode))}, + "Grpc-Message": {encodeGrpcMessage(msg)}, + "Grpc-Status-Details-Bin": {encodeBinHeader(stBytes)}, + } + + if !reflect.DeepEqual(hst.rw.HeaderMap, wantHeader) { + t.Errorf("Header+Trailer mismatch.\n got: %#v\nwant: %#v", hst.rw.HeaderMap, wantHeader) + } +} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go new file mode 100644 index 000000000..1abb62e6d --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -0,0 +1,1322 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "io" + "math" + "net" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + ctx context.Context + cancel context.CancelFunc + target string // server name/addr + userAgent string + md interface{} + conn net.Conn // underlying communication channel + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + nextID uint32 // the next stream ID to be used + + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + // awakenKeepalive is used to wake up keepalive when after it has gone dormant. + awakenKeepalive chan struct{} + + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + // streamsQuota limits the max number of concurrent streams. + streamsQuota *quotaPool + + // The scheme used: https if TLS is on, http otherwise. + scheme string + + isSecure bool + + creds []credentials.PerRPCCredentials + + // Boolean to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. 
+ kp keepalive.ClientParameters + + statsHandler stats.Handler + + initialWindowSize int32 + + bdpEst *bdpEstimator + outQuotaVersion uint32 + + mu sync.Mutex // guard the following variables + state transportState // the state of underlying connection + activeStreams map[uint32]*Stream + // The max number of concurrent streams + maxStreams int + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err { + case io.EOF: + // Connection closures may be resolved upon retry, and are thus + // treated as temporary. + return true + case context.DeadlineExceeded: + // In Go 1.7, context.DeadlineExceeded implements Timeout(), and this + // special case is not needed. Until then, we need to keep this + // clause. + return true + } + + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return false +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. 
+func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + connectCtx, connectCancel := context.WithTimeout(ctx, timeout) + defer func() { + connectCancel() + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + if creds := opts.TransportCredentials; creds != nil { + scheme = "https" + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn) + if err != nil { + // Credentials handshake errors are typically considered permanent + // to avoid retrying on e.g. bad certificates. + temp := isTemporary(err) + return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + var buf bytes.Buffer + writeBufSize := defaultWriteBufSize + if opts.WriteBufferSize > 0 { + writeBufSize = opts.WriteBufferSize + } + readBufSize := defaultReadBufSize + if opts.ReadBufferSize > 0 { + readBufSize = opts.ReadBufferSize + } + t := &http2Client{ + ctx: ctx, + cancel: cancel, + target: addr.Addr, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + // The client initiated stream id is odd starting from 1. + nextID: 1, + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + framer: newFramer(conn, writeBufSize, readBufSize), + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + creds: opts.PerRPCCredentials, + maxStreams: defaultMaxStreamsClient, + streamsQuota: newQuotaPool(defaultMaxStreamsClient), + streamSendQuota: defaultWindowSize, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + } + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + // Make sure awakenKeepalive can't be written upon. + // keepalive routine will make it writable, if need be. 
+ t.awakenKeepalive <- struct{}{} + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + if t.initialWindowSize != defaultWindowSize { + err = t.framer.fr.WriteSettings(http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } else { + err = t.framer.fr.WriteSettings() + } + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + t.framer.writer.Flush() + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() + if t.kp.Time != infinity { + go t.keepalive() + } + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
+ s := &Stream{ + id: t.nextID, + done: make(chan struct{}), + goAway: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + localSendQuota: newQuotaPool(defaultLocalSendQuota), + headerChan: make(chan struct{}), + } + t.nextID += 2 + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + + return s +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + ctx = peer.NewContext(ctx, pr) + var ( + authData = make(map[string]string) + audience string + ) + // Create an audience string only if needed. + if len(t.creds) > 0 || callHdr.Creds != nil { + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. 
+ host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + audience = "https://" + host + callHdr.Method[:pos] + } + for _, c := range t.creds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return nil, ErrConnClosing + } + if t.state == draining { + t.mu.Unlock() + return nil, ErrStreamDrain + } + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + t.mu.Unlock() + sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) + } + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
+ timeout := dl.Sub(time.Now()) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + if md, ok := metadata.FromOutgoingContext(ctx); ok { + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + t.mu.Lock() + if t.state == draining { + t.mu.Unlock() + t.streamsQuota.add(1) + return nil, ErrStreamDrain + } + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + t.activeStreams[s.id] = s + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. 
+ if len(t.activeStreams) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + t.controlBuf.put(&ping{data: [8]byte{}}) + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. + t.awakenKeepalive <- struct{}{} + default: + } + } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) + t.mu.Unlock() + + s.mu.Lock() + s.bytesSent = true + s.mu.Unlock() + + if t.statsHandler != nil { + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return + } + if err != nil { + // notify in-flight streams, before the deletion + s.write(recvMsg{err: err}) + } + delete(t.activeStreams, s.id) + if t.state == draining && len(t.activeStreams) == 0 { + // The transport is draining and s is the last live stream on t. + t.mu.Unlock() + t.Close() + return + } + t.mu.Unlock() + // rstStream is true in case the stream is being closed at the client-side + // and the server needs to be intimated about it by sending a RST_STREAM + // frame. + // To make sure this frame is written to the wire before the headers of the + // next stream waiting for streamsQuota, we add to streamsQuota pool only + // after having acquired the writableChan to send RST_STREAM out (look at + // the controller() routine). 
+ var rstStream bool + var rstError http2.ErrCode + defer func() { + // In case, the client doesn't have to send RST_STREAM to server + // we can safely add back to streamsQuota pool now. + if !rstStream { + t.streamsQuota.add(1) + return + } + t.controlBuf.put(&resetStream{s.id, rstError}) + }() + s.mu.Lock() + rstStream = s.rstStream + rstError = s.rstError + if s.state == streamDone { + s.mu.Unlock() + return + } + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.state = streamDone + s.mu.Unlock() + if _, ok := err.(StreamError); ok { + rstStream = true + rstError = http2.ErrCodeCancel + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close() (err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + t.state = closing + t.mu.Unlock() + t.cancel() + err = t.conn.Close() + t.mu.Lock() + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + // Notify all active streams. + for _, s := range streams { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: ErrConnClosing}) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. 
+func (t *http2Client) GracefulClose() error { + t.mu.Lock() + switch t.state { + case closing, draining: + t.mu.Unlock() + return nil + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + return t.Close() + } + return nil +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + + if hdr == nil && data == nil && opts.Last { + // stream.CloseSend uses this to send an empty frame with endStream=True + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}}) + return nil + } + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for idx, r := range [][]byte{hdr, data} { + for len(r) > 0 { + size := http2MaxFrameLen + // Wait until the stream has some quota to send the data. + quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan) + if err != nil { + return err + } + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire()) + if err != nil { + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + if size > len(r) { + size = len(r) + } + p := r[:size] + ps := len(p) + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + // Acquire local send quota to be able to write to the controlBuf. 
+ ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err + } + s.localSendQuota.add(ltq - ps) // It's ok if we make it negative. + var endStream bool + // See if this is the last frame to be written. + if opts.Last { + if len(r)-size == 0 { // No more data in r after this iteration. + if idx == 0 { // We're writing data header. + if len(data) == 0 { // There's no data to follow. + endStream = true + } + } else { // We're writing data. + endStream = true + } + } + } + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }}) + if ps < sq { + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { + t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) + } + } + } + if !opts.Last { + return nil + } + s.mu.Lock() + if s.state != streamDone { + s.state = streamWriteDone + } + s.mu.Unlock() + return nil +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + // Piggyback connection's window update along. + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + +// updateWindow adjusts the inbound quota for the stream and the transport. 
+// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could've been an empty data frame. 
+ t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + if err := s.fc.onData(uint32(size)); err != nil { + s.rstStream = true + s.rstError = http2.ErrCodeFlowControl + s.finish(status.New(codes.Internal, err.Error())) + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) + } + } + s.mu.Unlock() + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. 
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.finish(status.New(codes.Internal, "server closed the stream without sending trailers")) + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)] + if !ok { + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + statusCode = codes.Unknown + } + s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)) + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + ss = append(ss, s) + return nil + }) + // The settings will be applied once the ack is sent. + t.controlBuf.put(&settings{ack: true, ss: ss}) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state != reachable && t.state != draining { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387). 
+ // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay + // with the ID of the last stream the server will process. + // Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we + // close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server + // was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close() + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.state = draining + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + close(stream.goAway) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutext to be held by +// the caller. 
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = NoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = TooManyPings + } + } +} + +func (t *http2Client) GetGoAwayReason() GoAwayReason { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + id := f.Header().StreamID + incr := f.Increment + if id == 0 { + t.sendQuotaPool.add(int(incr)) + return + } + if s, ok := t.getStream(f); ok { + s.sendQuotaPool.add(int(incr)) + } +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return + } + s.mu.Lock() + s.bytesReceived = true + s.mu.Unlock() + var state decodeState + if err := state.decodeResponseHeader(frame); err != nil { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) + // Something wrong. Stops reading even when there is remaining. 
+ return + } + + endStream := frame.StreamEnded() + var isHeader bool + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + s.mu.Lock() + if !endStream { + s.recvCompress = state.encoding + } + if !s.headerDone { + if !endStream && len(state.mdata) > 0 { + s.header = state.mdata + } + close(s.headerChan) + s.headerDone = true + isHeader = true + } + if !endStream || s.state == streamDone { + s.mu.Unlock() + return + } + + if len(state.mdata) > 0 { + s.trailer = state.mdata + } + s.finish(state.status()) + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) +} + +func handleMalformedHTTP2(s *Stream, err error) { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() + return + } + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() + return + } + t.handleSettings(sf) + + // loop to keep reading incoming messages on this transport. + for { + frame, err := t.framer.fr.ReadFrame() + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. 
+ if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail())) + } + continue + } else { + // Transport error. + t.Close() + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +func (t *http2Client) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + // TODO(zhaoq): This is a hack to avoid significant refactoring of the + // code to deal with the unrealistic int32 overflow. Probably will try + // to find a better way to handle this later. + if s.Val > math.MaxInt32 { + s.Val = math.MaxInt32 + } + t.mu.Lock() + ms := t.maxStreams + t.maxStreams = int(s.Val) + t.mu.Unlock() + t.streamsQuota.add(int(s.Val) - ms) + case http2.SettingInitialWindowSize: + t.mu.Lock() + for _, stream := range t.activeStreams { + // Adjust the sending quota for each stream. + stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + } +} + +// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. 
+func (t *http2Client) itemHandler(i item) error {
+	var err error
+	switch i := i.(type) {
+	case *dataFrame:
+		err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
+		if err == nil {
+			i.f()
+		}
+	case *headerFrame:
+		t.hBuf.Reset()
+		for _, f := range i.hf {
+			t.hEnc.WriteField(f)
+		}
+		endHeaders := false
+		first := true
+		for !endHeaders {
+			size := t.hBuf.Len()
+			if size > http2MaxFrameLen {
+				size = http2MaxFrameLen
+			} else {
+				endHeaders = true
+			}
+			if first {
+				first = false
+				err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+					StreamID:      i.streamID,
+					BlockFragment: t.hBuf.Next(size),
+					EndStream:     i.endStream,
+					EndHeaders:    endHeaders,
+				})
+			} else {
+				err = t.framer.fr.WriteContinuation(
+					i.streamID,
+					endHeaders,
+					t.hBuf.Next(size),
+				)
+			}
+			if err != nil {
+				return err
+			}
+		}
+	case *windowUpdate:
+		err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+	case *settings:
+		if i.ack {
+			t.applySettings(i.ss)
+			err = t.framer.fr.WriteSettingsAck()
+		} else {
+			err = t.framer.fr.WriteSettings(i.ss...)
+		}
+	case *resetStream:
+		// If the server needs to be informed about stream closing,
+		// then we need to make sure the RST_STREAM frame is written to
+		// the wire before the headers of the next stream waiting on
+		// streamQuota. We ensure this by adding to the streamsQuota pool
+		// only after having acquired the writableChan to send RST_STREAM.
+		err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
+		t.streamsQuota.add(1)
+	case *flushIO:
+		err = t.framer.writer.Flush()
+	case *ping:
+		if !i.ack {
+			t.bdpEst.timesnap(i.data)
+		}
+		err = t.framer.fr.WritePing(i.ack, i.data)
+	default:
+		errorf("transport: http2Client.controller got unexpected item type %v\n", i)
+	}
+	return err
+}
+
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + // Check if keepalive should go dormant. + t.mu.Lock() + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // Make awakenKeepalive writable. + <-t.awakenKeepalive + t.mu.Unlock() + select { + case <-t.awakenKeepalive: + // If the control gets here a ping has been sent + // need to reset the timer with keepalive.Timeout. + case <-t.ctx.Done(): + return + } + } else { + t.mu.Unlock() + // Send ping. + t.controlBuf.put(p) + } + + // By the time control gets here a ping has been sent one way or the other. + timer.Reset(t.kp.Timeout) + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + t.Close() + return + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go new file mode 100644 index 000000000..bad29b88a --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -0,0 +1,1188 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +// ErrIllegalHeaderWrite indicates that setting header is illegal because of +// the stream's state. +var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + ctx context.Context + cancel context.CancelFunc + conn net.Conn + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + stats stats.Handler + // Flag to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + + // Keepalive enforcement policy. 
+ kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := defaultWriteBufSize + if config.WriteBufferSize > 0 { + writeBufSize = config.WriteBufferSize + } + readBufSize := defaultReadBufSize + if config.ReadBufferSize > 0 { + readBufSize = config.ReadBufferSize + } + framer := newFramer(conn, writeBufSize, readBufSize) + // Send initial settings as connection preface to client. 
+ var isettings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(true, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(true, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. 
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + var buf bytes.Buffer + ctx, cancel := context.WithCancel(context.Background()) + t := &http2Server{ + ctx: ctx, + cancel: cancel, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + t.framer.writer.Flush() + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { + streamID := frame.Header().StreamID + + var state decodeState + for _, hf := range frame.Fields { + if err := state.processHeaderField(hf); err != nil { + if se, ok := err.(StreamError); ok { + t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]}) + } + return + } + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.encoding, + method: state.method, + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Cache the current stream to the context so that the server application + // can find out. Required when the server wants to send some metadata + // back to the client (unary call only). + s.ctx = newContextWithStream(s.ctx, s) + // Attach the received metadata to the context. 
+ if len(state.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) + } + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) + } + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + return + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream}) + return + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. + errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + return true + } + t.maxStreamID = streamID + s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) + s.localSendQuota = newQuotaPool(defaultLocalSendQuota) + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + handle(s) + return +} + +// HandleStreams receives incoming streams using the given handler. 
This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // Only log if it isn't a simple tcp accept check (ie: tcp balancer doing open/close socket) + if err != io.EOF { + errorf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + t.Close() + return + } + if !bytes.Equal(preface, clientPreface) { + errorf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + t.Close() + return + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if err != nil { + errorf("transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + t.Close() + return + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + errorf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + t.Close() + return + } + t.handleSettings(sf) + + for { + frame, err := t.framer.fr.ReadFrame() + atomic.StoreUint32(&t.activity, 1) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s) + } + t.controlBuf.put(&resetStream{se.StreamID, se.Code}) + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case 
*http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. 
+func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could be an empty frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + errorf("transport: http2Server %v", err) + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } + // Select the right stream to dispatch. 
+ s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + if err := s.fc.onData(uint32(size)); err != nil { + s.mu.Unlock() + t.closeStream(s) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) + } + } + s.mu.Unlock() + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.mu.Lock() + if s.state != streamDone { + s.state = streamReadDone + } + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + t.closeStream(s) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + ss = append(ss, s) + return nil + }) + t.controlBuf.put(&settings{ack: true, ss: ss}) +} + +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + + } +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a 
BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. + if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + errorf("transport: Got to too many pings from the client, closing the connection.") + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + id := f.Header().StreamID + incr := f.Increment + if id == 0 { + t.sendQuotaPool.add(int(incr)) + return + } + if s, ok := t.getStream(f); ok { + s.sendQuotaPool.add(int(incr)) + } +} + +// WriteHeader sends the header metedata md back to the client. 
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	select {
+	case <-s.ctx.Done():
+		return ContextErr(s.ctx.Err())
+	case <-t.ctx.Done():
+		return ErrConnClosing
+	default:
+	}
+
+	s.mu.Lock()
+	if s.headerOk || s.state == streamDone {
+		s.mu.Unlock()
+		return ErrIllegalHeaderWrite
+	}
+	s.headerOk = true
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	md = s.header
+	s.mu.Unlock()
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	for k, vv := range md {
+		if isReservedHeader(k) {
+			// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	t.controlBuf.put(&headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+	})
+	if t.stats != nil {
+		outHeader := &stats.OutHeader{
+			//WireLength: // TODO(mmukhi): Revisit this later, if needed.
+		}
+		t.stats.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + + var headersSent, hasHeader bool + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return nil + } + if s.headerOk { + headersSent = true + } + if s.header.Len() > 0 { + hasHeader = true + } + s.mu.Unlock() + + if !headersSent && hasHeader { + t.WriteHeader(s, nil) + headersSent = true + } + + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !headersSent { + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + } + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } + + // Attach the trailer metadata. + for k, vv := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
+ if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + }) + if t.stats != nil { + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + } + t.closeStream(s) + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + + var writeHeaderFrame bool + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return streamErrorf(codes.Unknown, "the stream has been done") + } + if !s.headerOk { + writeHeaderFrame = true + } + s.mu.Unlock() + if writeHeaderFrame { + t.WriteHeader(s, nil) + } + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for _, r := range [][]byte{hdr, data} { + for len(r) > 0 { + size := http2MaxFrameLen + // Wait until the stream has some quota to send the data. + quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan) + if err != nil { + return err + } + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire()) + if err != nil { + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + if size > len(r) { + size = len(r) + } + p := r[:size] + ps := len(p) + if ps < tq { + // Overbooked transport quota. Return it back. 
+ t.sendQuotaPool.add(tq - ps) + } + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err + } + s.localSendQuota.add(ltq - ps) // It's ok we make this negative. + // Reset ping strikes when sending data since this might cause + // the peer to send ping. + atomic.StoreUint32(&t.resetPingStrikes, 1) + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() { + s.localSendQuota.add(ps) + }}) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { + t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) + } + } + } + return nil +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + var pingSent bool + maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) + maxAge := time.NewTimer(t.kp.MaxConnectionAge) + keepalive := time.NewTimer(t.kp.Time) + // NOTE: All exit paths of this function should reset their + // respective timers. A failure to do so will cause the + // following clean-up to deadlock and eventually leak. 
+ defer func() { + if !maxIdle.Stop() { + <-maxIdle.C + } + if !maxAge.Stop() { + <-maxAge.C + } + if !keepalive.Stop() { + <-keepalive.C + } + }() + for { + select { + case <-maxIdle.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + maxIdle.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.drain(http2.ErrCodeNo, []byte{}) + // Reseting the timer so that the clean-up doesn't deadlock. + maxIdle.Reset(infinity) + return + } + maxIdle.Reset(val) + case <-maxAge.C: + t.drain(http2.ErrCodeNo, []byte{}) + maxAge.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-maxAge.C: + // Close the connection after grace period. + t.Close() + // Reseting the timer so that the clean-up doesn't deadlock. + maxAge.Reset(infinity) + case <-t.ctx.Done(): + } + return + case <-keepalive.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + pingSent = false + keepalive.Reset(t.kp.Time) + continue + } + if pingSent { + t.Close() + // Reseting the timer so that the clean-up doesn't deadlock. + keepalive.Reset(infinity) + return + } + pingSent = true + t.controlBuf.put(p) + keepalive.Reset(t.kp.Timeout) + case <-t.ctx.Done(): + return + } + } +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. 
+func (t *http2Server) itemHandler(i item) error { + switch i := i.(type) { + case *dataFrame: + if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil { + return err + } + i.f() + return nil + case *headerFrame: + t.hBuf.Reset() + for _, f := range i.hf { + t.hEnc.WriteField(f) + } + first := true + endHeaders := false + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + var err error + if first { + first = false + err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: i.streamID, + BlockFragment: t.hBuf.Next(size), + EndStream: i.endStream, + EndHeaders: endHeaders, + }) + } else { + err = t.framer.fr.WriteContinuation( + i.streamID, + endHeaders, + t.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + atomic.StoreUint32(&t.resetPingStrikes, 1) + return nil + case *windowUpdate: + return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) + case *settings: + if i.ack { + t.applySettings(i.ss) + return t.framer.fr.WriteSettingsAck() + } + return t.framer.fr.WriteSettings(i.ss...) + case *resetStream: + return t.framer.fr.WriteRSTStream(i.streamID, i.code) + case *goAway: + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + // The transport is closing. + return fmt.Errorf("transport: Connection closing") + } + sid := t.maxStreamID + if !i.headsUp { + // Stop accepting more streams now. + t.state = draining + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil { + return err + } + if i.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return fmt.Errorf("transport: Connection closing") + } + return nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. 
During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): + return + } + t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData}) + }() + return nil + case *flushIO: + return t.framer.writer.Flush() + case *ping: + if !i.ack { + t.bdpEst.timesnap(i.data) + } + return t.framer.fr.WritePing(i.ack, i.data) + default: + err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i) + errorf("%v", err) + return err + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.cancel() + err := t.conn.Close() + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// closeStream clears the footprint of a stream when the stream is not needed +// any more. 
+func (t *http2Server) closeStream(s *Stream) { + t.mu.Lock() + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + if t.state == draining && len(t.activeStreams) == 0 { + defer t.Close() + } + t.mu.Unlock() + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.mu.Unlock() +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := rgen.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go new file mode 100644 index 000000000..39f878cfd --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -0,0 +1,489 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // http2IOBufSize specifies the buffer size for sending frames. 
+ defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.FailedPrecondition, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } + httpStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +// Records the states during HPACK decoding. Must be reset once the +// decoding of the entire headers are finished. 
+type decodeState struct { + encoding string + // statusGen caches the stream status received from the trailer the server + // sent. Client side only. Do not access directly. After all trailers are + // parsed, use the status method to retrieve the status. + statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string + statsTags []byte + statsTrace []byte +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + "te": + return true + default: + return false + } +} + +// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders +// that should be propagated into metadata visible to users. +func isWhitelistedPseudoHeader(hdr string) bool { + switch hdr { + case ":authority": + return true + default: + return false + } +} + +func validContentType(t string) bool { + e := "application/grpc" + if !strings.HasPrefix(t, e) { + return false + } + // Support variations on the content-type + // (e.g. "application/grpc+blah", "application/grpc;blah"). + if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { + return false + } + return true +} + +func (d *decodeState) status() *status.Status { + if d.statusGen == nil { + // No status-details were provided; generate status using code/msg. 
+ d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) + } + return d.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { + for _, hf := range frame.Fields { + if err := d.processHeaderField(hf); err != nil { + return err + } + } + + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil + } + + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. + if d.httpStatus == nil { + return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + } + + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] + if !ok { + code = codes.Unknown + } + return streamErrorf(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propogated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propogated. 
+ code := int(codes.Unknown) + d.rawStatusCode = &code + return nil + +} + +func (d *decodeState) addMetadata(k, v string) { + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + d.mdata[k] = append(d.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) error { + switch f.Name { + case "content-type": + if !validContentType(f.Value) { + return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value) + } + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) + } + d.rawStatusCode = &code + case "grpc-message": + d.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + d.statusGen = status.FromProto(s) + case "grpc-timeout": + d.timeoutSet = true + var err error + if d.timeout, err = decodeTimeout(f.Value); err != nil { + return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) + } + d.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + } + d.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", 
err) + } + d.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return nil + } + d.addMetadata(f.Name, string(v)) + } + return nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. +func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. 
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildaByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". +// It checks to see if each individual byte in msg is an +// allowable byte, and then either percent encoding or passing it through. +// When percent encoding, the byte is converted into hexadecimal notation +// with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c < tildaByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c >= spaceByte && c < tildaByte && c != percentByte { + buf.WriteByte(c) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", c)) + } + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. 
+func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type framer struct { + numWriters int32 + reader io.Reader + writer *bufio.Writer + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { + f := &framer{ + reader: bufio.NewReaderSize(conn, readBufferSize), + writer: bufio.NewWriterSize(conn, writeBufferSize), + } + f.fr = http2.NewFramer(f.writer, f.reader) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/transport/http_util_test.go b/vendor/google.golang.org/grpc/transport/http_util_test.go new file mode 100644 index 000000000..4ebb23905 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http_util_test.go @@ -0,0 +1,175 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func TestTimeoutEncode(t *testing.T) { + for _, test := range []struct { + in string + out string + }{ + {"12345678ns", "12345678n"}, + {"123456789ns", "123457u"}, + {"12345678us", "12345678u"}, + {"123456789us", "123457m"}, + {"12345678ms", "12345678m"}, + {"123456789ms", "123457S"}, + {"12345678s", "12345678S"}, + {"123456789s", "2057614M"}, + {"12345678m", "12345678M"}, + {"123456789m", "2057614H"}, + } { + d, err := time.ParseDuration(test.in) + if err != nil { + t.Fatalf("failed to parse duration string %s: %v", test.in, err) + } + out := encodeTimeout(d) + if out != test.out { + t.Fatalf("timeoutEncode(%s) = %s, want %s", test.in, out, test.out) + } + } +} + +func TestTimeoutDecode(t *testing.T) { + for _, test := range []struct { + // input + s string + // output + d time.Duration + err error + }{ + {"1234S", time.Second * 1234, nil}, + {"1234x", 0, fmt.Errorf("transport: timeout unit is not recognized: %q", "1234x")}, + {"1", 0, fmt.Errorf("transport: timeout string is too short: %q", "1")}, + {"", 0, fmt.Errorf("transport: timeout string is too short: %q", "")}, + } { + d, err := decodeTimeout(test.s) + if d != test.d || fmt.Sprint(err) != fmt.Sprint(test.err) { + t.Fatalf("timeoutDecode(%q) = %d, %v, want %d, %v", test.s, int64(d), err, int64(test.d), test.err) + } + } +} + +func TestValidContentType(t *testing.T) { + tests := []struct { + h string + want bool + }{ + {"application/grpc", true}, + {"application/grpc+", true}, + 
{"application/grpc+blah", true}, + {"application/grpc;", true}, + {"application/grpc;blah", true}, + {"application/grpcd", false}, + {"application/grpd", false}, + {"application/grp", false}, + } + for _, tt := range tests { + got := validContentType(tt.h) + if got != tt.want { + t.Errorf("validContentType(%q) = %v; want %v", tt.h, got, tt.want) + } + } +} + +func TestEncodeGrpcMessage(t *testing.T) { + for _, tt := range []struct { + input string + expected string + }{ + {"", ""}, + {"Hello", "Hello"}, + {"my favorite character is \u0000", "my favorite character is %00"}, + {"my favorite character is %", "my favorite character is %25"}, + } { + actual := encodeGrpcMessage(tt.input) + if tt.expected != actual { + t.Errorf("encodeGrpcMessage(%v) = %v, want %v", tt.input, actual, tt.expected) + } + } +} + +func TestDecodeGrpcMessage(t *testing.T) { + for _, tt := range []struct { + input string + expected string + }{ + {"", ""}, + {"Hello", "Hello"}, + {"H%61o", "Hao"}, + {"H%6", "H%6"}, + {"%G0", "%G0"}, + {"%E7%B3%BB%E7%BB%9F", "系统"}, + } { + actual := decodeGrpcMessage(tt.input) + if tt.expected != actual { + t.Errorf("dncodeGrpcMessage(%v) = %v, want %v", tt.input, actual, tt.expected) + } + } +} + +const binaryValue = string(128) + +func TestEncodeMetadataHeader(t *testing.T) { + for _, test := range []struct { + // input + kin string + vin string + // output + vout string + }{ + {"key", "abc", "abc"}, + {"KEY", "abc", "abc"}, + {"key-bin", "abc", "YWJj"}, + {"key-bin", binaryValue, "woA"}, + } { + v := encodeMetadataHeader(test.kin, test.vin) + if !reflect.DeepEqual(v, test.vout) { + t.Fatalf("encodeMetadataHeader(%q, %q) = %q, want %q", test.kin, test.vin, v, test.vout) + } + } +} + +func TestDecodeMetadataHeader(t *testing.T) { + for _, test := range []struct { + // input + kin string + vin string + // output + vout string + err error + }{ + {"a", "abc", "abc", nil}, + {"key-bin", "Zm9vAGJhcg==", "foo\x00bar", nil}, + {"key-bin", "Zm9vAGJhcg", "foo\x00bar", 
nil}, + {"key-bin", "woA=", binaryValue, nil}, + {"a", "abc,efg", "abc,efg", nil}, + } { + v, err := decodeMetadataHeader(test.kin, test.vin) + if !reflect.DeepEqual(v, test.vout) || !reflect.DeepEqual(err, test.err) { + t.Fatalf("decodeMetadataHeader(%q, %q) = %q, %v, want %q, %v", test.kin, test.vin, v, err, test.vout, test.err) + } + } +} diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go new file mode 100644 index 000000000..ac8e358c5 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/log.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. +// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} + +func fatalf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Fatalf(format, args...) 
+ } +} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go new file mode 100644 index 000000000..bde8fa5c3 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -0,0 +1,777 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). +package transport // import "google.golang.org/grpc/transport" + +import ( + stdctx "context" + "fmt" + "io" + "net" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + data []byte + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// Note recvBuffer differs from controlBuffer only in that recvBuffer +// holds a channel of only recvMsg structs instead of objects implementing "item" interface. 
+// recvBuffer is written to much more often than +// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + ctx context.Context + goAway chan struct{} + recv *recvBuffer + last []byte // Stores the remaining data in the previous calls. + err error +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + n, r.err = r.read(p) + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + if r.last != nil && len(r.last) > 0 { + // Read remaining data left in last call. 
+ copied := copy(p, r.last) + r.last = r.last[copied:] + return copied, nil + } + select { + case <-r.ctx.Done(): + return 0, ContextErr(r.ctx.Err()) + case <-r.goAway: + return 0, ErrStreamDrain + case m := <-r.recv.get(): + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied := copy(p, m.data) + r.last = m.data[copied:] + return copied, nil + } +} + +// All items in an out of a controlBuffer should be the same type. +type item interface { + item() +} + +// controlBuffer is an unbounded channel of item. +type controlBuffer struct { + c chan item + mu sync.Mutex + backlog []item +} + +func newControlBuffer() *controlBuffer { + b := &controlBuffer{ + c: make(chan item, 1), + } + return b +} + +func (b *controlBuffer) put(r item) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *controlBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives an item in the buffer. +// +// Upon receipt of an item, the caller should call load to send another +// item onto the channel if there is any. +func (b *controlBuffer) get() <-chan item { + return b.c +} + +type streamState uint8 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + // nil for client side Stream. + st ServerTransport + // ctx is the associated context of the stream. + ctx context.Context + // cancel is always nil for client side Stream. + cancel context.CancelFunc + // done is closed when the final status arrives. 
+ done chan struct{}
+ // goAway is closed when the server sent a GoAway signal before this stream was initiated.
+ goAway chan struct{}
+ // method records the associated RPC method of the stream.
+ method string
+ recvCompress string
+ sendCompress string
+ buf *recvBuffer
+ trReader io.Reader
+ fc *inFlow
+ recvQuota uint32
+
+ // TODO: Remove this unused variable.
+ // The accumulated inbound quota pending for window update.
+ updateQuota uint32
+
+ // Callback to state application's intentions to read data. This
+ // is used to adjust flow control, if need be.
+ requestRead func(int)
+
+ sendQuotaPool *quotaPool
+ localSendQuota *quotaPool
+ // Close headerChan to indicate the end of reception of header metadata.
+ headerChan chan struct{}
+ // header caches the received header metadata.
+ header metadata.MD
+ // The key-value map of trailer metadata.
+ trailer metadata.MD
+
+ mu sync.RWMutex // guard the following
+ // headerOk becomes true when the first header frame is about to be sent.
+ headerOk bool
+ state streamState
+ // true iff headerChan is closed. Used to avoid closing headerChan
+ // multiple times.
+ headerDone bool
+ // the status error received from the server.
+ status *status.Status
+ // rstStream indicates whether a RST_STREAM frame needs to be sent
+ // to the server to signify that this stream is closing.
+ rstStream bool
+ // rstError is the error that needs to be sent along with the RST_STREAM frame.
+ rstError http2.ErrCode
+ // bytesSent and bytesReceived indicates whether any bytes have been sent or
+ // received on this stream.
+ bytesSent bool
+ bytesReceived bool
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+ return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream. 
+func (s *Stream) SetSendCompress(str string) {
+ s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+ return s.done
+}
+
+// GoAway returns a channel which is closed when the server sent a GoAway signal
+// before this stream was initiated.
+func (s *Stream) GoAway() <-chan struct{} {
+ return s.goAway
+}
+
+// Header acquires the key-value pairs of header metadata once it
+// is available. It blocks until i) the metadata is ready or ii) there is no
+// header metadata or iii) the stream is canceled/expired.
+func (s *Stream) Header() (metadata.MD, error) {
+ var err error
+ select {
+ case <-s.ctx.Done():
+ err = ContextErr(s.ctx.Err())
+ case <-s.goAway:
+ err = ErrStreamDrain
+ case <-s.headerChan:
+ return s.header.Copy(), nil
+ }
+ // Even if the stream is closed, header is returned if available.
+ select {
+ case <-s.headerChan:
+ return s.header.Copy(), nil
+ default:
+ }
+ return nil, err
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+func (s *Stream) Trailer() metadata.MD {
+ s.mu.RLock()
+ c := s.trailer.Copy()
+ s.mu.RUnlock()
+ return c
+}
+
+// ServerTransport returns the underlying ServerTransport for the stream.
+// The client side stream always returns nil.
+func (s *Stream) ServerTransport() ServerTransport {
+ return s.st
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+ return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+ return s.method
+}
+
+// Status returns the status received from the server.
+func (s *Stream) Status() *status.Status {
+ return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only. 
+func (s *Stream) SetHeader(md metadata.MD) error { + s.mu.Lock() + if s.headerOk || s.state == streamDone { + s.mu.Unlock() + return ErrIllegalHeaderWrite + } + if md.Len() == 0 { + s.mu.Unlock() + return nil + } + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. Server side only. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + s.mu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(m) +} + +// Read reads all p bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and +// passes them into the decoder, which converts them into a gRPC message stream. +// The error is io.EOF when the stream is done or another non-nil error if +// the stream broke. +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + if err != nil { + t.er = err + return + } + t.windowHandler(n) + return +} + +// finish sets the stream's state and status, and closes the done channel. +// s.mu must be held by the caller. st must always be non-nil. +func (s *Stream) finish(st *status.Status) { + s.status = st + s.state = streamDone + close(s.done) +} + +// BytesSent indicates whether any bytes have been sent on this stream. 
+func (s *Stream) BytesSent() bool { + s.mu.Lock() + bs := s.bytesSent + s.mu.Unlock() + return bs +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { + s.mu.Lock() + br := s.bytesReceived + s.mu.Unlock() + return br +} + +// GoString is implemented by Stream so context.String() won't +// race when printing %#v. +func (s *Stream) GoString() string { + return fmt.Sprintf("", s, s.method) +} + +// The key to save transport.Stream in the context. +type streamKey struct{} + +// newContextWithStream creates a new context from ctx and attaches stream +// to it. +func newContextWithStream(ctx context.Context, stream *Stream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// StreamFromContext returns the stream saved in ctx. +func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { + s, ok = ctx.Value(streamKey{}).(*Stream) + return +} + +// state of transport +type transportState int + +const ( + reachable transportState = iota + closing + draining +) + +// ServerConfig consists of all the configurations to establish a server transport. +type ServerConfig struct { + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int +} + +// NewServerTransport creates a ServerTransport with conn or non-nil error +// if it fails. +func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { + return newHTTP2Server(conn, config) +} + +// ConnectOptions covers all relevant options for communicating with the server. +type ConnectOptions struct { + // UserAgent is the application user agent. + UserAgent string + // Authority is the :authority pseudo-header to use. 
This field has no effect if + // TransportCredentials is set. + Authority string + // Dialer specifies how to dial a network address. + Dialer func(context.Context, string) (net.Conn, error) + // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. + FailOnNonTempDialError bool + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client connection. + TransportCredentials credentials.TransportCredentials + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandler stores the handler for stats. + StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int +} + +// TargetInfo contains the information of the target such as network address and metadata. +type TargetInfo struct { + Addr string + Metadata interface{} +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts, timeout) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. 
+ Last bool + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // transport implementation may ignore the hint. + Delay bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // RecvCompress specifies the compression algorithm applied on + // inbound messages. + RecvCompress string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. + // If it's true, the transport may modify the flush decision + // for performance purposes. + // If it's false, new stream will never be flushed. + Flush bool +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close() error + + // GracefulClose starts to tear down the transport. It stops accepting + // new RPCs and wait the completion of the pending RPCs. + GracefulClose() error + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. 
Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received. + GetGoAwayReason() GoAwayReason +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close() error + + // RemoteAddr returns the remote network address. 
+ RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() +} + +// streamErrorf creates an StreamError with the specified error code and description. +func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { + return StreamError{ + Code: c, + Desc: fmt.Sprintf(format, a...), + } +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // ErrStreamDrain indicates that the stream is rejected by the server because + // the server stops accepting new RPCs. + ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs") +) + +// TODO: See if we can replace StreamError with status package errors. + +// StreamError is an error that only affects one stream within a connection. 
+type StreamError struct { + Code codes.Code + Desc string +} + +func (e StreamError) Error() string { + return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) +} + +// wait blocks until it can receive from one of the provided contexts or channels +func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) { + select { + case <-ctx.Done(): + return 0, ContextErr(ctx.Err()) + case <-done: + return 0, io.EOF + case <-goAway: + return 0, ErrStreamDrain + case <-tctx.Done(): + return 0, ErrConnClosing + case i := <-proceed: + return i, nil + } +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, stdctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // Invalid indicates that no GoAway frame is received. + Invalid GoAwayReason = 0 + // NoReason is the default value when GoAway frame is received. + NoReason GoAwayReason = 1 + // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm + // was received and that the debug data said "too_many_pings". + TooManyPings GoAwayReason = 2 +) + +// loopyWriter is run in a separate go routine. It is the single code path that will +// write data on wire. 
+func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) { + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + } + hasData: + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + default: + if err := handler(&flushIO{}); err != nil { + return + } + break hasData + } + } + } +} diff --git a/vendor/google.golang.org/grpc/transport/transport_test.go b/vendor/google.golang.org/grpc/transport/transport_test.go new file mode 100644 index 000000000..e1dd080a1 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/transport_test.go @@ -0,0 +1,2158 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" +) + +type server struct { + lis net.Listener + port string + startedErr chan error // error (or nil) with server start value + mu sync.Mutex + conns map[ServerTransport]bool + h *testStreamHandler +} + +var ( + expectedRequest = []byte("ping") + expectedResponse = []byte("pong") + expectedRequestLarge = make([]byte, initialWindowSize*2) + expectedResponseLarge = make([]byte, initialWindowSize*2) + expectedInvalidHeaderField = "invalid/content-type" +) + +type testStreamHandler struct { + t *http2Server + notify chan struct{} +} + +type hType int + +const ( + normal hType = iota + suspended + notifyCall + misbehaved + encodingRequiredStatus + invalidHeaderField + delayRead + delayWrite + pingpong +) + +func (h *testStreamHandler) handleStreamAndNotify(s *Stream) { + if h.notify == nil { + return + } + go func() { + select { + case <-h.notify: + default: + close(h.notify) + } + }() +} + +func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) { + req := expectedRequest + resp := expectedResponse + if s.Method() == "foo.Large" { + req = expectedRequestLarge + resp = expectedResponseLarge + } + p := make([]byte, len(req)) + _, err := s.Read(p) + if err != nil { + return + } + if !bytes.Equal(p, req) { + t.Fatalf("handleStream got %v, want %v", p, req) + } + // send a response back to the client. + h.t.Write(s, nil, resp, &Options{}) + // send the trailer to end the stream. 
+ h.t.WriteStatus(s, status.New(codes.OK, "")) +} + +func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) { + header := make([]byte, 5) + for i := 0; i < 10; i++ { + if _, err := s.Read(header); err != nil { + t.Fatalf("Error on server while reading data header: %v", err) + } + sz := binary.BigEndian.Uint32(header[1:]) + msg := make([]byte, int(sz)) + if _, err := s.Read(msg); err != nil { + t.Fatalf("Error on server while reading message: %v", err) + } + buf := make([]byte, sz+5) + buf[0] = byte(0) + binary.BigEndian.PutUint32(buf[1:], uint32(sz)) + copy(buf[5:], msg) + h.t.Write(s, nil, buf, &Options{}) + } +} + +func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { + conn, ok := s.ServerTransport().(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", s.ServerTransport()) + } + var sent int + p := make([]byte, http2MaxFrameLen) + for sent < initialWindowSize { + n := initialWindowSize - sent + // The last message may be smaller than http2MaxFrameLen + if n <= http2MaxFrameLen { + if s.Method() == "foo.Connection" { + // Violate connection level flow control window of client but do not + // violate any stream level windows. + p = make([]byte, n) + } else { + // Violate stream level flow control window of client. + p = make([]byte, n+1) + } + } + conn.controlBuf.put(&dataFrame{s.id, false, p, func() {}}) + sent += len(p) + } +} + +func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *Stream) { + // raw newline is not accepted by http2 framer so it must be encoded. 
+ h.t.WriteStatus(s, encodingTestStatus) +} + +func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) { + headerFields := []hpack.HeaderField{} + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField}) + h.t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) +} + +func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) { + req := expectedRequest + resp := expectedResponse + if s.Method() == "foo.Large" { + req = expectedRequestLarge + resp = expectedResponseLarge + } + p := make([]byte, len(req)) + + // Wait before reading. Give time to client to start sending + // before server starts reading. + time.Sleep(2 * time.Second) + _, err := s.Read(p) + if err != nil { + t.Fatalf("s.Read(_) = _, %v, want _, ", err) + return + } + + if !bytes.Equal(p, req) { + t.Fatalf("handleStream got %v, want %v", p, req) + } + // send a response back to the client. + h.t.Write(s, nil, resp, &Options{}) + // send the trailer to end the stream. + h.t.WriteStatus(s, status.New(codes.OK, "")) +} + +func (h *testStreamHandler) handleStreamDelayWrite(t *testing.T, s *Stream) { + req := expectedRequest + resp := expectedResponse + if s.Method() == "foo.Large" { + req = expectedRequestLarge + resp = expectedResponseLarge + } + p := make([]byte, len(req)) + _, err := s.Read(p) + if err != nil { + t.Fatalf("s.Read(_) = _, %v, want _, ", err) + return + } + if !bytes.Equal(p, req) { + t.Fatalf("handleStream got %v, want %v", p, req) + } + + // Wait before sending. Give time to client to start reading + // before server starts sending. + time.Sleep(2 * time.Second) + h.t.Write(s, nil, resp, &Options{}) + // send the trailer to end the stream. + h.t.WriteStatus(s, status.New(codes.OK, "")) +} + +// start starts server. Other goroutines should block on s.readyChan for further operations. 
+func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hType) { + var err error + if port == 0 { + s.lis, err = net.Listen("tcp", "localhost:0") + } else { + s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port)) + } + if err != nil { + s.startedErr <- fmt.Errorf("failed to listen: %v", err) + return + } + _, p, err := net.SplitHostPort(s.lis.Addr().String()) + if err != nil { + s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) + return + } + s.port = p + s.conns = make(map[ServerTransport]bool) + s.startedErr <- nil + for { + conn, err := s.lis.Accept() + if err != nil { + return + } + transport, err := NewServerTransport("http2", conn, serverConfig) + if err != nil { + return + } + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + transport.Close() + return + } + s.conns[transport] = true + h := &testStreamHandler{t: transport.(*http2Server)} + s.h = h + s.mu.Unlock() + switch ht { + case notifyCall: + go transport.HandleStreams(h.handleStreamAndNotify, + func(ctx context.Context, _ string) context.Context { + return ctx + }) + case suspended: + go transport.HandleStreams(func(*Stream) {}, // Do nothing to handle the stream. 
+ func(ctx context.Context, method string) context.Context { + return ctx + }) + case misbehaved: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamMisbehave(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + case encodingRequiredStatus: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamEncodingRequiredStatus(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + case invalidHeaderField: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamInvalidHeaderField(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + case delayRead: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamDelayRead(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + case delayWrite: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamDelayWrite(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + case pingpong: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamPingPong(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + default: + go transport.HandleStreams(func(s *Stream) { + go h.handleStream(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + } + } +} + +func (s *server) wait(t *testing.T, timeout time.Duration) { + select { + case err := <-s.startedErr: + if err != nil { + t.Fatal(err) + } + case <-time.After(timeout): + t.Fatalf("Timed out after %v waiting for server to be ready", timeout) + } +} + +func (s *server) stop() { + s.lis.Close() + s.mu.Lock() + for c := range s.conns { + c.Close() + } + s.conns = nil + s.mu.Unlock() +} + +func setUp(t *testing.T, port int, maxStreams uint32, ht hType) (*server, ClientTransport) { + return setUpWithOptions(t, port, &ServerConfig{MaxStreams: maxStreams}, ht, ConnectOptions{}) +} + +func 
setUpWithOptions(t *testing.T, port int, serverConfig *ServerConfig, ht hType, copts ConnectOptions) (*server, ClientTransport) { + server := &server{startedErr: make(chan error, 1)} + go server.start(t, port, serverConfig, ht) + server.wait(t, 2*time.Second) + addr := "localhost:" + server.port + var ( + ct ClientTransport + connErr error + ) + target := TargetInfo{ + Addr: addr, + } + ct, connErr = NewClientTransport(context.Background(), target, copts, 2*time.Second) + if connErr != nil { + t.Fatalf("failed to create transport: %v", connErr) + } + return server, ct +} + +func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, done chan net.Conn) ClientTransport { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + // Launch a non responsive server. + go func() { + defer lis.Close() + conn, err := lis.Accept() + if err != nil { + t.Errorf("Error at server-side while accepting: %v", err) + close(done) + return + } + done <- conn + }() + tr, err := NewClientTransport(context.Background(), TargetInfo{Addr: lis.Addr().String()}, copts, 2*time.Second) + if err != nil { + // Server clean-up. + lis.Close() + if conn, ok := <-done; ok { + conn.Close() + } + t.Fatalf("Failed to dial: %v", err) + } + return tr +} + +// TestInflightStreamClosing ensures that closing in-flight stream +// sends StreamError to concurrent stream reader. 
+func TestInflightStreamClosing(t *testing.T) { + serverConfig := &ServerConfig{} + server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) + defer server.stop() + defer client.Close() + + stream, err := client.NewStream(context.Background(), &CallHdr{}) + if err != nil { + t.Fatalf("Client failed to create RPC request: %v", err) + } + + donec := make(chan struct{}) + serr := StreamError{Desc: "client connection is closing"} + go func() { + defer close(donec) + if _, err := stream.Read(make([]byte, defaultWindowSize)); err != serr { + t.Errorf("unexpected Stream error %v, expected %v", err, serr) + } + }() + + // should unblock concurrent stream.Read + client.CloseStream(stream, serr) + + // wait for stream.Read error + timeout := time.NewTimer(5 * time.Second) + select { + case <-donec: + if !timeout.Stop() { + <-timeout.C + } + case <-timeout.C: + t.Fatalf("Test timed out, expected a StreamError.") + } +} + +// TestMaxConnectionIdle tests that a server will send GoAway to a idle client. +// An idle client is one who doesn't make any RPC calls for a duration of +// MaxConnectionIdle time. 
+func TestMaxConnectionIdle(t *testing.T) {
+ serverConfig := &ServerConfig{
+ KeepaliveParams: keepalive.ServerParameters{
+ MaxConnectionIdle: 2 * time.Second,
+ },
+ }
+ server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
+ defer server.stop()
+ defer client.Close()
+ stream, err := client.NewStream(context.Background(), &CallHdr{Flush: true})
+ if err != nil {
+ t.Fatalf("Client failed to create RPC request: %v", err)
+ }
+ stream.mu.Lock()
+ stream.rstStream = true
+ stream.mu.Unlock()
+ client.CloseStream(stream, nil)
+ // wait for the server to see the closed stream and for the max-idle logic to send a GoAway after no new RPCs are made
+ timeout := time.NewTimer(time.Second * 4)
+ select {
+ case <-client.GoAway():
+ if !timeout.Stop() {
+ <-timeout.C
+ }
+ case <-timeout.C:
+ t.Fatalf("Test timed out, expected a GoAway from the server.")
+ }
+}
+
+// TestMaxConnectionIdleNegative tests that a server will not send GoAway to a non-idle(busy) client.
+func TestMaxConnectionIdleNegative(t *testing.T) {
+ serverConfig := &ServerConfig{
+ KeepaliveParams: keepalive.ServerParameters{
+ MaxConnectionIdle: 2 * time.Second,
+ },
+ }
+ server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
+ defer server.stop()
+ defer client.Close()
+ _, err := client.NewStream(context.Background(), &CallHdr{Flush: true})
+ if err != nil {
+ t.Fatalf("Client failed to create RPC request: %v", err)
+ }
+ timeout := time.NewTimer(time.Second * 4)
+ select {
+ case <-client.GoAway():
+ if !timeout.Stop() {
+ <-timeout.C
+ }
+ t.Fatalf("A non-idle client received a GoAway.")
+ case <-timeout.C:
+ }
+
+}
+
+// TestMaxConnectionAge tests that a server will send GoAway after a duration of MaxConnectionAge. 
+func TestMaxConnectionAge(t *testing.T) { + serverConfig := &ServerConfig{ + KeepaliveParams: keepalive.ServerParameters{ + MaxConnectionAge: 2 * time.Second, + }, + } + server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) + defer server.stop() + defer client.Close() + _, err := client.NewStream(context.Background(), &CallHdr{}) + if err != nil { + t.Fatalf("Client failed to create stream: %v", err) + } + // Wait for max-age logic to send GoAway. + timeout := time.NewTimer(4 * time.Second) + select { + case <-client.GoAway(): + if !timeout.Stop() { + <-timeout.C + } + case <-timeout.C: + t.Fatalf("Test timer out, expected a GoAway from the server.") + } +} + +// TestKeepaliveServer tests that a server closes connection with a client that doesn't respond to keepalive pings. +func TestKeepaliveServer(t *testing.T) { + serverConfig := &ServerConfig{ + KeepaliveParams: keepalive.ServerParameters{ + Time: 2 * time.Second, + Timeout: 1 * time.Second, + }, + } + server, c := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) + defer server.stop() + defer c.Close() + client, err := net.Dial("tcp", server.lis.Addr().String()) + if err != nil { + t.Fatalf("Failed to dial: %v", err) + } + defer client.Close() + // Set read deadline on client conn so that it doesn't block forever in errorsome cases. + client.SetReadDeadline(time.Now().Add(10 * time.Second)) + // Wait for keepalive logic to close the connection. + time.Sleep(4 * time.Second) + b := make([]byte, 24) + for { + _, err = client.Read(b) + if err == nil { + continue + } + if err != io.EOF { + t.Fatalf("client.Read(_) = _,%v, want io.EOF", err) + } + break + } +} + +// TestKeepaliveServerNegative tests that a server doesn't close connection with a client that responds to keepalive pings. 
+func TestKeepaliveServerNegative(t *testing.T) { + serverConfig := &ServerConfig{ + KeepaliveParams: keepalive.ServerParameters{ + Time: 2 * time.Second, + Timeout: 1 * time.Second, + }, + } + server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) + defer server.stop() + defer client.Close() + // Give keepalive logic some time by sleeping. + time.Sleep(4 * time.Second) + // Assert that client is still active. + clientTr := client.(*http2Client) + clientTr.mu.Lock() + defer clientTr.mu.Unlock() + if clientTr.state != reachable { + t.Fatalf("Test failed: Expected server-client connection to be healthy.") + } +} + +func TestKeepaliveClientClosesIdleTransport(t *testing.T) { + done := make(chan net.Conn, 1) + tr := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ + Time: 2 * time.Second, // Keepalive time = 2 sec. + Timeout: 1 * time.Second, // Keepalive timeout = 1 sec. + PermitWithoutStream: true, // Run keepalive even with no RPCs. + }}, done) + defer tr.Close() + conn, ok := <-done + if !ok { + t.Fatalf("Server didn't return connection object") + } + defer conn.Close() + // Sleep for keepalive to close the connection. + time.Sleep(4 * time.Second) + // Assert that the connection was closed. + ct := tr.(*http2Client) + ct.mu.Lock() + defer ct.mu.Unlock() + if ct.state == reachable { + t.Fatalf("Test Failed: Expected client transport to have closed.") + } +} + +func TestKeepaliveClientStaysHealthyOnIdleTransport(t *testing.T) { + done := make(chan net.Conn, 1) + tr := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ + Time: 2 * time.Second, // Keepalive time = 2 sec. + Timeout: 1 * time.Second, // Keepalive timeout = 1 sec. + }}, done) + defer tr.Close() + conn, ok := <-done + if !ok { + t.Fatalf("server didn't reutrn connection object") + } + defer conn.Close() + // Give keepalive some time. 
+ time.Sleep(4 * time.Second) + // Assert that connections is still healthy. + ct := tr.(*http2Client) + ct.mu.Lock() + defer ct.mu.Unlock() + if ct.state != reachable { + t.Fatalf("Test failed: Expected client transport to be healthy.") + } +} + +func TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { + done := make(chan net.Conn, 1) + tr := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ + Time: 2 * time.Second, // Keepalive time = 2 sec. + Timeout: 1 * time.Second, // Keepalive timeout = 1 sec. + }}, done) + defer tr.Close() + conn, ok := <-done + if !ok { + t.Fatalf("Server didn't return connection object") + } + defer conn.Close() + // Create a stream. + _, err := tr.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create a new stream: %v", err) + } + // Give keepalive some time. + time.Sleep(4 * time.Second) + // Assert that transport was closed. + ct := tr.(*http2Client) + ct.mu.Lock() + defer ct.mu.Unlock() + if ct.state == reachable { + t.Fatalf("Test failed: Expected client transport to have closed.") + } +} + +func TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { + s, tr := setUpWithOptions(t, 0, &ServerConfig{MaxStreams: math.MaxUint32}, normal, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ + Time: 2 * time.Second, // Keepalive time = 2 sec. + Timeout: 1 * time.Second, // Keepalive timeout = 1 sec. + PermitWithoutStream: true, // Run keepalive even with no RPCs. + }}) + defer s.stop() + defer tr.Close() + // Give keep alive some time. + time.Sleep(4 * time.Second) + // Assert that transport is healthy. 
+ ct := tr.(*http2Client) + ct.mu.Lock() + defer ct.mu.Unlock() + if ct.state != reachable { + t.Fatalf("Test failed: Expected client transport to be healthy.") + } +} + +func TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { + serverConfig := &ServerConfig{ + KeepalivePolicy: keepalive.EnforcementPolicy{ + MinTime: 2 * time.Second, + }, + } + clientOptions := ConnectOptions{ + KeepaliveParams: keepalive.ClientParameters{ + Time: 50 * time.Millisecond, + Timeout: 50 * time.Millisecond, + PermitWithoutStream: true, + }, + } + server, client := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) + defer server.stop() + defer client.Close() + + timeout := time.NewTimer(2 * time.Second) + select { + case <-client.GoAway(): + if !timeout.Stop() { + <-timeout.C + } + case <-timeout.C: + t.Fatalf("Test failed: Expected a GoAway from server.") + } + time.Sleep(500 * time.Millisecond) + ct := client.(*http2Client) + ct.mu.Lock() + defer ct.mu.Unlock() + if ct.state == reachable { + t.Fatalf("Test failed: Expected the connection to be closed.") + } +} + +func TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { + serverConfig := &ServerConfig{ + KeepalivePolicy: keepalive.EnforcementPolicy{ + MinTime: 2 * time.Second, + }, + } + clientOptions := ConnectOptions{ + KeepaliveParams: keepalive.ClientParameters{ + Time: 50 * time.Millisecond, + Timeout: 50 * time.Millisecond, + }, + } + server, client := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) + defer server.stop() + defer client.Close() + + if _, err := client.NewStream(context.Background(), &CallHdr{Flush: true}); err != nil { + t.Fatalf("Client failed to create stream.") + } + timeout := time.NewTimer(2 * time.Second) + select { + case <-client.GoAway(): + if !timeout.Stop() { + <-timeout.C + } + case <-timeout.C: + t.Fatalf("Test failed: Expected a GoAway from server.") + } + time.Sleep(500 * time.Millisecond) + ct := client.(*http2Client) + ct.mu.Lock() + 
defer ct.mu.Unlock() + if ct.state == reachable { + t.Fatalf("Test failed: Expected the connection to be closed.") + } +} + +func TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { + serverConfig := &ServerConfig{ + KeepalivePolicy: keepalive.EnforcementPolicy{ + MinTime: 100 * time.Millisecond, + PermitWithoutStream: true, + }, + } + clientOptions := ConnectOptions{ + KeepaliveParams: keepalive.ClientParameters{ + Time: 101 * time.Millisecond, + Timeout: 50 * time.Millisecond, + PermitWithoutStream: true, + }, + } + server, client := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) + defer server.stop() + defer client.Close() + + // Give keepalive enough time. + time.Sleep(2 * time.Second) + // Assert that connection is healthy. + ct := client.(*http2Client) + ct.mu.Lock() + defer ct.mu.Unlock() + if ct.state != reachable { + t.Fatalf("Test failed: Expected connection to be healthy.") + } +} + +func TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { + serverConfig := &ServerConfig{ + KeepalivePolicy: keepalive.EnforcementPolicy{ + MinTime: 100 * time.Millisecond, + }, + } + clientOptions := ConnectOptions{ + KeepaliveParams: keepalive.ClientParameters{ + Time: 101 * time.Millisecond, + Timeout: 50 * time.Millisecond, + }, + } + server, client := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) + defer server.stop() + defer client.Close() + + if _, err := client.NewStream(context.Background(), &CallHdr{Flush: true}); err != nil { + t.Fatalf("Client failed to create stream.") + } + + // Give keepalive enough time. + time.Sleep(2 * time.Second) + // Assert that connection is healthy. 
+ ct := client.(*http2Client) + ct.mu.Lock() + defer ct.mu.Unlock() + if ct.state != reachable { + t.Fatalf("Test failed: Expected connection to be healthy.") + } +} + +func TestClientSendAndReceive(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, normal) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Small", + } + s1, err1 := ct.NewStream(context.Background(), callHdr) + if err1 != nil { + t.Fatalf("failed to open stream: %v", err1) + } + if s1.id != 1 { + t.Fatalf("wrong stream id: %d", s1.id) + } + s2, err2 := ct.NewStream(context.Background(), callHdr) + if err2 != nil { + t.Fatalf("failed to open stream: %v", err2) + } + if s2.id != 3 { + t.Fatalf("wrong stream id: %d", s2.id) + } + opts := Options{ + Last: true, + Delay: false, + } + if err := ct.Write(s1, nil, expectedRequest, &opts); err != nil && err != io.EOF { + t.Fatalf("failed to send data: %v", err) + } + p := make([]byte, len(expectedResponse)) + _, recvErr := s1.Read(p) + if recvErr != nil || !bytes.Equal(p, expectedResponse) { + t.Fatalf("Error: %v, want ; Result: %v, want %v", recvErr, p, expectedResponse) + } + _, recvErr = s1.Read(p) + if recvErr != io.EOF { + t.Fatalf("Error: %v; want ", recvErr) + } + ct.Close() + server.stop() +} + +func TestClientErrorNotify(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, normal) + go server.stop() + // ct.reader should detect the error and activate ct.Error(). + <-ct.Error() + ct.Close() +} + +func performOneRPC(ct ClientTransport) { + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Small", + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + return + } + opts := Options{ + Last: true, + Delay: false, + } + if err := ct.Write(s, []byte{}, expectedRequest, &opts); err == nil || err == io.EOF { + time.Sleep(5 * time.Millisecond) + // The following s.Recv()'s could error out because the + // underlying transport is gone. 
+ // + // Read response + p := make([]byte, len(expectedResponse)) + s.Read(p) + // Read io.EOF + s.Read(p) + } +} + +func TestClientMix(t *testing.T) { + s, ct := setUp(t, 0, math.MaxUint32, normal) + go func(s *server) { + time.Sleep(5 * time.Second) + s.stop() + }(s) + go func(ct ClientTransport) { + <-ct.Error() + ct.Close() + }(ct) + for i := 0; i < 1000; i++ { + time.Sleep(10 * time.Millisecond) + go performOneRPC(ct) + } +} + +func TestLargeMessage(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, normal) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Errorf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) + } + if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { + t.Errorf("%v.Write(_, _, _) = %v, want ", ct, err) + } + p := make([]byte, len(expectedResponseLarge)) + if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) { + t.Errorf("s.Read(%v) = _, %v, want %v, ", err, p, expectedResponse) + } + if _, err = s.Read(p); err != io.EOF { + t.Errorf("Failed to complete the stream %v; want ", err) + } + }() + } + wg.Wait() + ct.Close() + server.stop() +} + +func TestLargeMessageWithDelayRead(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, delayRead) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Errorf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) + } + if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { + t.Errorf("%v.Write(_, _, _) = %v, want ", ct, err) + } + p := make([]byte, 
len(expectedResponseLarge)) + + // Give time to server to begin sending before client starts reading. + time.Sleep(2 * time.Second) + if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) { + t.Errorf("s.Read(_) = _, %v, want _, ", err) + } + if _, err = s.Read(p); err != io.EOF { + t.Errorf("Failed to complete the stream %v; want ", err) + } + }() + } + wg.Wait() + ct.Close() + server.stop() +} + +func TestLargeMessageDelayWrite(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, delayWrite) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Errorf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) + } + + // Give time to server to start reading before client starts sending. + time.Sleep(2 * time.Second) + if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { + t.Errorf("%v.Write(_, _, _) = %v, want ", ct, err) + } + p := make([]byte, len(expectedResponseLarge)) + if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) { + t.Errorf("io.ReadFull(%v) = _, %v, want %v, ", err, p, expectedResponse) + } + if _, err = s.Read(p); err != io.EOF { + t.Errorf("Failed to complete the stream %v; want ", err) + } + }() + } + wg.Wait() + ct.Close() + server.stop() +} + +func TestGracefulClose(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, normal) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Small", + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) + } + if err = ct.GracefulClose(); err != nil { + t.Fatalf("%v.GracefulClose() = %v, want ", ct, err) + } + var wg sync.WaitGroup + // Expect the failure for all the follow-up streams because ct has been closed 
gracefully. + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + if _, err := ct.NewStream(context.Background(), callHdr); err != ErrStreamDrain { + t.Errorf("%v.NewStream(_, _) = _, %v, want _, %v", ct, err, ErrStreamDrain) + } + }() + } + opts := Options{ + Last: true, + Delay: false, + } + // The stream which was created before graceful close can still proceed. + if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF { + t.Fatalf("%v.Write(_, _, _) = %v, want ", ct, err) + } + p := make([]byte, len(expectedResponse)) + if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponse) { + t.Fatalf("s.Read(%v) = _, %v, want %v, ", err, p, expectedResponse) + } + if _, err = s.Read(p); err != io.EOF { + t.Fatalf("Failed to complete the stream %v; want ", err) + } + wg.Wait() + ct.Close() + server.stop() +} + +func TestLargeMessageSuspension(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + // Set a long enough timeout for writing a large message out. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + s, err := ct.NewStream(ctx, callHdr) + if err != nil { + t.Fatalf("failed to open stream: %v", err) + } + // Write should not be done successfully due to flow control. + msg := make([]byte, initialWindowSize*8) + err = ct.Write(s, nil, msg, &Options{Last: true, Delay: false}) + expectedErr := streamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded) + if err != expectedErr { + t.Fatalf("Write got %v, want %v", err, expectedErr) + } + ct.Close() + server.stop() +} + +func TestMaxStreams(t *testing.T) { + server, ct := setUp(t, 0, 1, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + // Have a pending stream which takes all streams quota. 
+ s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + cc, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + done := make(chan struct{}) + ch := make(chan int) + ready := make(chan struct{}) + go func() { + for { + select { + case <-time.After(5 * time.Millisecond): + select { + case ch <- 0: + case <-ready: + return + } + case <-time.After(5 * time.Second): + close(done) + return + case <-ready: + return + } + } + }() + for { + select { + case <-ch: + case <-done: + t.Fatalf("Client has not received the max stream setting in 5 seconds.") + } + cc.mu.Lock() + // cc.maxStreams should be equal to 1 after having received settings frame from + // server. + if cc.maxStreams == 1 { + cc.mu.Unlock() + select { + case <-cc.streamsQuota.acquire(): + t.Fatalf("streamsQuota.acquire() becomes readable mistakenly.") + default: + cc.streamsQuota.mu.Lock() + quota := cc.streamsQuota.quota + cc.streamsQuota.mu.Unlock() + if quota != 0 { + t.Fatalf("streamsQuota.quota got non-zero quota mistakenly.") + } + } + break + } + cc.mu.Unlock() + } + close(ready) + // Close the pending stream so that the streams quota becomes available for the next new stream. + ct.CloseStream(s, nil) + select { + case i := <-cc.streamsQuota.acquire(): + if i != 1 { + t.Fatalf("streamsQuota.acquire() got %d quota, want 1.", i) + } + cc.streamsQuota.add(i) + default: + t.Fatalf("streamsQuota.acquire() is not readable.") + } + if _, err := ct.NewStream(context.Background(), callHdr); err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + ct.Close() + server.stop() +} + +func TestServerContextCanceledOnClosedConnection(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + var sc *http2Server + // Wait until the server transport is setup. 
+ for { + server.mu.Lock() + if len(server.conns) == 0 { + server.mu.Unlock() + time.Sleep(time.Millisecond) + continue + } + for k := range server.conns { + var ok bool + sc, ok = k.(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", k) + } + } + server.mu.Unlock() + break + } + cc, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, http2MaxFrameLen), func() {}}) + // Loop until the server side stream is created. + var ss *Stream + for { + time.Sleep(time.Second) + sc.mu.Lock() + if len(sc.activeStreams) == 0 { + sc.mu.Unlock() + continue + } + ss = sc.activeStreams[s.id] + sc.mu.Unlock() + break + } + cc.Close() + select { + case <-ss.Context().Done(): + if ss.Context().Err() != context.Canceled { + t.Fatalf("ss.Context().Err() got %v, want %v", ss.Context().Err(), context.Canceled) + } + case <-time.After(5 * time.Second): + t.Fatalf("Failed to cancel the context of the sever side stream.") + } + server.stop() +} + +func TestClientConnDecoupledFromApplicationRead(t *testing.T) { + connectOptions := ConnectOptions{ + InitialWindowSize: defaultWindowSize, + InitialConnWindowSize: defaultWindowSize, + } + server, client := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions) + defer server.stop() + defer client.Close() + + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + + if len(server.conns) == 0 { + return true, fmt.Errorf("timed-out while waiting for connection to be created on the server") + } + return false, nil + }) + + var st *http2Server + server.mu.Lock() + for k := range server.conns { + st = k.(*http2Server) + } + notifyChan := make(chan struct{}) + server.h.notify = notifyChan + server.mu.Unlock() + cstream1, err := client.NewStream(context.Background(), 
&CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Client failed to create first stream. Err: %v", err) + } + + <-notifyChan + var sstream1 *Stream + // Access stream on the server. + st.mu.Lock() + for _, v := range st.activeStreams { + if v.id == cstream1.id { + sstream1 = v + } + } + st.mu.Unlock() + if sstream1 == nil { + t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream1.id) + } + // Exhaust client's connection window. + if err := st.Write(sstream1, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil { + t.Fatalf("Server failed to write data. Err: %v", err) + } + notifyChan = make(chan struct{}) + server.mu.Lock() + server.h.notify = notifyChan + server.mu.Unlock() + // Create another stream on client. + cstream2, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Client failed to create second stream. Err: %v", err) + } + <-notifyChan + var sstream2 *Stream + st.mu.Lock() + for _, v := range st.activeStreams { + if v.id == cstream2.id { + sstream2 = v + } + } + st.mu.Unlock() + if sstream2 == nil { + t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream2.id) + } + // Server should be able to send data on the new stream, even though the client hasn't read anything on the first stream. + if err := st.Write(sstream2, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil { + t.Fatalf("Server failed to write data. Err: %v", err) + } + + // Client should be able to read data on second stream. + if _, err := cstream2.Read(make([]byte, defaultWindowSize)); err != nil { + t.Fatalf("_.Read(_) = _, %v, want _, ", err) + } + + // Client should be able to read data on first stream. 
+ if _, err := cstream1.Read(make([]byte, defaultWindowSize)); err != nil { + t.Fatalf("_.Read(_) = _, %v, want _, ", err) + } +} + +func TestServerConnDecoupledFromApplicationRead(t *testing.T) { + serverConfig := &ServerConfig{ + InitialWindowSize: defaultWindowSize, + InitialConnWindowSize: defaultWindowSize, + } + server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) + defer server.stop() + defer client.Close() + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + + if len(server.conns) == 0 { + return true, fmt.Errorf("timed-out while waiting for connection to be created on the server") + } + return false, nil + }) + var st *http2Server + server.mu.Lock() + for k := range server.conns { + st = k.(*http2Server) + } + server.mu.Unlock() + cstream1, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create 1st stream. Err: %v", err) + } + // Exhaust server's connection window. + if err := client.Write(cstream1, nil, make([]byte, defaultWindowSize), &Options{Last: true}); err != nil { + t.Fatalf("Client failed to write data. Err: %v", err) + } + //Client should be able to create another stream and send data on it. + cstream2, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create 2nd stream. Err: %v", err) + } + if err := client.Write(cstream2, nil, make([]byte, defaultWindowSize), &Options{}); err != nil { + t.Fatalf("Client failed to write data. Err: %v", err) + } + // Get the streams on server. 
+ waitWhileTrue(t, func() (bool, error) { + st.mu.Lock() + defer st.mu.Unlock() + + if len(st.activeStreams) != 2 { + return true, fmt.Errorf("timed-out while waiting for server to have created the streams") + } + return false, nil + }) + var sstream1 *Stream + st.mu.Lock() + for _, v := range st.activeStreams { + if v.id == 1 { + sstream1 = v + } + } + st.mu.Unlock() + // Trying to write more on a max-ed out stream should result in a RST_STREAM from the server. + ct := client.(*http2Client) + ct.controlBuf.put(&dataFrame{cstream2.id, true, make([]byte, 1), func() {}}) + code := http2ErrConvTab[http2.ErrCodeFlowControl] + waitWhileTrue(t, func() (bool, error) { + cstream2.mu.Lock() + defer cstream2.mu.Unlock() + if cstream2.status.Code() != code { + return true, fmt.Errorf("want code = %v, got %v", code, cstream2.status.Code()) + } + return false, nil + }) + // Reading from the stream on server should succeed. + if _, err := sstream1.Read(make([]byte, defaultWindowSize)); err != nil { + t.Fatalf("_.Read(_) = %v, want ", err) + } + + if _, err := sstream1.Read(make([]byte, 1)); err != io.EOF { + t.Fatalf("_.Read(_) = %v, want io.EOF", err) + } + +} + +func TestServerWithMisbehavedClient(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, suspended) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + var sc *http2Server + // Wait until the server transport is setup. + for { + server.mu.Lock() + if len(server.conns) == 0 { + server.mu.Unlock() + time.Sleep(time.Millisecond) + continue + } + for k := range server.conns { + var ok bool + sc, ok = k.(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", k) + } + } + server.mu.Unlock() + break + } + cc, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + // Test server behavior for violation of stream flow control window size restriction. 
+ s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + var sent int + // Drain the stream flow control window + cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, http2MaxFrameLen), func() {}}) + sent += http2MaxFrameLen + // Wait until the server creates the corresponding stream and receive some data. + var ss *Stream + for { + time.Sleep(time.Millisecond) + sc.mu.Lock() + if len(sc.activeStreams) == 0 { + sc.mu.Unlock() + continue + } + ss = sc.activeStreams[s.id] + sc.mu.Unlock() + ss.fc.mu.Lock() + if ss.fc.pendingData > 0 { + ss.fc.mu.Unlock() + break + } + ss.fc.mu.Unlock() + } + if ss.fc.pendingData != http2MaxFrameLen || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != 0 || sc.fc.pendingUpdate != 0 { + t.Fatalf("Server mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, http2MaxFrameLen, 0, 0, 0) + } + // Keep sending until the server inbound window is drained for that stream. + for sent <= initialWindowSize { + cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, 1), func() {}}) + sent++ + } + // Server sent a resetStream for s already. + code := http2ErrConvTab[http2.ErrCodeFlowControl] + if _, err := s.Read(make([]byte, 1)); err != io.EOF { + t.Fatalf("%v got err %v want ", s, err) + } + if s.status.Code() != code { + t.Fatalf("%v got status %v; want Code=%v", s, s.status, code) + } + + ct.CloseStream(s, nil) + ct.Close() + server.stop() +} + +func TestClientWithMisbehavedServer(t *testing.T) { + // Turn off BDP estimation so that the server can + // violate stream window. 
+ connectOptions := ConnectOptions{ + InitialWindowSize: initialWindowSize, + } + server, ct := setUpWithOptions(t, 0, &ServerConfig{}, misbehaved, connectOptions) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Stream", + } + conn, ok := ct.(*http2Client) + if !ok { + t.Fatalf("Failed to convert %v to *http2Client", ct) + } + // Test the logic for the violation of stream flow control window size restriction. + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + d := make([]byte, 1) + if err := ct.Write(s, nil, d, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { + t.Fatalf("Failed to write: %v", err) + } + // Read without window update. + for { + p := make([]byte, http2MaxFrameLen) + if _, err = s.trReader.(*transportReader).reader.Read(p); err != nil { + break + } + } + if s.fc.pendingData <= initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate != 0 { + t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want >%d, %d, %d, >%d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, 0, 0) + } + + if err != io.EOF { + t.Fatalf("Got err %v, want ", err) + } + if s.status.Code() != codes.Internal { + t.Fatalf("Got s.status %v, want s.status.Code()=Internal", s.status) + } + + conn.CloseStream(s, err) + ct.Close() + server.stop() +} + +var encodingTestStatus = status.New(codes.Internal, "\n") + +func TestEncodingRequiredStatus(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, encodingRequiredStatus) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + return + } + opts := Options{ + Last: true, + Delay: false, + } + if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF { + t.Fatalf("Failed to write the request: %v", err) + } 
+ p := make([]byte, http2MaxFrameLen) + if _, err := s.trReader.(*transportReader).Read(p); err != io.EOF { + t.Fatalf("Read got error %v, want %v", err, io.EOF) + } + if !reflect.DeepEqual(s.Status(), encodingTestStatus) { + t.Fatalf("stream with status %v, want %v", s.Status(), encodingTestStatus) + } + ct.Close() + server.stop() +} + +func TestInvalidHeaderField(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, invalidHeaderField) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo", + } + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + return + } + opts := Options{ + Last: true, + Delay: false, + } + if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF { + t.Fatalf("Failed to write the request: %v", err) + } + p := make([]byte, http2MaxFrameLen) + _, err = s.trReader.(*transportReader).Read(p) + if se, ok := err.(StreamError); !ok || se.Code != codes.FailedPrecondition || !strings.Contains(err.Error(), expectedInvalidHeaderField) { + t.Fatalf("Read got error %v, want error with code %s and contains %q", err, codes.FailedPrecondition, expectedInvalidHeaderField) + } + ct.Close() + server.stop() +} + +func TestStreamContext(t *testing.T) { + expectedStream := &Stream{} + ctx := newContextWithStream(context.Background(), expectedStream) + s, ok := StreamFromContext(ctx) + if !ok || expectedStream != s { + t.Fatalf("GetStreamFromContext(%v) = %v, %t, want: %v, true", ctx, s, ok, expectedStream) + } +} + +func TestIsReservedHeader(t *testing.T) { + tests := []struct { + h string + want bool + }{ + {"", false}, // but should be rejected earlier + {"foo", false}, + {"content-type", true}, + {"grpc-message-type", true}, + {"grpc-encoding", true}, + {"grpc-message", true}, + {"grpc-status", true}, + {"grpc-timeout", true}, + {"te", true}, + } + for _, tt := range tests { + got := isReservedHeader(tt.h) + if got != tt.want { + t.Errorf("isReservedHeader(%q) = %v; want %v", tt.h, got, tt.want) + } + } 
+} + +func TestContextErr(t *testing.T) { + for _, test := range []struct { + // input + errIn error + // outputs + errOut StreamError + }{ + {context.DeadlineExceeded, StreamError{codes.DeadlineExceeded, context.DeadlineExceeded.Error()}}, + {context.Canceled, StreamError{codes.Canceled, context.Canceled.Error()}}, + } { + err := ContextErr(test.errIn) + if err != test.errOut { + t.Fatalf("ContextErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) + } + } +} + +func max(a, b int32) int32 { + if a > b { + return a + } + return b +} + +type windowSizeConfig struct { + serverStream int32 + serverConn int32 + clientStream int32 + clientConn int32 +} + +func TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) { + wc := windowSizeConfig{ + serverStream: 10 * 1024 * 1024, + serverConn: 12 * 1024 * 1024, + clientStream: 6 * 1024 * 1024, + clientConn: 8 * 1024 * 1024, + } + testAccountCheckWindowSize(t, wc) +} + +func TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) { + wc := windowSizeConfig{ + serverStream: defaultWindowSize, + // Note this is smaller than initialConnWindowSize which is the current default. + serverConn: defaultWindowSize, + clientStream: defaultWindowSize, + clientConn: defaultWindowSize, + } + testAccountCheckWindowSize(t, wc) +} + +func testAccountCheckWindowSize(t *testing.T, wc windowSizeConfig) { + serverConfig := &ServerConfig{ + InitialWindowSize: wc.serverStream, + InitialConnWindowSize: wc.serverConn, + } + connectOptions := ConnectOptions{ + InitialWindowSize: wc.clientStream, + InitialConnWindowSize: wc.clientConn, + } + server, client := setUpWithOptions(t, 0, serverConfig, suspended, connectOptions) + defer server.stop() + defer client.Close() + + // Wait for server conns to be populated with new server transport. 
+ waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + if len(server.conns) == 0 { + return true, fmt.Errorf("timed out waiting for server transport to be created") + } + return false, nil + }) + var st *http2Server + server.mu.Lock() + for k := range server.conns { + st = k.(*http2Server) + } + server.mu.Unlock() + ct := client.(*http2Client) + cstream, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create stream. Err: %v", err) + } + // Wait for server to receive headers. + waitWhileTrue(t, func() (bool, error) { + st.mu.Lock() + defer st.mu.Unlock() + if len(st.activeStreams) == 0 { + return true, fmt.Errorf("timed out waiting for server to receive headers") + } + return false, nil + }) + // Sleeping to make sure the settings are applied in case of negative test. + time.Sleep(time.Second) + + waitWhileTrue(t, func() (bool, error) { + st.fc.mu.Lock() + lim := st.fc.limit + st.fc.mu.Unlock() + if lim != uint32(serverConfig.InitialConnWindowSize) { + return true, fmt.Errorf("Server transport flow control window size: got %v, want %v", lim, serverConfig.InitialConnWindowSize) + } + return false, nil + }) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + serverSendQuota, err := wait(ctx, context.Background(), nil, nil, st.sendQuotaPool.acquire()) + if err != nil { + t.Fatalf("Error while acquiring sendQuota on server. 
Err: %v", err) + } + cancel() + st.sendQuotaPool.add(serverSendQuota) + if serverSendQuota != int(connectOptions.InitialConnWindowSize) { + t.Fatalf("Server send quota(%v) not equal to client's window size(%v) on conn.", serverSendQuota, connectOptions.InitialConnWindowSize) + } + st.mu.Lock() + ssq := st.streamSendQuota + st.mu.Unlock() + if ssq != uint32(connectOptions.InitialWindowSize) { + t.Fatalf("Server stream send quota(%v) not equal to client's window size(%v) on stream.", ssq, connectOptions.InitialWindowSize) + } + ct.fc.mu.Lock() + limit := ct.fc.limit + ct.fc.mu.Unlock() + if limit != uint32(connectOptions.InitialConnWindowSize) { + t.Fatalf("Client transport flow control window size is %v, want %v", limit, connectOptions.InitialConnWindowSize) + } + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + clientSendQuota, err := wait(ctx, context.Background(), nil, nil, ct.sendQuotaPool.acquire()) + if err != nil { + t.Fatalf("Error while acquiring sendQuota on client. 
Err: %v", err) + } + cancel() + ct.sendQuotaPool.add(clientSendQuota) + if clientSendQuota != int(serverConfig.InitialConnWindowSize) { + t.Fatalf("Client send quota(%v) not equal to server's window size(%v) on conn.", clientSendQuota, serverConfig.InitialConnWindowSize) + } + ct.mu.Lock() + ssq = ct.streamSendQuota + ct.mu.Unlock() + if ssq != uint32(serverConfig.InitialWindowSize) { + t.Fatalf("Client stream send quota(%v) not equal to server's window size(%v) on stream.", ssq, serverConfig.InitialWindowSize) + } + cstream.fc.mu.Lock() + limit = cstream.fc.limit + cstream.fc.mu.Unlock() + if limit != uint32(connectOptions.InitialWindowSize) { + t.Fatalf("Client stream flow control window size is %v, want %v", limit, connectOptions.InitialWindowSize) + } + var sstream *Stream + st.mu.Lock() + for _, v := range st.activeStreams { + sstream = v + } + st.mu.Unlock() + sstream.fc.mu.Lock() + limit = sstream.fc.limit + sstream.fc.mu.Unlock() + if limit != uint32(serverConfig.InitialWindowSize) { + t.Fatalf("Server stream flow control window size is %v, want %v", limit, serverConfig.InitialWindowSize) + } +} + +// Check accounting on both sides after sending and receiving large messages. +func TestAccountCheckExpandingWindow(t *testing.T) { + server, client := setUp(t, 0, 0, pingpong) + defer server.stop() + defer client.Close() + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + if len(server.conns) == 0 { + return true, fmt.Errorf("timed out while waiting for server transport to be created") + } + return false, nil + }) + var st *http2Server + server.mu.Lock() + for k := range server.conns { + st = k.(*http2Server) + } + server.mu.Unlock() + ct := client.(*http2Client) + cstream, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create stream. 
Err: %v", err) + } + + msgSize := 65535 * 16 * 2 + msg := make([]byte, msgSize) + buf := make([]byte, msgSize+5) + buf[0] = byte(0) + binary.BigEndian.PutUint32(buf[1:], uint32(msgSize)) + copy(buf[5:], msg) + opts := Options{} + header := make([]byte, 5) + for i := 1; i <= 10; i++ { + if err := ct.Write(cstream, nil, buf, &opts); err != nil { + t.Fatalf("Error on client while writing message: %v", err) + } + if _, err := cstream.Read(header); err != nil { + t.Fatalf("Error on client while reading data frame header: %v", err) + } + sz := binary.BigEndian.Uint32(header[1:]) + recvMsg := make([]byte, int(sz)) + if _, err := cstream.Read(recvMsg); err != nil { + t.Fatalf("Error on client while reading data: %v", err) + } + if len(recvMsg) != len(msg) { + t.Fatalf("Length of message received by client: %v, want: %v", len(recvMsg), len(msg)) + } + } + var sstream *Stream + st.mu.Lock() + for _, v := range st.activeStreams { + sstream = v + } + st.mu.Unlock() + + waitWhileTrue(t, func() (bool, error) { + // Check that pendingData and delta on flow control windows on both sides are 0. 
+ cstream.fc.mu.Lock()
+ if cstream.fc.delta != 0 {
+ cstream.fc.mu.Unlock()
+ return true, fmt.Errorf("delta on flow control window of client stream is non-zero")
+ }
+ if cstream.fc.pendingData != 0 {
+ cstream.fc.mu.Unlock()
+ return true, fmt.Errorf("pendingData on flow control window of client stream is non-zero")
+ }
+ cstream.fc.mu.Unlock()
+ sstream.fc.mu.Lock()
+ if sstream.fc.delta != 0 {
+ sstream.fc.mu.Unlock()
+ return true, fmt.Errorf("delta on flow control window of server stream is non-zero")
+ }
+ if sstream.fc.pendingData != 0 {
+ sstream.fc.mu.Unlock()
+ return true, fmt.Errorf("pendingData on flow control window of server stream is non-zero")
+ }
+ sstream.fc.mu.Unlock()
+ ct.fc.mu.Lock()
+ if ct.fc.delta != 0 {
+ ct.fc.mu.Unlock()
+ return true, fmt.Errorf("delta on flow control window of client transport is non-zero")
+ }
+ if ct.fc.pendingData != 0 {
+ ct.fc.mu.Unlock()
+ return true, fmt.Errorf("pendingData on flow control window of client transport is non-zero")
+ }
+ ct.fc.mu.Unlock()
+ st.fc.mu.Lock()
+ if st.fc.delta != 0 {
+ st.fc.mu.Unlock()
+ return true, fmt.Errorf("delta on flow control window of server transport is non-zero")
+ }
+ if st.fc.pendingData != 0 {
+ st.fc.mu.Unlock()
+ return true, fmt.Errorf("pendingData on flow control window of server transport is non-zero")
+ }
+ st.fc.mu.Unlock()
+
+ // Check flow control window on client stream is equal to out flow on server stream.
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ serverStreamSendQuota, err := wait(ctx, context.Background(), nil, nil, sstream.sendQuotaPool.acquire())
+ cancel()
+ if err != nil {
+ return true, fmt.Errorf("error while acquiring server stream send quota. Err: %v", err)
+ }
+ sstream.sendQuotaPool.add(serverStreamSendQuota)
+ cstream.fc.mu.Lock()
+ clientEst := cstream.fc.limit - cstream.fc.pendingUpdate
+ cstream.fc.mu.Unlock()
+ if uint32(serverStreamSendQuota) != clientEst {
+ return true, fmt.Errorf("server stream outflow: %v, estimated by client: %v", serverStreamSendQuota, clientEst)
+ }
+
+ // Check flow control window on server stream is equal to out flow on client stream.
+ ctx, cancel = context.WithTimeout(context.Background(), time.Second)
+ clientStreamSendQuota, err := wait(ctx, context.Background(), nil, nil, cstream.sendQuotaPool.acquire())
+ cancel()
+ if err != nil {
+ return true, fmt.Errorf("error while acquiring client stream send quota. Err: %v", err)
+ }
+ cstream.sendQuotaPool.add(clientStreamSendQuota)
+ sstream.fc.mu.Lock()
+ serverEst := sstream.fc.limit - sstream.fc.pendingUpdate
+ sstream.fc.mu.Unlock()
+ if uint32(clientStreamSendQuota) != serverEst {
+ return true, fmt.Errorf("client stream outflow: %v, estimated by server: %v", clientStreamSendQuota, serverEst)
+ }
+
+ // Check flow control window on client transport is equal to out flow of server transport.
+ ctx, cancel = context.WithTimeout(context.Background(), time.Second)
+ serverTrSendQuota, err := wait(ctx, context.Background(), nil, nil, st.sendQuotaPool.acquire())
+ cancel()
+ if err != nil {
+ return true, fmt.Errorf("error while acquiring server transport send quota. Err: %v", err)
+ }
+ st.sendQuotaPool.add(serverTrSendQuota)
+ ct.fc.mu.Lock()
+ clientEst = ct.fc.limit - ct.fc.pendingUpdate
+ ct.fc.mu.Unlock()
+ if uint32(serverTrSendQuota) != clientEst {
+ return true, fmt.Errorf("server transport outflow: %v, estimated by client: %v", serverTrSendQuota, clientEst)
+ }
+
+ // Check flow control window on server transport is equal to out flow of client transport.
+ ctx, cancel = context.WithTimeout(context.Background(), time.Second)
+ clientTrSendQuota, err := wait(ctx, context.Background(), nil, nil, ct.sendQuotaPool.acquire())
+ cancel()
+ if err != nil {
+ return true, fmt.Errorf("error while acquiring client transport send quota. Err: %v", err)
+ }
+ ct.sendQuotaPool.add(clientTrSendQuota)
+ st.fc.mu.Lock()
+ serverEst = st.fc.limit - st.fc.pendingUpdate
+ st.fc.mu.Unlock()
+ if uint32(clientTrSendQuota) != serverEst {
+ return true, fmt.Errorf("client transport outflow: %v, estimated by server: %v", clientTrSendQuota, serverEst)
+ }
+
+ return false, nil
+ })
+
+}
+
+func waitWhileTrue(t *testing.T, condition func() (bool, error)) {
+ var (
+ wait bool
+ err error
+ )
+ timer := time.NewTimer(time.Second * 5)
+ for {
+ wait, err = condition()
+ if wait {
+ select {
+ case <-timer.C:
+ t.Fatalf(err.Error())
+ default:
+ time.Sleep(50 * time.Millisecond)
+ continue
+ }
+ }
+ if !timer.Stop() {
+ <-timer.C
+ }
+ break
+ }
+}
+
+// A function of type writeHeaders writes out
+// http status with the given stream ID using the given framer. 
+type writeHeaders func(*http2.Framer, uint32, int) error + +func writeOneHeader(framer *http2.Framer, sid uint32, httpStatus int) error { + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) + henc.WriteField(hpack.HeaderField{Name: ":status", Value: fmt.Sprint(httpStatus)}) + return framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: sid, + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) +} + +func writeTwoHeaders(framer *http2.Framer, sid uint32, httpStatus int) error { + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) + henc.WriteField(hpack.HeaderField{ + Name: ":status", + Value: fmt.Sprint(http.StatusOK), + }) + if err := framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: sid, + BlockFragment: buf.Bytes(), + EndHeaders: true, + }); err != nil { + return err + } + buf.Reset() + henc.WriteField(hpack.HeaderField{ + Name: ":status", + Value: fmt.Sprint(httpStatus), + }) + return framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: sid, + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) +} + +type httpServer struct { + conn net.Conn + httpStatus int + wh writeHeaders +} + +func (s *httpServer) start(t *testing.T, lis net.Listener) { + // Launch an HTTP server to send back header with httpStatus. + go func() { + var err error + s.conn, err = lis.Accept() + if err != nil { + t.Errorf("Error accepting connection: %v", err) + return + } + defer s.conn.Close() + // Read preface sent by client. + if _, err = io.ReadFull(s.conn, make([]byte, len(http2.ClientPreface))); err != nil { + t.Errorf("Error at server-side while reading preface from cleint. Err: %v", err) + return + } + reader := bufio.NewReaderSize(s.conn, defaultWriteBufSize) + writer := bufio.NewWriterSize(s.conn, defaultReadBufSize) + framer := http2.NewFramer(writer, reader) + if err = framer.WriteSettingsAck(); err != nil { + t.Errorf("Error at server-side while sending Settings ack. 
Err: %v", err) + return + } + var sid uint32 + // Read frames until a header is received. + for { + frame, err := framer.ReadFrame() + if err != nil { + t.Errorf("Error at server-side while reading frame. Err: %v", err) + return + } + if hframe, ok := frame.(*http2.HeadersFrame); ok { + sid = hframe.Header().StreamID + break + } + } + if err = s.wh(framer, sid, s.httpStatus); err != nil { + t.Errorf("Error at server-side while writing headers. Err: %v", err) + return + } + writer.Flush() + }() +} + +func (s *httpServer) cleanUp() { + if s.conn != nil { + s.conn.Close() + } +} + +func setUpHTTPStatusTest(t *testing.T, httpStatus int, wh writeHeaders) (stream *Stream, cleanUp func()) { + var ( + err error + lis net.Listener + server *httpServer + client ClientTransport + ) + cleanUp = func() { + if lis != nil { + lis.Close() + } + if server != nil { + server.cleanUp() + } + if client != nil { + client.Close() + } + } + defer func() { + if err != nil { + cleanUp() + } + }() + lis, err = net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen. Err: %v", err) + } + server = &httpServer{ + httpStatus: httpStatus, + wh: wh, + } + server.start(t, lis) + client, err = newHTTP2Client(context.Background(), TargetInfo{Addr: lis.Addr().String()}, ConnectOptions{}, 2*time.Second) + if err != nil { + t.Fatalf("Error creating client. Err: %v", err) + } + stream, err = client.NewStream(context.Background(), &CallHdr{Method: "bogus/method", Flush: true}) + if err != nil { + t.Fatalf("Error creating stream at client-side. 
Err: %v", err) + } + return +} + +func TestHTTPToGRPCStatusMapping(t *testing.T) { + for k := range httpStatusConvTab { + testHTTPToGRPCStatusMapping(t, k, writeOneHeader) + } +} + +func testHTTPToGRPCStatusMapping(t *testing.T, httpStatus int, wh writeHeaders) { + stream, cleanUp := setUpHTTPStatusTest(t, httpStatus, wh) + defer cleanUp() + want := httpStatusConvTab[httpStatus] + buf := make([]byte, 8) + _, err := stream.Read(buf) + if err == nil { + t.Fatalf("Stream.Read(_) unexpectedly returned no error. Expected stream error with code %v", want) + } + serr, ok := err.(StreamError) + if !ok { + t.Fatalf("err.(Type) = %T, want StreamError", err) + } + if want != serr.Code { + t.Fatalf("Want error code: %v, got: %v", want, serr.Code) + } +} + +func TestHTTPStatusOKAndMissingGRPCStatus(t *testing.T) { + stream, cleanUp := setUpHTTPStatusTest(t, http.StatusOK, writeOneHeader) + defer cleanUp() + buf := make([]byte, 8) + _, err := stream.Read(buf) + if err != io.EOF { + t.Fatalf("stream.Read(_) = _, %v, want _, io.EOF", err) + } + want := codes.Unknown + stream.mu.Lock() + defer stream.mu.Unlock() + if stream.status.Code() != want { + t.Fatalf("Status code of stream: %v, want: %v", stream.status.Code(), want) + } +} + +func TestHTTPStatusNottOKAndMissingGRPCStatusInSecondHeader(t *testing.T) { + testHTTPToGRPCStatusMapping(t, http.StatusUnauthorized, writeTwoHeaders) +} + +// If any error occurs on a call to Stream.Read, future calls +// should continue to return that same error. 
+func TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) { + testRecvBuffer := newRecvBuffer() + s := &Stream{ + ctx: context.Background(), + goAway: make(chan struct{}), + buf: testRecvBuffer, + requestRead: func(int) {}, + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, + }, + windowHandler: func(int) {}, + } + testData := make([]byte, 1) + testData[0] = 5 + testErr := errors.New("test error") + s.write(recvMsg{data: testData, err: testErr}) + + inBuf := make([]byte, 1) + actualCount, actualErr := s.Read(inBuf) + if actualCount != 0 { + t.Errorf("actualCount, _ := s.Read(_) differs; want 0; got %v", actualCount) + } + if actualErr.Error() != testErr.Error() { + t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error()) + } + + s.write(recvMsg{data: testData, err: nil}) + s.write(recvMsg{data: testData, err: errors.New("different error from first")}) + + for i := 0; i < 2; i++ { + inBuf := make([]byte, 1) + actualCount, actualErr := s.Read(inBuf) + if actualCount != 0 { + t.Errorf("actualCount, _ := s.Read(_) differs; want %v; got %v", 0, actualCount) + } + if actualErr.Error() != testErr.Error() { + t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error()) + } + } +} diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100755 index 000000000..72ef3290a --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +die() { + echo "$@" >&2 + exit 1 +} + +# TODO: Remove this check and the mangling below once "context" is imported +# directly. 
+if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi + +PATH="$GOPATH/bin:$GOROOT/bin:$PATH" + +# Check proto in manual runs or cron runs. +if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then + check_proto="true" +fi + +if [ "$1" = "-install" ]; then + go get -d \ + google.golang.org/grpc/... + go get -u \ + github.com/golang/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/golang/protobuf/protoc-gen-go \ + golang.org/x/tools/cmd/stringer + if [[ "$check_proto" = "true" ]]; then + if [[ "$TRAVIS" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif ! which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) +gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) +goimports -l . 2>&1 | tee /dev/stderr | (! read) +golint ./... 2>&1 | (grep -vE "(_mock|_string|\.pb)\.go:" || true) | tee /dev/stderr | (! read) + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). +# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). +git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' +set +o pipefail +# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. +go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! 
read) +set -o pipefail +git reset --hard HEAD + +if [[ "$check_proto" = "true" ]]; then + PATH="/home/travis/bin:$PATH" make proto && \ + git status --porcelain 2>&1 | (! read) || \ + (git status; git --no-pager diff; exit 1) +fi + +# TODO(menghanl): fix errors in transport_test. +staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./... From 90984dabceed059ecb1ed7321f2c11c0a68c3f8e Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Mon, 30 Oct 2017 13:41:42 +0100 Subject: [PATCH 4/5] Fix connection timeout. Dial() always succeeds, attacher must wait until the connection is Ready. --- cmd/csi-attacher/main.go | 7 +++++-- pkg/connection/connection.go | 30 +++++++++++++++++++----------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/cmd/csi-attacher/main.go b/cmd/csi-attacher/main.go index dca0dad3a..b986f1380 100644 --- a/cmd/csi-attacher/main.go +++ b/cmd/csi-attacher/main.go @@ -39,13 +39,16 @@ const ( // Default timeout of short CSI calls like GetPluginInfo csiTimeout = time.Second + + // Name of CSI plugin for dummy operation + dummyAttacherName = "csi/dummy" ) // Command line flags var ( kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig file. Required only when running out of cluster.") resync = flag.Duration("resync", 10*time.Second, "Resync interval of the controller.") - connectionTimeout = flag.Int("connection-timeout", 60, "Timeout for waiting for CSI driver socket (in seconds).") + connectionTimeout = flag.Duration("connection-timeout", 1*time.Minute, "Timeout for waiting for CSI driver socket.") csiAddress = flag.String("csi-address", "/run/csi/socket", "Address of the CSI driver socket.") dummy = flag.Bool("dummy", false, "Run in dummy mode, i.e. not connecting to CSI driver and marking everything as attached. Expected CSI driver name is \"csi/dummy\".") ) @@ -74,7 +77,7 @@ func main() { if *dummy { // Do not connect to any CSI, mark everything as attached. 
handler = controller.NewTrivialHandler(clientset) - attacher = "csi/dummy" + attacher = dummyAttacherName } else { // Connect to CSI. csiConn, err := connection.New(*csiAddress, *connectionTimeout) diff --git a/pkg/connection/connection.go b/pkg/connection/connection.go index f30bb2fe9..a5cdceccb 100644 --- a/pkg/connection/connection.go +++ b/pkg/connection/connection.go @@ -24,6 +24,7 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/glog" "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" ) // CSIConnection is gRPC connection to a remote CSI driver and abstracts all @@ -56,8 +57,8 @@ var ( } ) -func New(address string, timeoutSeconds int) (CSIConnection, error) { - conn, err := connect(address, timeoutSeconds) +func New(address string, timeout time.Duration) (CSIConnection, error) { + conn, err := connect(address, timeout) if err != nil { return nil, err } @@ -66,18 +67,25 @@ func New(address string, timeoutSeconds int) (CSIConnection, error) { }, nil } -func connect(address string, timeoutSeconds int) (*grpc.ClientConn, error) { - var err error - for i := 0; i < timeoutSeconds; i++ { - var conn *grpc.ClientConn - conn, err = grpc.Dial(address, grpc.WithInsecure()) - if err == nil { +func connect(address string, timeout time.Duration) (*grpc.ClientConn, error) { + glog.V(2).Infof("Connecting to %s", address) + conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBackoffMaxDelay(time.Second)) + if err != nil { + return nil, err + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { + if !conn.WaitForStateChange(ctx, conn.GetState()) { + glog.V(4).Infof("Connection timed out") + return conn, nil // return nil, subsequent GetPluginInfo will show the real connection error + } + if conn.GetState() == connectivity.Ready { + glog.V(3).Infof("Connected") return conn, nil } - glog.Warningf("Error connecting to %s: %s", address, err) - time.Sleep(time.Second) + 
glog.V(4).Infof("Still trying, connection is %s", conn.GetState()) } - return nil, err } func (c *csiConnection) GetDriverName(ctx context.Context) (string, error) { From 29e3bde68e52f36a7520c24d8f8259e5c069d069 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Mon, 30 Oct 2017 14:25:27 +0100 Subject: [PATCH 5/5] Update README --- README.md | 44 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d0fb444b8..2bee1a98d 100644 --- a/README.md +++ b/README.md @@ -2,15 +2,55 @@ The csi-attacher is part of Kubernetes implementation of [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec). -## Design +## Overview In short, it's an external controller that monitors `VolumeAttachment` objects and attaches/detaches volumes to/from nodes. Full design can be found at Kubernetes proposal at https://github.com/kubernetes/community/pull/1258. TODO: update the link after merge. There is no plan to implement a generic external attacher library, csi-attacher is the only external attacher that exists. If this proves false in future, splitting a generic external-attacher library should be possible with some effort. +## Design + +External attacher follows [controller](https://github.com/kubernetes/community/blob/master/contributors/devel/controllers.md) pattern and uses informers to watch for `VolumeAttachment` and `PersistentVolume` create/update/delete events. It filters out `VolumeAttachment` instances with `Attacher==` and processes these events in workqueues with exponential backoff. Real handling is deferred to `Handler` interface. + +`Handler` interface has two implementations, trivial and real one. + +### Trivial handler + +Trivial handler will be used for CSI drivers that don't support `ControllerPublish` calls and marks all `VolumeAttachment` as attached. It does not use any finalizers. This attacher can also be used for testing. 
+ +### Real attacher + +"Real" attacher talks to CSI over socket (`/run/csi/socket` by default, configurable by `-csi-address`). The attacher tries to connect for `-connection-timeout` (1 minute by default), allowing CSI driver to start and create its server socket a bit later. + +The attacher then: + +* Discovers the supported attacher name by `GetPluginInfo` calls. The attacher only processes `VolumeAttachment` instances that have `Attacher==GetPluginInfoResponse.Name`. +* Uses `ControllerGetCapabilities` to find out if CSI driver supports `ControllerPublish` calls. It degrades to trivial mode if not. +* Processes `VolumeAttach` instances and attaches/detaches volumes. TBD: details + VolumeAttachment finalizers. +* TBD: PV finalizers. +* TBD: async operations - we can't block worker queue for 20 minutes (= current AWS attach timeout). + ## Usage -TBD +### Dummy mode + +Dummy attacher watches for `VolumeAttachment` instances with `Attacher=="csi/dummy"` and marks them attached. It does not use any finalizers and is useful for testing. + +To run dummy attacher in `hack/local-up-cluster.sh` environment: + +```sh +$ csi-attacher -dummy -kubeconfig=~/kube/config -v 5 +``` + +TBD: running as a pod + +### Real attacher + +```sh +$ csi-attacher -kubeconfig=~/kube/config -v 5 -csi-address /run/csi/socket +``` + +TBD: running as a pod ## Vendoring